From 9fa6545690d711f740a6ae3f3069dafb56b879d4 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 29 Jun 2021 18:55:43 +0200 Subject: [PATCH 01/19] state machine proofs. --- client/api/src/proof_provider.rs | 24 +- client/network/src/protocol/sync/state.rs | 7 +- client/service/src/client/client.rs | 11 +- primitives/state-machine/src/lib.rs | 291 ++++++++++++++++++- primitives/state-machine/src/trie_backend.rs | 6 +- 5 files changed, 318 insertions(+), 21 deletions(-) diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 0e9fd5318ba90..7201765dc64a7 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -24,6 +24,17 @@ use sp_runtime::{ use crate::{StorageProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; + +/// Multiple key value state. +/// States are ordered by root storage key. +pub struct KeyValueStates(Vec<KeyValueState>); + +/// A key value state. +pub struct KeyValueState { + parent_storage_key: Option<Vec<u8>>, + key_values: Vec<(Vec<u8>, Vec<u8>)>, +} + /// Interface for providing block proving utilities. pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. @@ -71,12 +82,15 @@ pub trait ProofProvider { key: &StorageKey, ) -> sp_blockchain::Result>; - /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively, - /// building proofs until size limit is reached. Returns combined proof and the number of collected keys. + /// Given a `BlockId` iterate over all storage values starting at `start_keys`. + /// `start_keys` contain the current parsed location of storage roots and + /// at the last level the value to start at exclusively. + /// Proofs are built until the size limit is reached. + /// Returns the combined proof and the number of collected keys. 
fn read_proof_collection( &self, id: &BlockId, - start_key: &[u8], + start_keys: &[&Vec], size_limit: usize, ) -> sp_blockchain::Result<(StorageProof, u32)>; @@ -95,6 +109,6 @@ pub trait ProofProvider { &self, root: Block::Hash, proof: StorageProof, - start_key: &[u8], - ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)>; + start_keys: &[&Vec], + ) -> sp_blockchain::Result<(KeyValueStates, bool)>; } diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index fc9dfdbb8c376..4e8b499b9f8ba 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -22,6 +22,7 @@ use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use sc_client_api::StorageProof; use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; use crate::chain::{Client, ImportedState}; +use smallvec::SmallVec; use super::StateDownloadProgress; /// State sync support. @@ -32,7 +33,7 @@ pub struct StateSync { target_block: B::Hash, target_header: B::Header, target_root: B::Hash, - last_key: Vec, + last_key: SmallVec<[Vec; 2]>, state: Vec<(Vec, Vec)>, complete: bool, client: Arc>, @@ -58,7 +59,7 @@ impl StateSync { target_block: target.hash(), target_root: target.state_root().clone(), target_header: target, - last_key: Vec::default(), + last_key: SmallVec::default(), state: Vec::default(), complete: false, imported_bytes: 0, @@ -99,7 +100,7 @@ impl StateSync { let (values, complete) = match self.client.verify_range_proof( self.target_root, proof, - &self.last_key + &self.last_key[..], ) { Err(e) => { log::debug!( diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 4a998a12d2b7f..ec4e45a119685 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1399,20 +1399,21 @@ impl ProofProvider for Client where &self, root: Block::Hash, proof: StorageProof, - start_key: &[u8], - ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> { - 
Ok(read_range_proof_check::>( + start_key: &[&Vec], + ) -> sp_blockchain::Result<(Vec, Vec)>>, bool)> { + let state = read_range_proof_check::>( root, proof, None, None, None, Some(start_key), - )?) + )?; + + Ok(state) } } - impl BlockBuilderProvider for Client where B: backend::Backend + Send + Sync + 'static, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bc5b48f02db4e..7a9292893fdd0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -177,7 +177,8 @@ mod execution { use hash_db::Hasher; use codec::{Decode, Encode, Codec}; use sp_core::{ - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, + storage::{ChildInfo, ChildType, PrefixedStorageKey}, NativeOrEncoded, + NeverNativeValue, hexdisplay::HexDisplay, traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, }; use sp_externalities::Extensions; @@ -726,6 +727,126 @@ mod execution { prove_read_on_trie_backend(trie_backend, keys) } + /// State machine only allows a single level + /// of child trie. + pub const MAX_NESTED_TRIE_DEPTH: usize = 2; + + /// Multiple key value state. + /// States are ordered by root storage key. + pub struct KeyValueStates(pub Vec); + + /// A key value state. + pub struct KeyValueState { + /// Pair of key and values from this state. + pub key_values: Vec<(Vec, Vec)>, + } + + /// Generate range storage read proof, with child tries + /// content. + /// `start_at` key are included but omitted for child proof inclusion. 
+ pub fn prove_range_read_with_child_with_size( + mut backend: B, + size_limit: usize, + start_at: &[Vec], + ) -> Result<(StorageProof, u32), Box> + where + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + { + let trie_backend = backend.as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_range_read_with_child_with_size_on_trie_backend( + trie_backend, + size_limit, + start_at, + ) + } + + /// Generate range storage read proof, with child tries + /// content. + pub fn prove_range_read_with_child_with_size_on_trie_backend( + trie_backend: &TrieBackend, + size_limit: usize, + start_at: &[Vec], + ) -> Result<(StorageProof, u32), Box> + where + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + { + if start_at.len() > MAX_NESTED_TRIE_DEPTH { + return Err(Box::new("Invalid start of range.")); + } + + let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); + let mut count = 0; + + let (mut child_key, mut start_at) = if start_at.len() == 2 { + (start_at.get(0).cloned(), start_at.get(1).cloned()) + } else { + (None, start_at.get(0).cloned()) + }; + + loop { + let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { + let storage_key = PrefixedStorageKey::new_ref(storage_key); + (Some(match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err(Box::new("Invalid range start child trie key.")), + }), 2) + } else { + (None, 1) + }; + + let mut switch_child_key = None; + let start_at_ref = start_at.as_ref().map(AsRef::as_ref); + let mut first = start_at.is_some(); + let completed = proving_backend.apply_to_key_values_while( + child_info.as_ref(), + None, + start_at_ref, + |key, _value| { + if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + if first { + if start_at_ref.as_ref().map(|start| key.as_slice() > start) + 
.unwrap_or(true) { + first = false; + } + } + if depth < MAX_NESTED_TRIE_DEPTH && !first + && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { + switch_child_key = Some(key); + false + } else { + true + } + } else { + false + } + }, + false, + ).map_err(|e| Box::new(e) as Box)?; + + if switch_child_key.is_none() { + if depth == 1 { + break; + } else { + if completed { + start_at = child_key.take(); + } else { + break; + } + } + } else { + child_key = switch_child_key; + start_at = None; + } + } + Ok((proving_backend.extract_proof(), count)) + } + /// Generate range storage read proof. pub fn prove_range_read_with_size( mut backend: B, @@ -852,7 +973,28 @@ mod execution { Ok(result) } - /// Check child storage range proof, generated by `prove_range_read` call. + /// Check storage range proof with child trie included, generated by + /// `prove_range_read_with_child_with_size` call. + /// + /// Returns content and the depth of the pending state iteration, + /// 0 if completed. + pub fn read_range_proof_check_with_child( + root: H::Out, + proof: StorageProof, + start_at: &[Vec], + ) -> Result<(KeyValueStates, usize), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let proving_backend = create_proof_check_backend::(root, proof)?; + read_range_proof_check_with_child_on_proving_backend( + &proving_backend, + start_at, + ) + } + + /// Check child storage range proof, generated by `prove_range_read_with_size` call. pub fn read_range_proof_check( root: H::Out, proof: StorageProof, @@ -952,6 +1094,94 @@ mod execution { Err(e) => Err(Box::new(e) as Box), } } + + /// Check storage range proof on pre-created proving backend. + /// + /// See `read_range_proof_check_with_child`. 
+ pub fn read_range_proof_check_with_child_on_proving_backend( + proving_backend: &TrieBackend, H>, + start_at: &[Vec], + ) -> Result<(KeyValueStates, usize), Box> + where + H: Hasher, + H::Out: Ord + Codec, + { + let mut result = vec![KeyValueState { + key_values: Default::default(), + }]; + if start_at.len() > MAX_NESTED_TRIE_DEPTH { + return Err(Box::new("Invalid start of range.")); + } + + let (mut child_key, mut start_at) = if start_at.len() == 2 { + (start_at.get(0).cloned(), start_at.get(1).cloned()) + } else { + (None, start_at.get(0).cloned()) + }; + + let completed = loop { + let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { + result.push(KeyValueState { + key_values: Default::default(), + }); + let storage_key = PrefixedStorageKey::new_ref(storage_key); + (Some(match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err(Box::new("Invalid range start child trie key.")), + }), 2) + } else { + (None, 1) + }; + + let mut switch_child_key = None; + let values = if child_info.is_some() { + &mut result.last_mut().expect("Added above").key_values + } else { + &mut result[0].key_values + }; + let mut first = start_at.is_some(); + let start_at_ref = start_at.as_ref().map(AsRef::as_ref); + let completed = proving_backend.apply_to_key_values_while( + child_info.as_ref(), + None, + start_at_ref, + |key, value| { + if first { + if start_at_ref.as_ref().map(|start| key.as_slice() > start) + .unwrap_or(true) { + first = false; + } + } + if !first { + values.push((key.to_vec(), value.to_vec())); + } + if depth < MAX_NESTED_TRIE_DEPTH && !first + && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { + switch_child_key = Some(key); + false + } else { + true + } + }, + true, + ).map_err(|e| Box::new(e) as Box)?; + + if switch_child_key.is_none() { + if !completed { + break depth; + } + if depth == 1 { + break 0; + } else { + 
start_at = child_key.take(); + } + } else { + child_key = switch_child_key; + start_at = None; + } + }; + Ok((KeyValueStates(result), completed)) + } } #[cfg(test)] @@ -1075,7 +1305,6 @@ mod tests { ); } - #[test] fn execute_works_with_native_else_wasm() { let backend = trie_backend::tests::test_trie(); @@ -1579,7 +1808,7 @@ mod tests { ).unwrap(); assert_eq!( local_result1.into_iter().collect::>(), - vec![(b"value3".to_vec(), Some(vec![142]))], + vec![(b"value3".to_vec(), Some(vec![142; 33]))], ); assert_eq!( local_result2.into_iter().collect::>(), @@ -1592,7 +1821,7 @@ mod tests { let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); - // Alwasys contains at least some nodes. + // Always contains at least some nodes. assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); @@ -1638,6 +1867,58 @@ mod tests { assert_eq!(completed, true); } + #[test] + fn prove_range_with_child_works() { + let mut remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let mut start_at = Vec::new(); + let trie_backend = remote_backend.as_trie_backend().unwrap(); + let max_iter = 1000; + let mut nb_loop = 0; + loop { + nb_loop += 1; + if max_iter == nb_loop { + panic!("Too many loop in prove range"); + } + let (proof, count) = prove_range_read_with_child_with_size_on_trie_backend( + trie_backend, + 1, + start_at.as_slice(), + ).unwrap(); + // Always contains at least some nodes. + assert!(proof.clone().into_memory_db::().drain().len() > 0); + assert!(count < 3); // when doing child we include parent and first child key. 
+ + let (result, completed) = read_range_proof_check_with_child::( + remote_root, + proof.clone(), + start_at.as_slice(), + ).unwrap(); + + if completed == 0 { + break; + } + let top_last = result.0.get(0).unwrap().key_values.last().map(|kv| kv.0.clone()) + // iterate into a trie keep existing pointer + .unwrap_or_else(|| start_at.get(0).unwrap().clone().clone()); + let last = result.0.last().unwrap().key_values.last().unwrap().0.clone(); + if completed == 1 { + // assert_eq!(last, top_last); this does not stand due to inline value included in top + // proof. + start_at = vec![top_last]; + } else if completed == 2 { + start_at = vec![ + top_last, + last, + ]; + } else { + panic!("unsupported nested depth"); + } + } + + assert_eq!(nb_loop, 10); + } + #[test] fn compact_multiple_child_trie() { // this root will be queried diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 6162a9866a46c..83fe5e2a93881 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -277,8 +277,8 @@ pub mod tests { { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b"value3", &[142]).expect("insert failed"); - trie.insert(b"value4", &[124]).expect("insert failed"); + trie.insert(b"value3", &[142; 33]).expect("insert failed"); + trie.insert(b"value4", &[124; 33]).expect("insert failed"); }; { @@ -313,7 +313,7 @@ pub mod tests { let test_trie = test_trie(); assert_eq!( test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), - Some(vec![142u8]), + Some(vec![142u8; 33]), ); } From a10819ddd2965644890d7e91c0ee31a809ac4a8d Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 30 Jun 2021 13:11:29 +0200 Subject: [PATCH 02/19] initial implementation --- client/api/src/backend.rs | 2 +- client/api/src/proof_provider.rs | 27 ++-- client/network/src/chain.rs | 2 +- 
client/network/src/protocol/sync/state.rs | 88 ++++++---- client/network/src/schema/api.v1.proto | 19 ++- client/network/src/state_request_handler.rs | 38 ++--- client/service/src/client/client.rs | 153 +++++++++++++----- .../consensus/common/src/block_import.rs | 3 +- primitives/consensus/common/src/lib.rs | 2 +- primitives/state-machine/src/lib.rs | 100 ++++++++++-- 10 files changed, 307 insertions(+), 127 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 1f1ad13067b34..137f486f4b197 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -41,7 +41,7 @@ use sp_consensus::BlockOrigin; use parking_lot::RwLock; pub use sp_state_machine::Backend as StateBackend; -pub use sp_consensus::ImportedState; +pub use sp_consensus::{ImportedState, KeyValueStates}; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 7201765dc64a7..f44a28b172446 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -23,18 +23,9 @@ use sp_runtime::{ }; use crate::{StorageProof, ChangesProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use sp_state_machine::{KeyValueStates, KeyValueState}; -/// Multiple key value state. -/// States are ordered by root storage key. -pub struct KeyValueStates(Vec); - -/// A key value state. -pub struct KeyValueState { - parent_storage_key: Option>, - key_values: Vec<(Vec, Vec)>, -} - /// Interface for providing block proving utilities. pub trait ProofProvider { /// Reads storage value at a given block + key, returning read proof. @@ -90,25 +81,27 @@ pub trait ProofProvider { fn read_proof_collection( &self, id: &BlockId, - start_keys: &[&Vec], + start_keys: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(StorageProof, u32)>; /// Given a `BlockId` iterate over all storage values starting at `start_key`. 
- /// Returns collected keys and values. + /// Returns collected keys and values. For each state, indicate whether the state reached its + end. fn storage_collection( &self, id: &BlockId<Block>, - start_key: &[u8], + start_key: &[Vec<u8>], size_limit: usize, - ) -> sp_blockchain::Result<Vec<(Vec<u8>, Vec<u8>)>>; + ) -> sp_blockchain::Result<Vec<(KeyValueState, bool)>>; /// Verify read storage proof for a set of keys. - /// Returns collected key-value pairs and a flag indicating if iteration is complete. + /// Returns collected key-value pairs and the nested state + /// depth of the current iteration, or 0 if completed. fn verify_range_proof( &self, root: Block::Hash, proof: StorageProof, - start_keys: &[&Vec<u8>], - ) -> sp_blockchain::Result<(KeyValueStates, bool)>; + start_keys: &[Vec<u8>], + ) -> sp_blockchain::Result<(KeyValueStates, usize)>; } diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 32d4cc9ff024f..2d5da1ef718f4 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -21,7 +21,7 @@ use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; -pub use sc_client_api::{StorageKey, StorageData, ImportedState}; +pub use sc_client_api::{StorageKey, StorageData, ImportedState, KeyValueStates}; /// Local client abstraction for the network. pub trait Client: HeaderBackend + ProofProvider + BlockIdTo diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 4e8b499b9f8ba..9a323f58b18b0 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -23,6 +23,7 @@ use sc_client_api::StorageProof; use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; use crate::chain::{Client, ImportedState}; use smallvec::SmallVec; +use std::collections::HashMap; use super::StateDownloadProgress; /// State sync support. 
@@ -34,7 +35,7 @@ pub struct StateSync { target_header: B::Header, target_root: B::Hash, last_key: SmallVec<[Vec; 2]>, - state: Vec<(Vec, Vec)>, + state: HashMap>, Vec<(Vec, Vec)>>, complete: bool, client: Arc>, imported_bytes: u64, @@ -60,7 +61,7 @@ impl StateSync { target_root: target.state_root().clone(), target_header: target, last_key: SmallVec::default(), - state: Vec::default(), + state: HashMap::default(), complete: false, imported_bytes: 0, skip_proof, @@ -69,7 +70,8 @@ impl StateSync { /// Validate and import a state reponse. pub fn import(&mut self, response: StateResponse) -> ImportResult { - if response.entries.is_empty() && response.proof.is_empty() && !response.complete { + if response.entries.is_empty() && response.proof.is_empty() { +// TODO check from construction removing it is not an issue && !response.complete { log::debug!( target: "sync", "Bad state response", @@ -97,10 +99,10 @@ impl StateSync { return ImportResult::BadResponse; } }; - let (values, complete) = match self.client.verify_range_proof( + let (values, completed) = match self.client.verify_range_proof( self.target_root, proof, - &self.last_key[..], + self.last_key.as_slice(), ) { Err(e) => { log::debug!( @@ -114,38 +116,65 @@ impl StateSync { }; log::debug!(target: "sync", "Imported with {} keys", values.len()); - if let Some(last) = values.last().map(|(k, _)| k) { - self.last_key = last.clone(); - } - - for (key, value) in values { - self.imported_bytes += key.len() as u64; - self.state.push((key, value)) + let complete = if completed == 0 { + true + } else { + if !values.update_last_key(completed, &mut self.last_key) { + log::debug!(target: "sync", "Error updating key cursor, depth: {}", completed); + return ImportResult::BadResponse; + } + false }; + + for values in values.0 { + use std::collections::hash_map::Entry; + match self.state.entry(values.parent_storages) { + Entry::Occupied(mut entry) => { + for (key, value) in values.key_values { + self.imported_bytes += key.len() as 
u64; + entry.get_mut().push((key, value)) + } + }, + Entry::Vacant(entry) => { + for (key, _value) in values.key_values.iter() { + self.imported_bytes += key.len() as u64; + } + entry.insert(values.key_values); + }, + } + } self.imported_bytes += proof_size; complete } else { - log::debug!( - target: "sync", - "Importing state from {:?} to {:?}", - response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - ); - - if let Some(e) = response.entries.last() { - self.last_key = e.key.clone(); - } - for StateEntry { key, value } in response.entries { - self.imported_bytes += (key.len() + value.len()) as u64; - self.state.push((key, value)) + let mut complete = true; + self.last_key.clear(); + for state in response.entries { + log::debug!( + target: "sync", + "Importing state from {:?} to {:?}", + state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + ); + + if !state.complete { + if let Some(e) = state.entries.last() { + self.last_key.push(e.key.clone()); + } + complete = false; + } + let entry = self.state.entry(state.parent_storages).or_default(); + for StateEntry { key, value } in state.entries { + self.imported_bytes += key.len() as u64; + entry.push((key, value)) + } } - response.complete + complete }; if complete { self.complete = true; ImportResult::Import(self.target_block.clone(), self.target_header.clone(), ImportedState { block: self.target_block.clone(), - state: std::mem::take(&mut self.state) + state: std::mem::take(&mut self.state).into(), }) } else { ImportResult::Continue(self.next_request()) @@ -156,7 +185,7 @@ impl StateSync { pub fn next_request(&self) -> StateRequest { StateRequest { block: self.target_block.encode(), - start: self.last_key.clone(), + start: self.last_key.clone().into_vec(), no_proof: self.skip_proof, } } @@ -178,7 +207,8 @@ 
impl StateSync { /// Returns state sync estimated progress. pub fn progress(&self) -> StateDownloadProgress { - let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256; + let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); + let percent_done = cursor as u32 * 100 / 256; StateDownloadProgress { percentage: percent_done, size: self.imported_bytes, diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index a16fdbaebc81b..8110358cd3063 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -72,22 +72,31 @@ message BlockData { message StateRequest { // Block header hash. bytes block = 1; - // Start from this key. Equivalent to if omitted. - bytes start = 2; // optional + // Start from this key. + // Multiple key for nested state start. + repeated bytes start = 2; // optional // if 'true' indicates that response should contain raw key-values, rather than proof. bool no_proof = 3; } message StateResponse { - // A collection of keys-values. Only populated if `no_proof` is `true` - repeated StateEntry entries = 1; + // A collection of keys-values states. Only populated if `no_proof` is `true` + repeated KeyValueStateEntry entries = 1; // If `no_proof` is false in request, this contains proof nodes. bytes proof = 2; +} + +// A key value state. +message KeyValueStateEntry { + // Parent nested storage location, empty for top state. + repeated bytes parent_storages = 1; + // A collection of keys-values. + repeated StateEntry entries = 2; // Set to true when there are no more keys to return. bool complete = 3; } -// A key-value pair +// A key-value pair. 
message StateEntry { bytes key = 1; bytes value = 2; diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index d340ff21bd449..c91c5b52e7b5c 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -21,7 +21,7 @@ use codec::{Encode, Decode}; use crate::chain::Client; use crate::config::ProtocolId; use crate::request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}; -use crate::schema::v1::{StateResponse, StateRequest, StateEntry}; +use crate::schema::v1::{StateResponse, StateRequest, StateEntry, KeyValueStateEntry}; use crate::{PeerId, ReputationChange}; use futures::channel::{mpsc, oneshot}; use futures::stream::StreamExt; @@ -70,7 +70,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String { struct SeenRequestsKey { peer: PeerId, block: B::Hash, - start: Vec, + start: Vec>, } impl Hash for SeenRequestsKey { @@ -168,10 +168,10 @@ impl StateRequestHandler { log::trace!( target: LOG_TARGET, - "Handling state request from {}: Block {:?}, Starting at {:?}, no_proof={}", + "Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}", peer, request.block, - sp_core::hexdisplay::HexDisplay::from(&request.start), + &request.start, request.no_proof, ); @@ -179,35 +179,37 @@ impl StateRequestHandler { let mut response = StateResponse::default(); if !request.no_proof { - let (proof, count) = self.client.read_proof_collection( + let (proof, _count) = self.client.read_proof_collection( &BlockId::hash(block), - &request.start, + request.start.as_slice(), MAX_RESPONSE_BYTES, )?; response.proof = proof.encode(); - if count == 0 { - response.complete = true; - } } else { let entries = self.client.storage_collection( &BlockId::hash(block), - &request.start, + request.start.as_slice(), MAX_RESPONSE_BYTES, )?; - response.entries = entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect(); - if response.entries.is_empty() { - 
response.complete = true; - } + response.entries = entries.into_iter().map(|(state, complete)| { + KeyValueStateEntry { + parent_storages: state.parent_storages, + entries: state.key_values.into_iter() + .map(|(key, value)| StateEntry { key, value }).collect(), + complete, + } + }).collect(); } log::trace!( target: LOG_TARGET, - "StateResponse contains {} keys, {}, proof nodes, complete={}, from {:?} to {:?}", + "StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}", response.entries.len(), response.proof.len(), - response.complete, - response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), - response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)), + response.entries.get(0).and_then(|top| top.entries.first() + .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), + response.entries.get(0).and_then(|top| top.entries.last() + .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), ); if let Some(value) = self.seen_requests.get_mut(&key) { // If this is the first time we have processed this request, we need to change diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index ec4e45a119685..dde50749a5428 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -30,7 +30,10 @@ use codec::{Encode, Decode}; use hash_db::Prefix; use sp_core::{ convert_hash, - storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, + storage::{ + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, + StorageData, StorageKey, StorageChild, + }, ChangesTrieConfiguration, ExecutionContext, NativeOrEncoded, }; #[cfg(feature="test-helpers")] @@ -52,7 +55,8 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, - prove_range_read_with_size, 
read_range_proof_check, + prove_range_read_with_child_with_size, read_range_proof_check_with_child, + KeyValueStates, KeyValueState, MAX_NESTED_TRIE_DEPTH, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -801,10 +805,31 @@ impl Client where Some((main_sc, child_sc)) } sp_consensus::StorageChanges::Import(changes) => { - let storage = sp_storage::Storage { - top: changes.state.into_iter().collect(), - children_default: Default::default(), - }; + let mut storage = sp_storage::Storage::default(); + for state in changes.state.0.into_iter() { + if state.parent_storages.len() == 0 { + for (key, value) in state.key_values.into_iter() { + storage.top.insert(key, value); + } + } else if state.parent_storages.len() == 1 { + let storage_key = PrefixedStorageKey::new_ref(&state.parent_storages[0]); + let storage_key = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => storage_key, + None => return Err(Error::Backend("Invalid child storage key.".to_string())), + }; + let entry = storage.children_default.entry(storage_key.to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: ChildInfo::new_default(storage_key), + }); + for (key, value) in state.key_values.into_iter() { + entry.data.insert(key, value); + } + } + if state.parent_storages.len() > 1 { + return Err(Error::InvalidState); + } + } let state_root = operation.op.reset_storage(storage)?; if state_root != *import_headers.post().state_root() { @@ -1352,62 +1377,116 @@ impl ProofProvider for Client where fn read_proof_collection( &self, id: &BlockId, - start_key: &[u8], + start_key: &[Vec], size_limit: usize, ) -> sp_blockchain::Result<(StorageProof, u32)> { let state = self.state_at(id)?; - Ok(prove_range_read_with_size::<_, HashFor>( + Ok(prove_range_read_with_child_with_size::<_, HashFor>( state, - None, - None, size_limit, - Some(start_key) + start_key, )?) 
} fn storage_collection( &self, id: &BlockId, - start_key: &[u8], + start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result, Vec)>> { + ) -> sp_blockchain::Result> { + if start_key.len() > MAX_NESTED_TRIE_DEPTH { + return Err(Error::Backend("Invalid start key.".to_string())); + } let state = self.state_at(id)?; - let mut current_key = start_key.to_vec(); + let child_info = |storage_key: &Vec| -> sp_blockchain::Result { + let storage_key = PrefixedStorageKey::new_ref(&storage_key); + match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err(Error::Backend("Invalid child storage key.".to_string())), + } + }; + let mut current_child = if start_key.len() == 2 { + Some(child_info(start_key.get(0).expect("checked len"))?) + } else { + None + }; + let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new()); let mut total_size = 0; - let mut entries = Vec::new(); - while let Some(next_key) = state - .next_storage_key(¤t_key) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - { - let value = state - .storage(next_key.as_ref()) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .unwrap_or_default(); - let size = value.len() + next_key.len(); - if total_size + size > size_limit && !entries.is_empty() { + let mut result = vec![(KeyValueState { + parent_storages: Vec::new(), + key_values: Vec::new(), + }, false)]; + loop { + let mut entries = Vec::new(); + let mut complete = true; + let mut switch_child_key = None; + while let Some(next_key) = if let Some(child) = current_child.as_ref() { + state + .next_child_storage_key(child, ¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + } else { + state + .next_storage_key(¤t_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ } { + let value = if let Some(child) = current_child.as_ref() { + state + .child_storage(child, next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .unwrap_or_default() + } else { + state + .storage(next_key.as_ref()) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .unwrap_or_default() + }; + let size = value.len() + next_key.len(); + if total_size + size > size_limit && !entries.is_empty() { + complete = false; + break; + } + total_size += size; + entries.push((next_key.clone(), value)); + + if current_child.is_none() + && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) { + switch_child_key = Some(next_key); + break; + } + current_key = next_key; + } + if let Some(child) = switch_child_key.take() { + current_child = Some(child_info(&child)?); + current_key = Vec::new(); + } else if let Some(child) = current_child.take() { + result.push((KeyValueState { + parent_storages: vec![current_key.clone()], + key_values: entries, + }, complete)); + if complete { + current_key = child.into_prefixed_storage_key().into_inner(); + } else { + break; + } + } else { + result[0].0.key_values.extend(entries.into_iter()); + result[0].1 = complete; break; } - total_size += size; - entries.push((next_key.clone(), value)); - current_key = next_key; } - Ok(entries) - + Ok(result) } fn verify_range_proof( &self, root: Block::Hash, proof: StorageProof, - start_key: &[&Vec], - ) -> sp_blockchain::Result<(Vec, Vec)>>, bool)> { - let state = read_range_proof_check::>( + start_key: &[Vec], + ) -> sp_blockchain::Result<(KeyValueStates, usize)> { + let state = read_range_proof_check_with_child::>( root, proof, - None, - None, - None, - Some(start_key), + start_key, )?; Ok(state) diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 447ea5761f767..a5b707238785a 100644 --- a/primitives/consensus/common/src/block_import.rs +++ 
b/primitives/consensus/common/src/block_import.rs @@ -143,13 +143,14 @@ pub enum StorageChanges { Import(ImportedState), } + /// Imported state data. A vector of key-value pairs that should form a trie. #[derive(PartialEq, Eq, Clone)] pub struct ImportedState { /// Target block hash. pub block: B::Hash, /// State keys and values. - pub state: Vec<(Vec, Vec)>, + pub state: sp_state_machine::KeyValueStates, } impl std::fmt::Debug for ImportedState { diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 51b2a96e17758..b0294939f42a0 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -53,7 +53,7 @@ pub use block_import::{ StateAction, StorageChanges, }; pub use select_chain::SelectChain; -pub use sp_state_machine::Backend as StateBackend; +pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; pub use import_queue::DefaultImportQueue; pub use sp_inherents::InherentData; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 7a9292893fdd0..5c7966180049d 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -182,6 +182,7 @@ mod execution { traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed}, }; use sp_externalities::Extensions; + use smallvec::SmallVec; const PROOF_CLOSE_TRANSACTION: &str = "\ @@ -733,14 +734,92 @@ mod execution { /// Multiple key value state. /// States are ordered by root storage key. + #[derive(PartialEq, Eq, Clone)] pub struct KeyValueStates(pub Vec); /// A key value state. + #[derive(PartialEq, Eq, Clone)] pub struct KeyValueState { + /// Storage key in parent states. + pub parent_storages: Vec>, /// Pair of key and values from this state. 
pub key_values: Vec<(Vec, Vec)>, } + impl From for KeyValueStates + where I: IntoIterator>, Vec<(Vec, Vec)>)> + { + fn from(b: I) -> Self { + let mut result = Vec::new(); + for (parent_storages, key_values) in b.into_iter() { + result.push(KeyValueState { + parent_storages, + key_values, + }) + } + KeyValueStates(result) + } + } + + impl KeyValueStates { + /// Return total number of key values in states. + pub fn len(&self) -> usize { + self.0.iter().fold(0, |nb, state| nb + state.key_values.len()) + } + + /// Update last keys accessed from this state. This function expect + /// that `last` keys was resized to the `completed` size. + pub fn update_last_key(&self, completed: usize, last: &mut SmallVec<[Vec; 2]>) -> bool { + if completed == 0 || completed > MAX_NESTED_TRIE_DEPTH { + return false; + } + match completed { + 1 => { + let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + if let Some(top_last) = top_last { + match last.len() { + 0 => { + last.push(top_last); + return true + }, + 2 => { + last.pop(); + }, + _ => (), + } + last[0] = top_last; + return true; + } + }, + 2 => { + let child_last = self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + if let Some(child_last) = child_last { + match last.len() { + 0 => { + let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + if let Some(top_last) = top_last { + last.push(top_last) + } else { + return false; + } + }, + 2 => { + last[1] = child_last; + return true; + }, + _ => (), + } + + last.push(child_last); + return true; + } + }, + _ => (), + } + false + } + } + /// Generate range storage read proof, with child tries /// content. /// `start_at` key are included but omitted for child proof inclusion. 
@@ -1107,6 +1186,7 @@ mod execution { H::Out: Ord + Codec, { let mut result = vec![KeyValueState { + parent_storages: Default::default(), key_values: Default::default(), }]; if start_at.len() > MAX_NESTED_TRIE_DEPTH { @@ -1122,6 +1202,7 @@ mod execution { let completed = loop { let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { result.push(KeyValueState { + parent_storages: vec![storage_key.clone()], key_values: Default::default(), }); let storage_key = PrefixedStorageKey::new_ref(storage_key); @@ -1871,7 +1952,7 @@ mod tests { fn prove_range_with_child_works() { let mut remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let mut start_at = Vec::new(); + let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); let trie_backend = remote_backend.as_trie_backend().unwrap(); let max_iter = 1000; let mut nb_loop = 0; @@ -1898,22 +1979,7 @@ mod tests { if completed == 0 { break; } - let top_last = result.0.get(0).unwrap().key_values.last().map(|kv| kv.0.clone()) - // iterate into a trie keep existing pointer - .unwrap_or_else(|| start_at.get(0).unwrap().clone().clone()); - let last = result.0.last().unwrap().key_values.last().unwrap().0.clone(); - if completed == 1 { - // assert_eq!(last, top_last); this does not stand due to inline value included in top - // proof. - start_at = vec![top_last]; - } else if completed == 2 { - start_at = vec![ - top_last, - last, - ]; - } else { - panic!("unsupported nested depth"); - } + assert!(result.update_last_key(completed, &mut start_at)); } assert_eq!(nb_loop, 10); From 8bb1dfa92822272cc24891bb853e0ac22355afc3 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 30 Jun 2021 15:13:34 +0200 Subject: [PATCH 03/19] Remove todo. 
--- client/network/src/protocol/sync/state.rs | 2 -- client/network/test/src/sync.rs | 1 - 2 files changed, 3 deletions(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 9a323f58b18b0..78ef14ad8fcc7 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -71,7 +71,6 @@ impl StateSync { /// Validate and import a state reponse. pub fn import(&mut self, response: StateResponse) -> ImportResult { if response.entries.is_empty() && response.proof.is_empty() { -// TODO check from construction removing it is not an issue && !response.complete { log::debug!( target: "sync", "Bad state response", @@ -215,4 +214,3 @@ impl StateSync { } } } - diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 56cec7e4cdfd9..1917da90efc50 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1126,4 +1126,3 @@ fn syncs_state() { })); } } - From 9414a66646ce2419f7b942ea47f8a25b128dc5ed Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 30 Jun 2021 17:04:43 +0200 Subject: [PATCH 04/19] Extend test and fix import. --- client/network/src/protocol/sync/state.rs | 23 +++++++++++++---- client/network/test/src/lib.rs | 7 +++++ client/network/test/src/sync.rs | 31 +++++++++++++++++++---- client/service/src/client/client.rs | 7 ++--- test-utils/runtime/client/src/lib.rs | 5 ++++ 5 files changed, 60 insertions(+), 13 deletions(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 78ef14ad8fcc7..690ee068156eb 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -24,6 +24,7 @@ use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; use crate::chain::{Client, ImportedState}; use smallvec::SmallVec; use std::collections::HashMap; +use sp_core::storage::well_known_keys; use super::StateDownloadProgress; /// State sync support. 
@@ -127,18 +128,26 @@ impl StateSync { for values in values.0 { use std::collections::hash_map::Entry; + let key_values = if values.parent_storages.len() == 0 { + // skip all child key root (will be recalculated on import) + values.key_values.into_iter() + .filter(|key_value| !well_known_keys::is_child_storage_key(key_value.0.as_slice())) + .collect() + } else { + values.key_values + }; match self.state.entry(values.parent_storages) { Entry::Occupied(mut entry) => { - for (key, value) in values.key_values { + for (key, value) in key_values { self.imported_bytes += key.len() as u64; entry.get_mut().push((key, value)) } }, Entry::Vacant(entry) => { - for (key, _value) in values.key_values.iter() { + for (key, _value) in key_values.iter() { self.imported_bytes += key.len() as u64; } - entry.insert(values.key_values); + entry.insert(key_values); }, } } @@ -161,10 +170,14 @@ impl StateSync { } complete = false; } + let is_top = state.parent_storages.len() == 0; let entry = self.state.entry(state.parent_storages).or_default(); for StateEntry { key, value } in state.entries { - self.imported_bytes += key.len() as u64; - entry.push((key, value)) + // Skip all child key root (will be recalculated on import). + if !(is_top && well_known_keys::is_child_storage_key(key.as_slice())) { + self.imported_bytes += key.len() as u64; + entry.push((key, value)) + } } } complete diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index b6e8f897bb809..bb8e3438b57a3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -660,6 +660,8 @@ pub struct FullPeerConfig { pub is_authority: bool, /// Syncing mode pub sync_mode: SyncMode, + /// Extra genesis storage. 
+ pub extra_storage: Option, } pub trait TestNetFactory: Sized where >::Transaction: Send { @@ -719,6 +721,11 @@ pub trait TestNetFactory: Sized where >: Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), None => TestClientBuilder::with_default_backend(), }; + if let Some(storage) = config.extra_storage { + let genesis_extra_storage = test_client_builder.genesis_init_mut().extra_storage(); + *genesis_extra_storage = storage; + } + if matches!(config.sync_mode, SyncMode::Fast{..}) { test_client_builder = test_client_builder.set_no_genesis(); } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 1917da90efc50..f684c2525b20d 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1093,11 +1093,32 @@ fn syncs_state() { sp_tracing::try_init_simple(); for skip_proofs in &[ false, true ] { let mut net = TestNet::new(0); - net.add_full_peer_with_config(Default::default()); - net.add_full_peer_with_config(FullPeerConfig { - sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs }, - ..Default::default() - }); + let mut genesis_storage: sp_core::storage::Storage = Default::default(); + genesis_storage.top.insert(b"additional_key".to_vec(), vec![1]); + let mut child_data: std::collections::BTreeMap, Vec> = Default::default(); + for i in 0u8..16 { + child_data.insert(vec![i; 5], vec![i; 33]); + } + let child1 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child1"), + }; + for i in 22u8..33 { + child_data.insert(vec![i; 5], vec![i; 33]); + } + let child2 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child2"), + }; + genesis_storage.children_default.insert(child1.child_info.storage_key().to_vec(), child1); + genesis_storage.children_default.insert(child2.child_info.storage_key().to_vec(), child2); + let mut config_one = FullPeerConfig::default(); + 
config_one.extra_storage = Some(genesis_storage.clone()); + net.add_full_peer_with_config(config_one); + let mut config_two = FullPeerConfig::default(); + config_two.extra_storage = Some(genesis_storage); + config_two.sync_mode = SyncMode::Fast { skip_proofs: *skip_proofs }; + net.add_full_peer_with_config(config_two); net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. net.block_until_sync(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index dde50749a5428..7aa4d66fb4524 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1416,6 +1416,7 @@ impl ProofProvider for Client where parent_storages: Vec::new(), key_values: Vec::new(), }, false)]; + loop { let mut entries = Vec::new(); let mut complete = true; @@ -1456,16 +1457,16 @@ impl ProofProvider for Client where current_key = next_key; } if let Some(child) = switch_child_key.take() { + result[0].0.key_values.extend(entries.into_iter()); current_child = Some(child_info(&child)?); current_key = Vec::new(); } else if let Some(child) = current_child.take() { + current_key = child.into_prefixed_storage_key().into_inner(); result.push((KeyValueState { parent_storages: vec![current_key.clone()], key_values: entries, }, complete)); - if complete { - current_key = child.into_prefixed_storage_key().into_inner(); - } else { + if !complete { break; } } else { diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index a9ff26a5adf8d..7fe92bc455d5e 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -121,6 +121,11 @@ impl GenesisParameters { pub fn set_wasm_code(&mut self, code: Vec) { self.wasm_code = Some(code); } + + /// Access extra genesis storage. 
+ pub fn extra_storage(&mut self) -> &mut Storage { + &mut self.extra_storage + } } impl substrate_test_client::GenesisInit for GenesisParameters { From 1377863072f29373c2c74d11446e27ddf8f1f901 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 30 Jun 2021 18:09:30 +0200 Subject: [PATCH 05/19] fix no proof, with proof ko. --- client/network/src/protocol/sync/state.rs | 7 ++++++- client/network/src/state_request_handler.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 690ee068156eb..ccbc39aa589e9 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -155,7 +155,12 @@ impl StateSync { complete } else { let mut complete = true; - self.last_key.clear(); + if self.last_key.len() == 2 && response.entries[0].entries.len() == 0 { + // empty parent is possible when all batch is into child. + self.last_key.pop(); + } else { + self.last_key.clear(); + } for state in response.entries { log::debug!( target: "sync", diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index c91c5b52e7b5c..86e168bb9c008 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -35,7 +35,7 @@ use std::time::Duration; use std::hash::{Hasher, Hash}; const LOG_TARGET: &str = "sync"; -const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual reponse may be bigger. +const MAX_RESPONSE_BYTES: usize = 36; // Actual reponse may be bigger. const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; mod rep { From ae2399801e20f689dd04947fa95ef0097903c151 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 12:58:28 +0200 Subject: [PATCH 06/19] fix start at logic. 
--- primitives/state-machine/src/lib.rs | 65 ++++++++++++++++++----------- 1 file changed, 40 insertions(+), 25 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5c7966180049d..ea3d2b5baa5ea 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -747,7 +747,7 @@ mod execution { } impl From for KeyValueStates - where I: IntoIterator>, Vec<(Vec, Vec)>)> + where I: IntoIterator>, Vec<(Vec, Vec)>)> { fn from(b: I) -> Self { let mut result = Vec::new(); @@ -767,8 +767,7 @@ mod execution { self.0.iter().fold(0, |nb, state| nb + state.key_values.len()) } - /// Update last keys accessed from this state. This function expect - /// that `last` keys was resized to the `completed` size. + /// Update last keys accessed from this state. pub fn update_last_key(&self, completed: usize, last: &mut SmallVec<[Vec; 2]>) -> bool { if completed == 0 || completed > MAX_NESTED_TRIE_DEPTH { return false; @@ -789,29 +788,42 @@ mod execution { } last[0] = top_last; return true; + } else { + last.truncate(1); + return true; } }, 2 => { - let child_last = self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + let child_last = self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + if let Some(child_last) = child_last { match last.len() { 0 => { - let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); if let Some(top_last) = top_last { last.push(top_last) } else { return false; } }, + 1 => { + if let Some(top_last) = top_last { + last[0] = top_last; + } + }, 2 => { + if let Some(top_last) = top_last { + last[0] = top_last; + } last[1] = child_last; return true; }, _ => (), } - last.push(child_last); return true; + } else { + return false; } }, _ => (), @@ -878,29 +890,30 @@ mod execution { (None, 1) }; - let mut switch_child_key = None; let 
start_at_ref = start_at.as_ref().map(AsRef::as_ref); + let mut switch_child_key = None; let mut first = start_at.is_some(); let completed = proving_backend.apply_to_key_values_while( child_info.as_ref(), None, start_at_ref, |key, _value| { - if count == 0 || proving_backend.estimate_encoded_size() <= size_limit { - count += 1; - if first { - if start_at_ref.as_ref().map(|start| key.as_slice() > start) - .unwrap_or(true) { - first = false; - } + if first { + if start_at_ref.as_ref().map(|start| key.as_slice() > start) + .unwrap_or(true) { + first = false; } - if depth < MAX_NESTED_TRIE_DEPTH && !first + } + if first { + true + } else if depth < MAX_NESTED_TRIE_DEPTH && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { - switch_child_key = Some(key); - false - } else { - true - } + switch_child_key = Some(key); + count += 1; + false + } else if proving_backend.estimate_encoded_size() <= size_limit { + count += 1; + true } else { false } @@ -913,7 +926,7 @@ mod execution { break; } else { if completed { - start_at = child_key.take(); + start_at = child_key.take(); } else { break; } @@ -1072,7 +1085,7 @@ mod execution { start_at, ) } - + /// Check child storage range proof, generated by `prove_range_read_with_size` call. 
pub fn read_range_proof_check( root: H::Out, @@ -1214,14 +1227,14 @@ mod execution { (None, 1) }; - let mut switch_child_key = None; let values = if child_info.is_some() { &mut result.last_mut().expect("Added above").key_values } else { &mut result[0].key_values }; - let mut first = start_at.is_some(); let start_at_ref = start_at.as_ref().map(AsRef::as_ref); + let mut switch_child_key = None; + let mut first = start_at.is_some(); let completed = proving_backend.apply_to_key_values_while( child_info.as_ref(), None, @@ -1236,7 +1249,9 @@ mod execution { if !first { values.push((key.to_vec(), value.to_vec())); } - if depth < MAX_NESTED_TRIE_DEPTH && !first + if first { + true + } else if depth < MAX_NESTED_TRIE_DEPTH && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { switch_child_key = Some(key); false From 3ae8493657e0c78a9146df43740fac9fd09f8792 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 13:10:07 +0200 Subject: [PATCH 07/19] Restore response size. --- client/network/src/state_request_handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index 86e168bb9c008..c91c5b52e7b5c 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -35,7 +35,7 @@ use std::time::Duration; use std::hash::{Hasher, Hash}; const LOG_TARGET: &str = "sync"; -const MAX_RESPONSE_BYTES: usize = 36; // Actual reponse may be bigger. +const MAX_RESPONSE_BYTES: usize = 2 * 1024 * 1024; // Actual reponse may be bigger. const MAX_NUMBER_OF_SAME_REQUESTS_PER_PEER: usize = 2; mod rep { From 0b37f3ed6e161a91e4ee9ecf87ea53cae0ce1d9c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 14:47:44 +0200 Subject: [PATCH 08/19] Rework comments. 
--- client/api/src/backend.rs | 4 +-
 client/api/src/proof_provider.rs | 14 ++++--
 client/network/src/chain.rs | 2 +-
 client/network/src/protocol/sync/state.rs | 13 ++---
 client/network/src/schema/api.v1.proto | 2 +-
 primitives/consensus/common/src/lib.rs | 2 +-
 primitives/state-machine/src/lib.rs | 59 +++++++++++------------
 7 files changed, 49 insertions(+), 47 deletions(-)

diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs
index 137f486f4b197..6b65bfd22f65c 100644
--- a/client/api/src/backend.rs
+++ b/client/api/src/backend.rs
@@ -40,8 +40,8 @@ use sp_blockchain;
 use sp_consensus::BlockOrigin;
 use parking_lot::RwLock;
 
-pub use sp_state_machine::Backend as StateBackend;
-pub use sp_consensus::{ImportedState, KeyValueStates};
+pub use sp_state_machine::{Backend as StateBackend, KeyValueStates};
+pub use sp_consensus::ImportedState;
 use std::marker::PhantomData;
 
 /// Extracts the state backend type for the given backend.
diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs
index f44a28b172446..e066c6d2da82d 100644
--- a/client/api/src/proof_provider.rs
+++ b/client/api/src/proof_provider.rs
@@ -74,9 +74,12 @@ pub trait ProofProvider {
 ) -> sp_blockchain::Result>;
 
 /// Given a `BlockId` iterate over all storage values starting at `start_keys`.
- /// `start_keys` contain current parsed location of storage roots and
+ /// Last `start_keys` element contains last accessed key value.
+ /// With multiple `start_keys`, first `start_keys` element is
+ /// the current storage key of the last accessed child trie.
 /// at last level the value to start at exclusively.
- /// Proofs is build until size limit is reached.
+ /// Proof is built until the size limit is reached and always includes at
+ /// least one key following `start_keys`.
 /// Returns combined proof and the numbers of collected keys.
fn read_proof_collection(
 &self,
@@ -86,7 +89,12 @@ pub trait ProofProvider {
 ) -> sp_blockchain::Result<(StorageProof, u32)>;
 
 /// Given a `BlockId` iterate over all storage values starting at `start_key`.
- /// Returns collected keys and values. For each state indicate if state reach
+ /// Returns collected keys and values.
+ /// Returns the collected key values content of the top trie followed by the
+ /// collected key values of child tries.
+ /// Only child tries whose root is part of the collected content or
+ /// related to `start_key` are attached.
+ /// For each collected state a boolean indicates if the state reached the
 /// end.
 fn storage_collection(
 &self,
diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs
index 2d5da1ef718f4..32d4cc9ff024f 100644
--- a/client/network/src/chain.rs
+++ b/client/network/src/chain.rs
@@ -21,7 +21,7 @@ use sp_blockchain::{Error, HeaderBackend, HeaderMetadata};
 use sc_client_api::{BlockBackend, ProofProvider};
 use sp_runtime::traits::{Block as BlockT, BlockIdTo};
 
-pub use sc_client_api::{StorageKey, StorageData, ImportedState, KeyValueStates};
+pub use sc_client_api::{StorageKey, StorageData, ImportedState};
 
 /// Local client abstraction for the network.
pub trait Client: HeaderBackend + ProofProvider + BlockIdTo diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index ccbc39aa589e9..bc918b1bb4f9c 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -116,14 +116,9 @@ impl StateSync { }; log::debug!(target: "sync", "Imported with {} keys", values.len()); - let complete = if completed == 0 { - true - } else { - if !values.update_last_key(completed, &mut self.last_key) { - log::debug!(target: "sync", "Error updating key cursor, depth: {}", completed); - return ImportResult::BadResponse; - } - false + let complete = completed == 0; + if !complete && !values.update_last_key(completed, &mut self.last_key) { + log::debug!(target: "sync", "Error updating key cursor, depth: {}", completed); }; for values in values.0 { @@ -156,7 +151,7 @@ impl StateSync { } else { let mut complete = true; if self.last_key.len() == 2 && response.entries[0].entries.len() == 0 { - // empty parent is possible when all batch is into child. + // Unchanged parent trie key, keep old value. self.last_key.pop(); } else { self.last_key.clear(); diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index 8110358cd3063..6dafde843b180 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -73,7 +73,7 @@ message StateRequest { // Block header hash. bytes block = 1; // Start from this key. - // Multiple key for nested state start. + // Multiple keys used for nested state start. repeated bytes start = 2; // optional // if 'true' indicates that response should contain raw key-values, rather than proof. 
bool no_proof = 3; diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index b0294939f42a0..51b2a96e17758 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -53,7 +53,7 @@ pub use block_import::{ StateAction, StorageChanges, }; pub use select_chain::SelectChain; -pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; +pub use sp_state_machine::Backend as StateBackend; pub use import_queue::DefaultImportQueue; pub use sp_inherents::InherentData; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ea3d2b5baa5ea..6113bcb72dfe0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -768,11 +768,11 @@ mod execution { } /// Update last keys accessed from this state. - pub fn update_last_key(&self, completed: usize, last: &mut SmallVec<[Vec; 2]>) -> bool { - if completed == 0 || completed > MAX_NESTED_TRIE_DEPTH { + pub fn update_last_key(&self, stopped_at: usize, last: &mut SmallVec<[Vec; 2]>) -> bool { + if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH { return false; } - match completed { + match stopped_at { 1 => { let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); if let Some(top_last) = top_last { @@ -786,9 +786,12 @@ mod execution { }, _ => (), } + // update top trie access. last[0] = top_last; return true; } else { + // No change in top trie accesses. + // Indicates end of reading of a child trie. 
last.truncate(1); return true; } @@ -798,31 +801,22 @@ mod execution { let child_last = self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); if let Some(child_last) = child_last { - match last.len() { - 0 => { - if let Some(top_last) = top_last { - last.push(top_last) - } else { - return false; - } - }, - 1 => { - if let Some(top_last) = top_last { - last[0] = top_last; - } - }, - 2 => { - if let Some(top_last) = top_last { - last[0] = top_last; - } - last[1] = child_last; - return true; - }, - _ => (), + if last.len() == 0 { + if let Some(top_last) = top_last { + last.push(top_last) + } else { + return false; + } + } else if let Some(top_last) = top_last { + last[0] = top_last; + } + if last.len() == 2 { + last.pop(); } last.push(child_last); return true; } else { + // stopped at level 2 so child last is define. return false; } }, @@ -834,7 +828,11 @@ mod execution { /// Generate range storage read proof, with child tries /// content. - /// `start_at` key are included but omitted for child proof inclusion. + /// A size limit is applied to the proof with the + /// exception that `start_at` and its following element + /// are always part of the proof. + /// If a key different than `start_at` is a child trie root, + /// the child trie content will be included in the proof. pub fn prove_range_read_with_child_with_size( mut backend: B, size_limit: usize, @@ -856,6 +854,7 @@ mod execution { /// Generate range storage read proof, with child tries /// content. + /// See `prove_range_read_with_child_with_size`. pub fn prove_range_read_with_child_with_size_on_trie_backend( trie_backend: &TrieBackend, size_limit: usize, @@ -1068,8 +1067,8 @@ mod execution { /// Check storage range proof with child trie included, generated by /// `prove_range_read_with_child_with_size` call. /// - /// Returns content and the depth of the pending state iteration, - /// 0 if completed. 
+ /// Returns key values contents and the depth of the pending state iteration + /// (0 if completed). pub fn read_range_proof_check_with_child( root: H::Out, proof: StorageProof, @@ -1985,16 +1984,16 @@ mod tests { assert!(proof.clone().into_memory_db::().drain().len() > 0); assert!(count < 3); // when doing child we include parent and first child key. - let (result, completed) = read_range_proof_check_with_child::( + let (result, completed_depth) = read_range_proof_check_with_child::( remote_root, proof.clone(), start_at.as_slice(), ).unwrap(); - if completed == 0 { + if completed_depth == 0 { break; } - assert!(result.update_last_key(completed, &mut start_at)); + assert!(result.update_last_key(completed_depth, &mut start_at)); } assert_eq!(nb_loop, 10); From 8d5c3f41885a8bea099253c6287fa481593d3aff Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 15:55:01 +0200 Subject: [PATCH 09/19] Add explicit ref --- primitives/state-machine/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 6113bcb72dfe0..d0f134018137b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -898,7 +898,7 @@ mod execution { start_at_ref, |key, _value| { if first { - if start_at_ref.as_ref().map(|start| key.as_slice() > start) + if start_at_ref.as_ref().map(|start| key.as_slice() > &start) .unwrap_or(true) { first = false; } @@ -1240,7 +1240,7 @@ mod execution { start_at_ref, |key, value| { if first { - if start_at_ref.as_ref().map(|start| key.as_slice() > start) + if start_at_ref.as_ref().map(|start| key.as_slice() > &start) .unwrap_or(true) { first = false; } From 0d08f53587a146c878644ba5aee4d4ab9dbb4d81 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 15:58:32 +0200 Subject: [PATCH 10/19] Use compact proof. 
--- client/api/src/lib.rs | 2 +- client/api/src/proof_provider.rs | 6 ++-- client/network/src/protocol/sync/state.rs | 4 +-- client/service/src/client/client.rs | 39 +++++++++++++++-------- primitives/state-machine/src/lib.rs | 5 ++- primitives/trie/src/trie_codec.rs | 3 +- 6 files changed, 38 insertions(+), 21 deletions(-) diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 71cf499f79943..3f939761aacf9 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -40,7 +40,7 @@ pub use notifications::*; pub use proof_provider::*; pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{StorageProof, ExecutionStrategy}; +pub use sp_state_machine::{StorageProof, CompactProof, ExecutionStrategy}; pub use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; /// Usage Information Provider interface diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index e066c6d2da82d..f15ecf66ad9bd 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -21,7 +21,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT}, }; -use crate::{StorageProof, ChangesProof}; +use crate::{StorageProof, ChangesProof, CompactProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; use sp_state_machine::{KeyValueStates, KeyValueState}; @@ -86,7 +86,7 @@ pub trait ProofProvider { id: &BlockId, start_keys: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result<(StorageProof, u32)>; + ) -> sp_blockchain::Result<(CompactProof, u32)>; /// Given a `BlockId` iterate over all storage values starting at `start_key`. /// Returns collected keys and values. 
@@ -109,7 +109,7 @@ pub trait ProofProvider { fn verify_range_proof( &self, root: Block::Hash, - proof: StorageProof, + proof: CompactProof, start_keys: &[Vec], ) -> sp_blockchain::Result<(KeyValueStates, usize)>; } diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index bc918b1bb4f9c..c14dbe346e82c 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -19,7 +19,7 @@ use std::sync::Arc; use codec::{Encode, Decode}; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; -use sc_client_api::StorageProof; +use sc_client_api::CompactProof; use crate::schema::v1::{StateRequest, StateResponse, StateEntry}; use crate::chain::{Client, ImportedState}; use smallvec::SmallVec; @@ -92,7 +92,7 @@ impl StateSync { response.proof.len(), ); let proof_size = response.proof.len() as u64; - let proof = match StorageProof::decode(&mut response.proof.as_ref()) { + let proof = match CompactProof::decode(&mut response.proof.as_ref()) { Ok(proof) => proof, Err(e) => { log::debug!(target: "sync", "Error decoding proof: {:?}", e); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 7aa4d66fb4524..fae72daa32c65 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -55,8 +55,8 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, - prove_range_read_with_child_with_size, read_range_proof_check_with_child, - KeyValueStates, KeyValueState, MAX_NESTED_TRIE_DEPTH, + prove_range_read_with_child_with_size, KeyValueStates, KeyValueState, + read_range_proof_check_with_child_on_proving_backend, MAX_NESTED_TRIE_DEPTH, }; use sc_executor::RuntimeVersion; use sp_consensus::{ @@ -70,7 +70,7 @@ use sp_blockchain::{ well_known_cache_keys::Id as 
CacheKeyId, HeaderMetadata, CachedHeaderMetadata, }; -use sp_trie::StorageProof; +use sp_trie::{StorageProof, CompactProof}; use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, CallApiAtParams, @@ -1379,13 +1379,18 @@ impl ProofProvider for Client where id: &BlockId, start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result<(StorageProof, u32)> { + ) -> sp_blockchain::Result<(CompactProof, u32)> { let state = self.state_at(id)?; - Ok(prove_range_read_with_child_with_size::<_, HashFor>( - state, - size_limit, - start_key, - )?) + let root = state.storage_root(std::iter::empty()).0; + + let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( + state, + size_limit, + start_key, + )?; + let proof = sp_trie::encode_compact::>>(proof, root) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; + Ok((proof, count)) } fn storage_collection( @@ -1481,12 +1486,20 @@ impl ProofProvider for Client where fn verify_range_proof( &self, root: Block::Hash, - proof: StorageProof, + proof: CompactProof, start_key: &[Vec], ) -> sp_blockchain::Result<(KeyValueStates, usize)> { - let state = read_range_proof_check_with_child::>( - root, - proof, + let mut db = sp_state_machine::MemoryDB::>::new(&[]); + let _ = sp_trie::decode_compact::>, _, _>( + &mut db, + proof.iter_compact_encoded_nodes(), + Some(&root), + ).map_err(|e| { + sp_blockchain::Error::from_state(Box::new(e)) + })?; + let proving_backend = sp_state_machine::TrieBackend::new(db, root); + let state = read_range_proof_check_with_child_on_proving_backend::>( + &proving_backend, start_key, )?; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index d0f134018137b..241b10bc3630f 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -143,7 +143,10 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::{Layout, TrieDBMut}, 
StorageProof, TrieMut, DBValue, MemoryDB}; + pub use sp_trie::{ + trie_types::{Layout, TrieDBMut}, StorageProof, CompactProof, TrieMut, + DBValue, MemoryDB, + }; pub use crate::testing::TestExternalities; pub use crate::basic::BasicExternalities; pub use crate::read_only::{ReadOnlyExternalities, InspectState}; diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index efe3223580f3f..7ea7896877764 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -163,9 +163,10 @@ pub fn decode_compact<'a, L, DB, I>( return Err(Error::IncompleteProof); } + let mut nodes_iter = nodes_iter.peekable(); let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { - if previous_extracted_child_trie.is_none() { + if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { let (top_root, _) = trie_db::decode_compact_from_iter::( db, &mut nodes_iter, From af0c29825d7f1154b037be8dc7045f0b84984262 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 1 Jul 2021 16:20:53 +0200 Subject: [PATCH 11/19] ref change --- primitives/state-machine/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 241b10bc3630f..cef1c0539adce 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -901,7 +901,7 @@ mod execution { start_at_ref, |key, _value| { if first { - if start_at_ref.as_ref().map(|start| key.as_slice() > &start) + if start_at_ref.as_ref().map(|start| &key.as_slice() > start) .unwrap_or(true) { first = false; } @@ -1243,7 +1243,7 @@ mod execution { start_at_ref, |key, value| { if first { - if start_at_ref.as_ref().map(|start| key.as_slice() > &start) + if start_at_ref.as_ref().map(|start| &key.as_slice() > start) .unwrap_or(true) { first = false; } From 0a22cd85d8f8a9e393b32f98fed350e19d5e52a2 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 5 Jul 
2021 10:44:23 +0200 Subject: [PATCH 12/19] elaborate on empty change set condition. --- client/network/src/protocol/sync/state.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index c14dbe346e82c..ba7fc575fcad8 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -150,8 +150,12 @@ impl StateSync { complete } else { let mut complete = true; + // if the trie is a child trie and one of its parent tries is empty, + // the parent cursor stays valid. + // Empty parent trie content only happens when all the response content + // is part of a single child trie. if self.last_key.len() == 2 && response.entries[0].entries.len() == 0 { - // Unchanged parent trie key, keep old value. + // Do not remove the parent trie position. self.last_key.pop(); } else { self.last_key.clear(); From 54b296628ae8185d1d336850691a9457eb348a39 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 5 Jul 2021 10:50:26 +0200 Subject: [PATCH 13/19] KeyValueState renaming. --- client/api/src/proof_provider.rs | 4 ++-- client/service/src/client/client.rs | 8 ++++---- primitives/state-machine/src/lib.rs | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index f15ecf66ad9bd..27000cc6f046e 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -23,7 +23,7 @@ use sp_runtime::{ }; use crate::{StorageProof, ChangesProof, CompactProof}; use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; -use sp_state_machine::{KeyValueStates, KeyValueState}; +use sp_state_machine::{KeyValueStates, KeyValueStorageLevel}; /// Interface for providing block proving utilities. 
@@ -101,7 +101,7 @@ pub trait ProofProvider { id: &BlockId, start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result>; + ) -> sp_blockchain::Result>; /// Verify read storage proof for a set of keys. /// Returns collected key-value pairs and a the nested state diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 95b732f4ac8b6..e2d306cbf8c81 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -55,7 +55,7 @@ use sp_state_machine::{ DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieConfigurationRange, key_changes, key_changes_proof, - prove_range_read_with_child_with_size, KeyValueStates, KeyValueState, + prove_range_read_with_child_with_size, KeyValueStates, KeyValueStorageLevel, read_range_proof_check_with_child_on_proving_backend, MAX_NESTED_TRIE_DEPTH, }; use sc_executor::RuntimeVersion; @@ -1384,7 +1384,7 @@ impl ProofProvider for Client where id: &BlockId, start_key: &[Vec], size_limit: usize, - ) -> sp_blockchain::Result> { + ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { return Err(Error::Backend("Invalid start key.".to_string())); } @@ -1403,7 +1403,7 @@ impl ProofProvider for Client where }; let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new()); let mut total_size = 0; - let mut result = vec![(KeyValueState { + let mut result = vec![(KeyValueStorageLevel { parent_storages: Vec::new(), key_values: Vec::new(), }, false)]; @@ -1453,7 +1453,7 @@ impl ProofProvider for Client where current_key = Vec::new(); } else if let Some(child) = current_child.take() { current_key = child.into_prefixed_storage_key().into_inner(); - result.push((KeyValueState { + result.push((KeyValueStorageLevel { parent_storages: vec![current_key.clone()], key_values: entries, }, complete)); diff --git a/primitives/state-machine/src/lib.rs 
b/primitives/state-machine/src/lib.rs index cef1c0539adce..06f9465a12ad2 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -738,11 +738,11 @@ mod execution { /// Multiple key value state. /// States are ordered by root storage key. #[derive(PartialEq, Eq, Clone)] - pub struct KeyValueStates(pub Vec); + pub struct KeyValueStates(pub Vec); /// A key value state. #[derive(PartialEq, Eq, Clone)] - pub struct KeyValueState { + pub struct KeyValueStorageLevel { /// Storage key in parent states. pub parent_storages: Vec>, /// Pair of key and values from this state. @@ -755,7 +755,7 @@ mod execution { fn from(b: I) -> Self { let mut result = Vec::new(); for (parent_storages, key_values) in b.into_iter() { - result.push(KeyValueState { + result.push(KeyValueStorageLevel { parent_storages, key_values, }) @@ -1200,7 +1200,7 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let mut result = vec![KeyValueState { + let mut result = vec![KeyValueStorageLevel { parent_storages: Default::default(), key_values: Default::default(), }]; @@ -1216,7 +1216,7 @@ mod execution { let completed = loop { let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { - result.push(KeyValueState { + result.push(KeyValueStorageLevel { parent_storages: vec![storage_key.clone()], key_values: Default::default(), }); From 4bd673fa16c6f7bcebf28def3182b253dab60711 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 5 Jul 2021 15:59:08 +0200 Subject: [PATCH 14/19] Do not add two time child trie with same root to sync reply. 
--- client/network/src/protocol/sync/state.rs | 64 ++++++++++++------ client/network/src/schema/api.v1.proto | 5 +- client/network/src/state_request_handler.rs | 2 +- client/network/test/src/sync.rs | 5 ++ client/service/src/client/client.rs | 68 +++++++++++-------- primitives/state-machine/src/lib.rs | 75 ++++++++++++++++----- 6 files changed, 149 insertions(+), 70 deletions(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index ba7fc575fcad8..26a523022782e 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -36,7 +36,7 @@ pub struct StateSync { target_header: B::Header, target_root: B::Hash, last_key: SmallVec<[Vec; 2]>, - state: HashMap>, Vec<(Vec, Vec)>>, + state: HashMap, (Vec<(Vec, Vec)>, Vec>)>, complete: bool, client: Arc>, imported_bytes: u64, @@ -122,28 +122,38 @@ impl StateSync { }; for values in values.0 { - use std::collections::hash_map::Entry; - let key_values = if values.parent_storages.len() == 0 { - // skip all child key root (will be recalculated on import) + let key_values = if values.state_root.is_empty() { + // Read child trie roots. values.key_values.into_iter() - .filter(|key_value| !well_known_keys::is_child_storage_key(key_value.0.as_slice())) + .filter(|key_value| if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { + self.state.entry(key_value.1.clone()) + .or_default().1 + .push(key_value.0.clone()); + false + } else { + true + }) .collect() } else { values.key_values }; - match self.state.entry(values.parent_storages) { - Entry::Occupied(mut entry) => { - for (key, value) in key_values { + let mut entry = self.state.entry(values.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported child_trie with same root. + // Warning this will not work with parallel download. 
+ } else { + if entry.0.is_empty() { + for (key, _value) in key_values.iter() { self.imported_bytes += key.len() as u64; - entry.get_mut().push((key, value)) } - }, - Entry::Vacant(entry) => { - for (key, _value) in key_values.iter() { + + entry.0 = key_values; + } else { + for (key, value) in key_values { self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) } - entry.insert(key_values); - }, + } } } self.imported_bytes += proof_size; @@ -174,13 +184,25 @@ impl StateSync { } complete = false; } - let is_top = state.parent_storages.len() == 0; - let entry = self.state.entry(state.parent_storages).or_default(); - for StateEntry { key, value } in state.entries { - // Skip all child key root (will be recalculated on import). - if !(is_top && well_known_keys::is_child_storage_key(key.as_slice())) { - self.imported_bytes += key.len() as u64; - entry.push((key, value)) + let is_top = state.state_root.is_empty(); + let entry = self.state.entry(state.state_root).or_default(); + if entry.0.len() > 0 && entry.1.len() > 1 { + // Already imported child trie with same root. + } else { + let mut child_roots = Vec::new(); + for StateEntry { key, value } in state.entries { + // Skip all child key root (will be recalculated on import). + if is_top && well_known_keys::is_child_storage_key(key.as_slice()) { + child_roots.push((value, key)); + } else { + self.imported_bytes += key.len() as u64; + entry.0.push((key, value)) + } + } + for (root, storage_key) in child_roots { + self.state.entry(root) + .or_default().1 + .push(storage_key); } } } diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index 6dafde843b180..59268d581cc09 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -88,8 +88,9 @@ message StateResponse { // A key value state. message KeyValueStateEntry { - // Parent nested storage location, empty for top state. 
- repeated bytes parent_storages = 1; + // Root of for this level, empty length bytes + // if top level. + bytes state_root = 1; // A collection of keys-values. repeated StateEntry entries = 2; // Set to true when there are no more keys to return. diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index c91c5b52e7b5c..ffa6e0915241e 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -193,7 +193,7 @@ impl StateRequestHandler { )?; response.entries = entries.into_iter().map(|(state, complete)| { KeyValueStateEntry { - parent_storages: state.parent_storages, + state_root: state.state_root, entries: state.key_values.into_iter() .map(|(key, value)| StateEntry { key, value }).collect(), complete, diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index f684c2525b20d..d1603a999884e 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1103,6 +1103,10 @@ fn syncs_state() { data: child_data.clone(), child_info: sp_core::storage::ChildInfo::new_default(b"child1"), }; + let child3 = sp_core::storage::StorageChild { + data: child_data.clone(), + child_info: sp_core::storage::ChildInfo::new_default(b"child3"), + }; for i in 22u8..33 { child_data.insert(vec![i; 5], vec![i; 33]); } @@ -1112,6 +1116,7 @@ fn syncs_state() { }; genesis_storage.children_default.insert(child1.child_info.storage_key().to_vec(), child1); genesis_storage.children_default.insert(child2.child_info.storage_key().to_vec(), child2); + genesis_storage.children_default.insert(child3.child_info.storage_key().to_vec(), child3); let mut config_one = FullPeerConfig::default(); config_one.extra_storage = Some(genesis_storage.clone()); net.add_full_peer_with_config(config_one); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e2d306cbf8c81..efe13417cbd2a 100644 --- a/client/service/src/client/client.rs +++ 
b/client/service/src/client/client.rs @@ -807,28 +807,27 @@ impl Client where sp_consensus::StorageChanges::Import(changes) => { let mut storage = sp_storage::Storage::default(); for state in changes.state.0.into_iter() { - if state.parent_storages.len() == 0 { + if state.parent_storage_keys.len() == 0 && state.state_root.len() == 0 { for (key, value) in state.key_values.into_iter() { storage.top.insert(key, value); } - } else if state.parent_storages.len() == 1 { - let storage_key = PrefixedStorageKey::new_ref(&state.parent_storages[0]); - let storage_key = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => storage_key, - None => return Err(Error::Backend("Invalid child storage key.".to_string())), - }; - let entry = storage.children_default.entry(storage_key.to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: ChildInfo::new_default(storage_key), - }); - for (key, value) in state.key_values.into_iter() { - entry.data.insert(key, value); + } else { + for parent_storage in state.parent_storage_keys { + let storage_key = PrefixedStorageKey::new_ref(&parent_storage); + let storage_key = match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => storage_key, + None => return Err(Error::Backend("Invalid child storage key.".to_string())), + }; + let entry = storage.children_default.entry(storage_key.to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: ChildInfo::new_default(storage_key), + }); + for (key, value) in state.key_values.iter() { + entry.data.insert(key.clone(), value.clone()); + } } } - if state.parent_storages.len() > 1 { - return Err(Error::InvalidState); - } } let state_root = operation.op.reset_storage(storage)?; @@ -1397,24 +1396,32 @@ impl ProofProvider for Client where } }; let mut current_child = if start_key.len() == 2 { - Some(child_info(start_key.get(0).expect("checked len"))?) 
+ let start_key = start_key.get(0).expect("checked len"); + if let Some(child_root) = state.storage(&start_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? { + Some((child_info(start_key)?, child_root)) + } else { + return Err(Error::Backend("Invalid root start key.".to_string())); + } } else { None }; let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new()); let mut total_size = 0; let mut result = vec![(KeyValueStorageLevel { - parent_storages: Vec::new(), + state_root: Vec::new(), key_values: Vec::new(), + parent_storage_keys: Vec::new(), }, false)]; + let mut child_roots = HashSet::new(); loop { let mut entries = Vec::new(); let mut complete = true; let mut switch_child_key = None; while let Some(next_key) = if let Some(child) = current_child.as_ref() { state - .next_child_storage_key(child, ¤t_key) + .next_child_storage_key(&child.0, ¤t_key) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? } else { state @@ -1423,7 +1430,7 @@ impl ProofProvider for Client where } { let value = if let Some(child) = current_child.as_ref() { state - .child_storage(child, next_key.as_ref()) + .child_storage(&child.0, next_key.as_ref()) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
.unwrap_or_default() } else { @@ -1438,24 +1445,29 @@ impl ProofProvider for Client where break; } total_size += size; - entries.push((next_key.clone(), value)); if current_child.is_none() && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) { - switch_child_key = Some(next_key); - break; + if !child_roots.contains(value.as_slice()) { + child_roots.insert(value.clone()); + switch_child_key = Some((next_key.clone(), value.clone())); + entries.push((next_key.clone(), value)); + break; + } } + entries.push((next_key.clone(), value)); current_key = next_key; } - if let Some(child) = switch_child_key.take() { + if let Some((child, child_root)) = switch_child_key.take() { result[0].0.key_values.extend(entries.into_iter()); - current_child = Some(child_info(&child)?); + current_child = Some((child_info(&child)?, child_root)); current_key = Vec::new(); - } else if let Some(child) = current_child.take() { + } else if let Some((child, child_root)) = current_child.take() { current_key = child.into_prefixed_storage_key().into_inner(); result.push((KeyValueStorageLevel { - parent_storages: vec![current_key.clone()], + state_root: child_root, key_values: entries, + parent_storage_keys: Vec::new(), }, complete)); if !complete { break; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 06f9465a12ad2..bb20f72c56f8b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -175,7 +175,7 @@ mod std_reexport { #[cfg(feature = "std")] mod execution { use super::*; - use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; + use std::{fmt, result, collections::HashMap, collections::HashSet, panic::UnwindSafe}; use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; @@ -740,24 +740,29 @@ mod execution { #[derive(PartialEq, Eq, Clone)] pub struct KeyValueStates(pub Vec); - /// A key value state. + /// A key value state at any storage level. 
#[derive(PartialEq, Eq, Clone)] pub struct KeyValueStorageLevel { - /// Storage key in parent states. - pub parent_storages: Vec>, + /// State root of the level, for + /// top trie it is as an empty byte array. + pub state_root: Vec, + /// Storage of parents, empty for top root or + /// when exporting (building proof). + pub parent_storage_keys: Vec>, /// Pair of key and values from this state. pub key_values: Vec<(Vec, Vec)>, } impl From for KeyValueStates - where I: IntoIterator>, Vec<(Vec, Vec)>)> + where I: IntoIterator, (Vec<(Vec, Vec)>, Vec>))> { fn from(b: I) -> Self { let mut result = Vec::new(); - for (parent_storages, key_values) in b.into_iter() { + for (state_root, (key_values, storage_paths)) in b.into_iter() { result.push(KeyValueStorageLevel { - parent_storages, + state_root, key_values, + parent_storage_keys: storage_paths, }) } KeyValueStates(result) @@ -875,8 +880,17 @@ mod execution { let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); let mut count = 0; + let mut child_roots = HashSet::new(); let (mut child_key, mut start_at) = if start_at.len() == 2 { - (start_at.get(0).cloned(), start_at.get(1).cloned()) + let storage_key = start_at.get(0).expect("Checked length.").clone(); + if let Some(state_root) = proving_backend.storage(&storage_key) + .map_err(|e| Box::new(e) as Box)? 
{ + child_roots.insert(state_root.clone()); + } else { + return Err(Box::new("Invalid range start child trie key.")); + } + + (Some(storage_key), start_at.get(1).cloned()) } else { (None, start_at.get(0).cloned()) }; @@ -899,7 +913,7 @@ mod execution { child_info.as_ref(), None, start_at_ref, - |key, _value| { + |key, value| { if first { if start_at_ref.as_ref().map(|start| &key.as_slice() > start) .unwrap_or(true) { @@ -910,9 +924,15 @@ mod execution { true } else if depth < MAX_NESTED_TRIE_DEPTH && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { - switch_child_key = Some(key); count += 1; - false + if !child_roots.contains(value.as_slice()) { + child_roots.insert(value); + switch_child_key = Some(key); + false + } else { + // do not add two child trie with same root + true + } } else if proving_backend.estimate_encoded_size() <= size_limit { count += 1; true @@ -1201,25 +1221,38 @@ mod execution { H::Out: Ord + Codec, { let mut result = vec![KeyValueStorageLevel { - parent_storages: Default::default(), + state_root: Default::default(), key_values: Default::default(), + parent_storage_keys: Default::default(), }]; if start_at.len() > MAX_NESTED_TRIE_DEPTH { return Err(Box::new("Invalid start of range.")); } + let mut child_roots = HashSet::new(); let (mut child_key, mut start_at) = if start_at.len() == 2 { - (start_at.get(0).cloned(), start_at.get(1).cloned()) + let storage_key = start_at.get(0).expect("Checked length.").clone(); + let child_key = if let Some(state_root) = proving_backend.storage(&storage_key) + .map_err(|e| Box::new(e) as Box)? 
{ + child_roots.insert(state_root.clone()); + Some((storage_key, state_root)) + } else { + return Err(Box::new("Invalid range start child trie key.")); + }; + + (child_key, start_at.get(1).cloned()) } else { (None, start_at.get(0).cloned()) }; let completed = loop { - let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { + let (child_info, depth) = if let Some((storage_key, state_root)) = child_key.as_ref() { result.push(KeyValueStorageLevel { - parent_storages: vec![storage_key.clone()], + state_root: state_root.clone(), key_values: Default::default(), + parent_storage_keys: Default::default(), }); + let storage_key = PrefixedStorageKey::new_ref(storage_key); (Some(match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), @@ -1255,8 +1288,14 @@ true } else if depth < MAX_NESTED_TRIE_DEPTH && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { - switch_child_key = Some(key); - false + if child_roots.contains(value.as_slice()) { + // Do not add two child trie with same root. 
+ true + } else { + child_roots.insert(value.clone()); + switch_child_key = Some((key, value)); + false + } } else { true } @@ -1271,7 +1310,7 @@ mod execution { if depth == 1 { break 0; } else { - start_at = child_key.take(); + start_at = child_key.take().map(|entry| entry.0); } } else { child_key = switch_child_key; From 043f03582490b4d94c7e738e31b24e1a2e41878f Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 22 Jul 2021 18:12:33 +0200 Subject: [PATCH 15/19] rust format --- client/api/src/backend.rs | 2 +- client/api/src/lib.rs | 4 +- client/api/src/proof_provider.rs | 9 +- client/network/src/protocol/sync/state.rs | 54 ++-- client/network/src/state_request_handler.rs | 26 +- client/network/test/src/lib.rs | 2 +- client/network/test/src/sync.rs | 15 +- client/service/src/client/client.rs | 97 ++++--- .../consensus/common/src/block_import.rs | 1 - primitives/state-machine/src/lib.rs | 248 ++++++++++-------- primitives/state-machine/src/trie_backend.rs | 4 +- primitives/trie/src/trie_codec.rs | 6 +- 12 files changed, 266 insertions(+), 202 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 00ad5581e805c..53a67f847de6f 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -42,8 +42,8 @@ use std::{ sync::Arc, }; -pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; pub use sp_consensus::ImportedState; +pub use sp_state_machine::{Backend as StateBackend, KeyValueStates}; use std::marker::PhantomData; /// Extracts the state backend type for the given backend. 
diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index ef8e257fbbebd..f1c78f6603eb8 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -39,8 +39,8 @@ pub use proof_provider::*; pub use sp_blockchain as blockchain; pub use sp_blockchain::HeaderBackend; -pub use sp_state_machine::{StorageProof, CompactProof, ExecutionStrategy}; -pub use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; +pub use sp_state_machine::{CompactProof, ExecutionStrategy, StorageProof}; +pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; /// Usage Information Provider interface pub trait UsageProvider { diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 564511b70a18b..d03199e517def 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -17,13 +17,10 @@ // along with this program. If not, see . //! Proof utilities -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT}, -}; -use crate::{StorageProof, ChangesProof, CompactProof}; -use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; +use crate::{ChangesProof, CompactProof, StorageProof}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{KeyValueStates, KeyValueStorageLevel}; +use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider { diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 69ca67887e143..a9cdf450cb903 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -16,18 +16,17 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::sync::Arc; -use sc_client_api::CompactProof; -use smallvec::SmallVec; -use std::collections::HashMap; -use sp_core::storage::well_known_keys; use super::StateDownloadProgress; use crate::{ chain::{Client, ImportedState}, schema::v1::{StateEntry, StateRequest, StateResponse}, }; use codec::{Decode, Encode}; +use sc_client_api::CompactProof; +use smallvec::SmallVec; +use sp_core::storage::well_known_keys; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; +use std::{collections::HashMap, sync::Arc}; /// State sync support. @@ -98,8 +97,8 @@ impl StateSync { Ok(proof) => proof, Err(e) => { log::debug!(target: "sync", "Error decoding proof: {:?}", e); - return ImportResult::BadResponse; - } + return ImportResult::BadResponse + }, }; let (values, completed) = match self.client.verify_range_proof( self.target_root, @@ -112,7 +111,7 @@ impl StateSync { "StateResponse failed proof verification: {:?}", e, ); - return ImportResult::BadResponse; + return ImportResult::BadResponse }, Ok(values) => values, }; @@ -126,15 +125,21 @@ impl StateSync { for values in values.0 { let key_values = if values.state_root.is_empty() { // Read child trie roots. 
- values.key_values.into_iter() - .filter(|key_value| if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { - self.state.entry(key_value.1.clone()) - .or_default().1 + values + .key_values + .into_iter() + .filter(|key_value| { + if well_known_keys::is_child_storage_key(key_value.0.as_slice()) { + self.state + .entry(key_value.1.clone()) + .or_default() + .1 .push(key_value.0.clone()); false } else { true - }) + } + }) .collect() } else { values.key_values @@ -202,9 +207,7 @@ impl StateSync { } } for (root, storage_key) in child_roots { - self.state.entry(root) - .or_default().1 - .push(storage_key); + self.state.entry(root).or_default().1.push(storage_key); } } } @@ -212,10 +215,14 @@ impl StateSync { }; if complete { self.complete = true; - ImportResult::Import(self.target_block.clone(), self.target_header.clone(), ImportedState { - block: self.target_block.clone(), - state: std::mem::take(&mut self.state).into(), - }) + ImportResult::Import( + self.target_block.clone(), + self.target_header.clone(), + ImportedState { + block: self.target_block.clone(), + state: std::mem::take(&mut self.state).into(), + }, + ) } else { ImportResult::Continue(self.next_request()) } @@ -248,10 +255,7 @@ impl StateSync { /// Returns state sync estimated progress. pub fn progress(&self) -> StateDownloadProgress { let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8); - let percent_done = cursor as u32 * 100 / 256; - StateDownloadProgress { - percentage: percent_done, - size: self.imported_bytes, - } + let percent_done = cursor as u32 * 100 / 256; + StateDownloadProgress { percentage: percent_done, size: self.imported_bytes } } } diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index cbb23fab039b5..87abacee47105 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -17,11 +17,11 @@ //! Helper for handling (i.e. 
answering) state requests from a remote peer via the //! [`crate::request_responses::RequestResponsesBehaviour`]. -use crate::schema::v1::{StateResponse, StateRequest, StateEntry, KeyValueStateEntry}; use crate::{ chain::Client, config::ProtocolId, request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig}, + schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse}, PeerId, ReputationChange, }; use codec::{Decode, Encode}; @@ -191,14 +191,18 @@ impl StateRequestHandler { request.start.as_slice(), MAX_RESPONSE_BYTES, )?; - response.entries = entries.into_iter().map(|(state, complete)| { - KeyValueStateEntry { + response.entries = entries + .into_iter() + .map(|(state, complete)| KeyValueStateEntry { state_root: state.state_root, - entries: state.key_values.into_iter() - .map(|(key, value)| StateEntry { key, value }).collect(), + entries: state + .key_values + .into_iter() + .map(|(key, value)| StateEntry { key, value }) + .collect(), complete, - } - }).collect(); + }) + .collect(); } log::trace!( @@ -206,9 +210,13 @@ impl StateRequestHandler { "StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}", response.entries.len(), response.proof.len(), - response.entries.get(0).and_then(|top| top.entries.first() + response.entries.get(0).and_then(|top| top + .entries + .first() .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), - response.entries.get(0).and_then(|top| top.entries.last() + response.entries.get(0).and_then(|top| top + .entries + .last() .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))), ); if let Some(value) = self.seen_requests.get_mut(&key) { diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 8c32205f6b36e..a8f79164830f0 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -759,7 +759,7 @@ where *genesis_extra_storage = storage; } - if matches!(config.sync_mode, SyncMode::Fast{..}) { + if matches!(config.sync_mode, SyncMode::Fast { .. 
}) { test_client_builder = test_client_builder.set_no_genesis(); } let backend = test_client_builder.backend(); diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index d9829724a4deb..1d2a4f7c4518a 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -1123,15 +1123,22 @@ fn syncs_state() { data: child_data.clone(), child_info: sp_core::storage::ChildInfo::new_default(b"child2"), }; - genesis_storage.children_default.insert(child1.child_info.storage_key().to_vec(), child1); - genesis_storage.children_default.insert(child2.child_info.storage_key().to_vec(), child2); - genesis_storage.children_default.insert(child3.child_info.storage_key().to_vec(), child3); + genesis_storage + .children_default + .insert(child1.child_info.storage_key().to_vec(), child1); + genesis_storage + .children_default + .insert(child2.child_info.storage_key().to_vec(), child2); + genesis_storage + .children_default + .insert(child3.child_info.storage_key().to_vec(), child3); let mut config_one = FullPeerConfig::default(); config_one.extra_storage = Some(genesis_storage.clone()); net.add_full_peer_with_config(config_one); let mut config_two = FullPeerConfig::default(); config_two.extra_storage = Some(genesis_storage); - config_two.sync_mode = SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; + config_two.sync_mode = + SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false }; net.add_full_peer_with_config(config_two); net.peer(0).push_blocks(64, false); // Wait for peer 1 to sync header chain. 
diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index d9928c74783ee..5636133f03c65 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -63,8 +63,8 @@ use sp_consensus::{ use sp_core::{ convert_hash, storage::{ - well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, - StorageData, StorageKey, StorageChild, + well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData, + StorageKey, }, ChangesTrieConfiguration, ExecutionContext, NativeOrEncoded, }; @@ -79,13 +79,12 @@ use sp_runtime::{ BuildStorage, Justification, Justifications, }; use sp_state_machine::{ - key_changes, key_changes_proof, prove_child_read, prove_read, - Backend as StateBackend, ChangesTrieAnchorBlockId, - ChangesTrieConfigurationRange, ChangesTrieRootsStorage, ChangesTrieStorage, DBValue, - prove_range_read_with_child_with_size, KeyValueStates, KeyValueStorageLevel, - read_range_proof_check_with_child_on_proving_backend, MAX_NESTED_TRIE_DEPTH, + key_changes, key_changes_proof, prove_child_read, prove_range_read_with_child_with_size, + prove_read, read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + ChangesTrieStorage, DBValue, KeyValueStates, KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH, }; -use sp_trie::{StorageProof, CompactProof}; +use sp_trie::{CompactProof, StorageProof}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -828,11 +827,18 @@ where } else { for parent_storage in state.parent_storage_keys { let storage_key = PrefixedStorageKey::new_ref(&parent_storage); - let storage_key = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => storage_key, - None => return Err(Error::Backend("Invalid child storage key.".to_string())), - }; - let entry = 
storage.children_default.entry(storage_key.to_vec()) + let storage_key = + match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + storage_key, + None => + return Err(Error::Backend( + "Invalid child storage key.".to_string(), + )), + }; + let entry = storage + .children_default + .entry(storage_key.to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: ChildInfo::new_default(storage_key), @@ -1371,9 +1377,7 @@ where let root = state.storage_root(std::iter::empty()).0; let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>( - state, - size_limit, - start_key, + state, size_limit, start_key, )?; let proof = sp_trie::encode_compact::>>(proof, root) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; @@ -1387,34 +1391,40 @@ where size_limit: usize, ) -> sp_blockchain::Result> { if start_key.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Error::Backend("Invalid start key.".to_string())); + return Err(Error::Backend("Invalid start key.".to_string())) } let state = self.state_at(id)?; let child_info = |storage_key: &Vec| -> sp_blockchain::Result { let storage_key = PrefixedStorageKey::new_ref(&storage_key); match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + Some((ChildType::ParentKeyId, storage_key)) => + Ok(ChildInfo::new_default(storage_key)), None => Err(Error::Backend("Invalid child storage key.".to_string())), } }; let mut current_child = if start_key.len() == 2 { let start_key = start_key.get(0).expect("checked len"); - if let Some(child_root) = state.storage(&start_key) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? { + if let Some(child_root) = state + .storage(&start_key) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ { Some((child_info(start_key)?, child_root)) } else { - return Err(Error::Backend("Invalid root start key.".to_string())); + return Err(Error::Backend("Invalid root start key.".to_string())) } } else { None }; let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new()); let mut total_size = 0; - let mut result = vec![(KeyValueStorageLevel { - state_root: Vec::new(), - key_values: Vec::new(), - parent_storage_keys: Vec::new(), - }, false)]; + let mut result = vec![( + KeyValueStorageLevel { + state_root: Vec::new(), + key_values: Vec::new(), + parent_storage_keys: Vec::new(), + }, + false, + )]; let mut child_roots = HashSet::new(); loop { @@ -1444,17 +1454,18 @@ where let size = value.len() + next_key.len(); if total_size + size > size_limit && !entries.is_empty() { complete = false; - break; + break } total_size += size; - if current_child.is_none() - && sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) { + if current_child.is_none() && + sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice()) + { if !child_roots.contains(value.as_slice()) { child_roots.insert(value.clone()); switch_child_key = Some((next_key.clone(), value.clone())); entries.push((next_key.clone(), value)); - break; + break } } entries.push((next_key.clone(), value)); @@ -1466,18 +1477,21 @@ where current_key = Vec::new(); } else if let Some((child, child_root)) = current_child.take() { current_key = child.into_prefixed_storage_key().into_inner(); - result.push((KeyValueStorageLevel { - state_root: child_root, - key_values: entries, - parent_storage_keys: Vec::new(), - }, complete)); + result.push(( + KeyValueStorageLevel { + state_root: child_root, + key_values: entries, + parent_storage_keys: Vec::new(), + }, + complete, + )); if !complete { - break; + break } } else { result[0].0.key_values.extend(entries.into_iter()); result[0].1 = complete; - break; + break } } Ok(result) @@ -1494,13 +1508,12 @@ where &mut db, 
proof.iter_compact_encoded_nodes(), Some(&root), - ).map_err(|e| { - sp_blockchain::Error::from_state(Box::new(e)) - })?; + ) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?; let proving_backend = sp_state_machine::TrieBackend::new(db, root); let state = read_range_proof_check_with_child_on_proving_backend::>( - &proving_backend, - start_key, + &proving_backend, + start_key, )?; Ok(state) diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index f0603729116bc..1b630f2240aa2 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -141,7 +141,6 @@ pub enum StorageChanges { Import(ImportedState), } - /// Imported state data. A vector of key-value pairs that should form a trie. #[derive(PartialEq, Eq, Clone)] pub struct ImportedState { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 98b46bb23ace7..9ab5b669c0de0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -163,7 +163,7 @@ mod std_reexport { }; pub use sp_trie::{ trie_types::{Layout, TrieDBMut}, - DBValue, MemoryDB, StorageProof, CompactProof, TrieMut, + CompactProof, DBValue, MemoryDB, StorageProof, TrieMut, }; } @@ -173,6 +173,7 @@ mod execution { use codec::{Codec, Decode, Encode}; use hash_db::Hasher; use log::{trace, warn}; + use smallvec::SmallVec; use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, @@ -180,8 +181,12 @@ mod execution { NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; - use smallvec::SmallVec; - use std::{collections::{HashMap, HashSet}, fmt, panic::UnwindSafe, result}; + use std::{ + collections::{HashMap, HashSet}, + fmt, + panic::UnwindSafe, + result, + }; const PROOF_CLOSE_TRANSACTION: &str = "\ Closing a transaction that was started in this function. 
Client initiated transactions @@ -736,7 +741,8 @@ mod execution { } impl From for KeyValueStates - where I: IntoIterator, (Vec<(Vec, Vec)>, Vec>))> + where + I: IntoIterator, (Vec<(Vec, Vec)>, Vec>))>, { fn from(b: I) -> Self { let mut result = Vec::new(); @@ -758,13 +764,18 @@ mod execution { } /// Update last keys accessed from this state. - pub fn update_last_key(&self, stopped_at: usize, last: &mut SmallVec<[Vec; 2]>) -> bool { + pub fn update_last_key( + &self, + stopped_at: usize, + last: &mut SmallVec<[Vec; 2]>, + ) -> bool { if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH { - return false; + return false } match stopped_at { 1 => { - let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + let top_last = + self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); if let Some(top_last) = top_last { match last.len() { 0 => { @@ -778,24 +789,26 @@ mod execution { } // update top trie access. last[0] = top_last; - return true; + return true } else { // No change in top trie accesses. // Indicates end of reading of a child trie. last.truncate(1); - return true; + return true } }, 2 => { - let top_last = self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); - let child_last = self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + let top_last = + self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); + let child_last = + self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone())); if let Some(child_last) = child_last { if last.len() == 0 { if let Some(top_last) = top_last { last.push(top_last) } else { - return false; + return false } } else if let Some(top_last) = top_last { last[0] = top_last; @@ -804,10 +817,10 @@ mod execution { last.pop(); } last.push(child_last); - return true; + return true } else { // stopped at level 2 so child last is define. 
- return false; + return false } }, _ => (), @@ -833,13 +846,10 @@ mod execution { H: Hasher, H::Out: Ord + Codec, { - let trie_backend = backend.as_trie_backend() + let trie_backend = backend + .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_range_read_with_child_with_size_on_trie_backend( - trie_backend, - size_limit, - start_at, - ) + prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at) } /// Generate range storage read proof, with child tries @@ -856,7 +866,7 @@ mod execution { H::Out: Ord + Codec, { if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")); + return Err(Box::new("Invalid start of range.")) } let proving_backend = proving_backend::ProvingBackend::::new(trie_backend); @@ -865,11 +875,13 @@ mod execution { let mut child_roots = HashSet::new(); let (mut child_key, mut start_at) = if start_at.len() == 2 { let storage_key = start_at.get(0).expect("Checked length.").clone(); - if let Some(state_root) = proving_backend.storage(&storage_key) - .map_err(|e| Box::new(e) as Box)? { + if let Some(state_root) = proving_backend + .storage(&storage_key) + .map_err(|e| Box::new(e) as Box)? 
+ { child_roots.insert(state_root.clone()); } else { - return Err(Box::new("Invalid range start child trie key.")); + return Err(Box::new("Invalid range start child trie key.")) } (Some(storage_key), start_at.get(1).cloned()) @@ -880,10 +892,14 @@ mod execution { loop { let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() { let storage_key = PrefixedStorageKey::new_ref(storage_key); - (Some(match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err(Box::new("Invalid range start child trie key.")), - }), 2) + ( + Some(match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(Box::new("Invalid range start child trie key.")), + }), + 2, + ) } else { (None, 1) }; @@ -891,48 +907,55 @@ mod execution { let start_at_ref = start_at.as_ref().map(AsRef::as_ref); let mut switch_child_key = None; let mut first = start_at.is_some(); - let completed = proving_backend.apply_to_key_values_while( - child_info.as_ref(), - None, - start_at_ref, - |key, value| { - if first { - if start_at_ref.as_ref().map(|start| &key.as_slice() > start) - .unwrap_or(true) { - first = false; + let completed = proving_backend + .apply_to_key_values_while( + child_info.as_ref(), + None, + start_at_ref, + |key, value| { + if first { + if start_at_ref + .as_ref() + .map(|start| &key.as_slice() > start) + .unwrap_or(true) + { + first = false; + } } - } - if first { - true - } else if depth < MAX_NESTED_TRIE_DEPTH - && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { - count += 1; - if !child_roots.contains(value.as_slice()) { - child_roots.insert(value); - switch_child_key = Some(key); - false - } else { - // do not add two child trie with same root + if first { + true + } else if depth < MAX_NESTED_TRIE_DEPTH && + 
sp_core::storage::well_known_keys::is_child_storage_key( + key.as_slice(), + ) { + count += 1; + if !child_roots.contains(value.as_slice()) { + child_roots.insert(value); + switch_child_key = Some(key); + false + } else { + // do not add two child trie with same root + true + } + } else if proving_backend.estimate_encoded_size() <= size_limit { + count += 1; true + } else { + false } - } else if proving_backend.estimate_encoded_size() <= size_limit { - count += 1; - true - } else { - false - } - }, - false, - ).map_err(|e| Box::new(e) as Box)?; + }, + false, + ) + .map_err(|e| Box::new(e) as Box)?; if switch_child_key.is_none() { if depth == 1 { - break; + break } else { if completed { start_at = child_key.take(); } else { - break; + break } } } else { @@ -1100,10 +1123,7 @@ mod execution { H::Out: Ord + Codec, { let proving_backend = create_proof_check_backend::(root, proof)?; - read_range_proof_check_with_child_on_proving_backend( - &proving_backend, - start_at, - ) + read_range_proof_check_with_child_on_proving_backend(&proving_backend, start_at) } /// Check child storage range proof, generated by `prove_range_read_with_size` call. @@ -1231,18 +1251,20 @@ mod execution { parent_storage_keys: Default::default(), }]; if start_at.len() > MAX_NESTED_TRIE_DEPTH { - return Err(Box::new("Invalid start of range.")); + return Err(Box::new("Invalid start of range.")) } let mut child_roots = HashSet::new(); let (mut child_key, mut start_at) = if start_at.len() == 2 { let storage_key = start_at.get(0).expect("Checked length.").clone(); - let child_key = if let Some(state_root) = proving_backend.storage(&storage_key) - .map_err(|e| Box::new(e) as Box)? { + let child_key = if let Some(state_root) = proving_backend + .storage(&storage_key) + .map_err(|e| Box::new(e) as Box)? 
+ { child_roots.insert(state_root.clone()); Some((storage_key, state_root)) } else { - return Err(Box::new("Invalid range start child trie key.")); + return Err(Box::new("Invalid range start child trie key.")) }; (child_key, start_at.get(1).cloned()) @@ -1259,10 +1281,14 @@ mod execution { }); let storage_key = PrefixedStorageKey::new_ref(storage_key); - (Some(match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err(Box::new("Invalid range start child trie key.")), - }), 2) + ( + Some(match ChildType::from_prefixed_key(&storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => + ChildInfo::new_default(storage_key), + None => return Err(Box::new("Invalid range start child trie key.")), + }), + 2, + ) } else { (None, 1) }; @@ -1275,45 +1301,52 @@ mod execution { let start_at_ref = start_at.as_ref().map(AsRef::as_ref); let mut switch_child_key = None; let mut first = start_at.is_some(); - let completed = proving_backend.apply_to_key_values_while( - child_info.as_ref(), - None, - start_at_ref, - |key, value| { - if first { - if start_at_ref.as_ref().map(|start| &key.as_slice() > start) - .unwrap_or(true) { - first = false; + let completed = proving_backend + .apply_to_key_values_while( + child_info.as_ref(), + None, + start_at_ref, + |key, value| { + if first { + if start_at_ref + .as_ref() + .map(|start| &key.as_slice() > start) + .unwrap_or(true) + { + first = false; + } } - } - if !first { - values.push((key.to_vec(), value.to_vec())); - } - if first { - true - } else if depth < MAX_NESTED_TRIE_DEPTH - && sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice()) { - if child_roots.contains(value.as_slice()) { - // Do not add two chid trie with same root. 
+ if !first { + values.push((key.to_vec(), value.to_vec())); + } + if first { true + } else if depth < MAX_NESTED_TRIE_DEPTH && + sp_core::storage::well_known_keys::is_child_storage_key( + key.as_slice(), + ) { + if child_roots.contains(value.as_slice()) { + // Do not add two chid trie with same root. + true + } else { + child_roots.insert(value.clone()); + switch_child_key = Some((key, value)); + false + } } else { - child_roots.insert(value.clone()); - switch_child_key = Some((key, value)); - false + true } - } else { - true - } - }, - true, - ).map_err(|e| Box::new(e) as Box)?; + }, + true, + ) + .map_err(|e| Box::new(e) as Box)?; if switch_child_key.is_none() { if !completed { - break depth; + break depth } if depth == 1 { - break 0; + break 0 } else { start_at = child_key.take().map(|entry| entry.0); } @@ -1906,7 +1939,8 @@ mod tests { fn prove_read_with_size_limit_works() { let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); + let (proof, count) = + prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Always contains at least some nodes. assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); @@ -1969,7 +2003,8 @@ mod tests { trie_backend, 1, start_at.as_slice(), - ).unwrap(); + ) + .unwrap(); // Always contains at least some nodes. assert!(proof.clone().into_memory_db::().drain().len() > 0); assert!(count < 3); // when doing child we include parent and first child key. 
@@ -1978,10 +2013,11 @@ mod tests { remote_root, proof.clone(), start_at.as_slice(), - ).unwrap(); + ) + .unwrap(); if completed_depth == 0 { - break; + break } assert!(result.update_last_key(completed_depth, &mut start_at)); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 1e70aa2e3235b..f1bbdb3bb0ea9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -321,7 +321,9 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), + test_trie + .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") + .unwrap(), Some(vec![142u8; 33]), ); } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index d26d974522249..dd7cdaca7d17c 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -163,10 +163,8 @@ where let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { - let (top_root, _) = trie_db::decode_compact_from_iter::( - db, - &mut nodes_iter, - )?; + let (top_root, _) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } From 7c0ff497ca20a59e1e411b35eab96b7c9e9c1c61 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 4 Aug 2021 13:21:50 +0200 Subject: [PATCH 16/19] Fix merge. 
--- client/service/src/client/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 7331e594051c5..db8a3da936b90 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -823,7 +823,7 @@ where } Some((main_sc, child_sc)) }, - sp_consensus::StorageChanges::Import(changes) => { + sc_consensus::StorageChanges::Import(changes) => { let mut storage = sp_storage::Storage::default(); for state in changes.state.0.into_iter() { if state.parent_storage_keys.len() == 0 && state.state_root.len() == 0 { From c4aa2d4b0cce7848253e088212c2bef54f446fa5 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Sep 2021 18:42:06 +0200 Subject: [PATCH 17/19] fix warnings and fmt --- client/network/src/protocol/sync/state.rs | 2 +- primitives/state-machine/src/lib.rs | 4 ++-- primitives/trie/src/trie_codec.rs | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index a169820483e8d..17119224a5015 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -73,7 +73,7 @@ impl StateSync { /// Validate and import a state reponse. pub fn import(&mut self, response: StateResponse) -> ImportResult { - if response.entries.is_empty() && response.proof.is_empty() && !response.complete { + if response.entries.is_empty() && response.proof.is_empty() { debug!(target: "sync", "Bad state response"); return ImportResult::BadResponse } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a7b353bfdd02c..fca2f6bae9fe0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -857,7 +857,7 @@ mod execution { /// If a key different than `start_at` is a child trie root, /// the child trie content will be included in the proof. 
pub fn prove_range_read_with_child_with_size( - mut backend: B, + backend: B, size_limit: usize, start_at: &[Vec], ) -> Result<(StorageProof, u32), Box> @@ -2111,7 +2111,7 @@ mod tests { #[test] fn prove_range_with_child_works() { - let mut remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let mut start_at = smallvec::SmallVec::<[Vec; 2]>::new(); let trie_backend = remote_backend.as_trie_backend().unwrap(); diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 919ec22f0125e..1596229f2b5de 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -160,7 +160,6 @@ where return Err(Error::IncompleteProof) } - let mut nodes_iter = nodes_iter.peekable(); let mut previous_extracted_child_trie = None; let mut nodes_iter = nodes_iter.peekable(); for child_root in child_tries.into_iter() { From 670c67affddb6abe7e032024e54cc68d1880a3a2 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 12 Oct 2021 18:06:33 +0200 Subject: [PATCH 18/19] fmt --- client/network/src/protocol/sync/state.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index 83cf1c322fc5c..43aa1c4629f0e 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -22,10 +22,10 @@ use crate::{ schema::v1::{StateEntry, StateRequest, StateResponse}, }; use codec::{Decode, Encode}; +use log::debug; use sc_client_api::CompactProof; use smallvec::SmallVec; use sp_core::storage::well_known_keys; -use log::debug; use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; use std::{collections::HashMap, sync::Arc}; From e4cec7dfcac3a6393b0095fe77b6bc531bd67de9 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 13 Oct 2021 10:05:12 +0200 Subject: [PATCH 19/19] update protocol id to V2 
--- client/network/src/state_request_handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index a71d4c64463cf..0d710c13af607 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -66,7 +66,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String { let mut s = String::new(); s.push_str("/"); s.push_str(protocol_id.as_ref()); - s.push_str("/state/1"); + s.push_str("/state/2"); s }