diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs
index 8b5bd50ffa614..9dfe82a57ab3b 100644
--- a/client/api/src/backend.rs
+++ b/client/api/src/backend.rs
@@ -42,7 +42,7 @@ use std::{
sync::Arc,
};
-pub use sp_state_machine::Backend as StateBackend;
+pub use sp_state_machine::{Backend as StateBackend, KeyValueStates};
use std::marker::PhantomData;
/// Extracts the state backend type for the given backend.
diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs
index 16935b1e846cf..f1c78f6603eb8 100644
--- a/client/api/src/lib.rs
+++ b/client/api/src/lib.rs
@@ -39,7 +39,7 @@ pub use proof_provider::*;
pub use sp_blockchain as blockchain;
pub use sp_blockchain::HeaderBackend;
-pub use sp_state_machine::{ExecutionStrategy, StorageProof};
+pub use sp_state_machine::{CompactProof, ExecutionStrategy, StorageProof};
pub use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey};
/// Usage Information Provider interface
diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs
index 79444f0069232..75f9c55e134d2 100644
--- a/client/api/src/proof_provider.rs
+++ b/client/api/src/proof_provider.rs
@@ -17,8 +17,9 @@
// along with this program. If not, see .
//! Proof utilities
-use crate::{ChangesProof, StorageProof};
+use crate::{ChangesProof, CompactProof, StorageProof};
use sp_runtime::{generic::BlockId, traits::Block as BlockT};
+use sp_state_machine::{KeyValueStates, KeyValueStorageLevel};
use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey};
/// Interface for providing block proving utilities.
@@ -71,31 +72,43 @@ pub trait ProofProvider {
key: &StorageKey,
) -> sp_blockchain::Result>;
- /// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively,
- /// building proofs until size limit is reached. Returns combined proof and the number of
- /// collected keys.
+ /// Given a `BlockId` iterate over all storage values starting at `start_keys`.
+ /// Last `start_keys` element contains last accessed key value.
+ /// With multiple `start_keys`, first `start_keys` element is
+ /// the current storage key of the last accessed child trie,
+ /// and at the last level the value to start at exclusively.
+ /// Proofs are built until the size limit is reached and always include at
+ /// least one key following `start_keys`.
+ /// Returns combined proof and the numbers of collected keys.
fn read_proof_collection(
&self,
id: &BlockId,
- start_key: &[u8],
+ start_keys: &[Vec],
size_limit: usize,
- ) -> sp_blockchain::Result<(StorageProof, u32)>;
+ ) -> sp_blockchain::Result<(CompactProof, u32)>;
/// Given a `BlockId` iterate over all storage values starting at `start_key`.
/// Returns collected keys and values.
+ /// Returns the collected keys values content of the top trie followed by the
+ /// collected keys values of child tries.
+ /// Only child tries with their root part of the collected content or
+ /// related to `start_key` are attached.
+ /// For each collected state a boolean indicates if the state has reached its
+ /// end.
fn storage_collection(
&self,
id: &BlockId,
- start_key: &[u8],
+ start_key: &[Vec],
size_limit: usize,
- ) -> sp_blockchain::Result, Vec)>>;
+ ) -> sp_blockchain::Result>;
/// Verify read storage proof for a set of keys.
- /// Returns collected key-value pairs and a flag indicating if iteration is complete.
+ /// Returns collected key-value pairs and the nested state
+ /// depth of current iteration or 0 if completed.
fn verify_range_proof(
&self,
root: Block::Hash,
- proof: StorageProof,
- start_key: &[u8],
- ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)>;
+ proof: CompactProof,
+ start_keys: &[Vec],
+ ) -> sp_blockchain::Result<(KeyValueStates, usize)>;
}
diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs
index d828e54bc7e3e..5294db2396042 100644
--- a/client/consensus/common/src/block_import.rs
+++ b/client/consensus/common/src/block_import.rs
@@ -133,7 +133,7 @@ pub struct ImportedState {
/// Target block hash.
pub block: B::Hash,
/// State keys and values.
- pub state: Vec<(Vec, Vec)>,
+ pub state: sp_state_machine::KeyValueStates,
}
impl std::fmt::Debug for ImportedState {
diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs
index e644ba1013e4d..43aa1c4629f0e 100644
--- a/client/network/src/protocol/sync/state.rs
+++ b/client/network/src/protocol/sync/state.rs
@@ -23,9 +23,11 @@ use crate::{
};
use codec::{Decode, Encode};
use log::debug;
-use sc_client_api::StorageProof;
+use sc_client_api::CompactProof;
+use smallvec::SmallVec;
+use sp_core::storage::well_known_keys;
use sp_runtime::traits::{Block as BlockT, Header, NumberFor};
-use std::sync::Arc;
+use std::{collections::HashMap, sync::Arc};
/// State sync support.
@@ -35,8 +37,8 @@ pub struct StateSync {
target_block: B::Hash,
target_header: B::Header,
target_root: B::Hash,
- last_key: Vec,
- state: Vec<(Vec, Vec)>,
+ last_key: SmallVec<[Vec; 2]>,
+ state: HashMap, (Vec<(Vec, Vec)>, Vec>)>,
complete: bool,
client: Arc>,
imported_bytes: u64,
@@ -61,8 +63,8 @@ impl StateSync {
target_block: target.hash(),
target_root: target.state_root().clone(),
target_header: target,
- last_key: Vec::default(),
- state: Vec::default(),
+ last_key: SmallVec::default(),
+ state: HashMap::default(),
complete: false,
imported_bytes: 0,
skip_proof,
@@ -71,7 +73,7 @@ impl StateSync {
/// Validate and import a state response.
pub fn import(&mut self, response: StateResponse) -> ImportResult {
- if response.entries.is_empty() && response.proof.is_empty() && !response.complete {
+ if response.entries.is_empty() && response.proof.is_empty() {
debug!(target: "sync", "Bad state response");
return ImportResult::BadResponse
}
@@ -82,56 +84,135 @@ impl StateSync {
let complete = if !self.skip_proof {
debug!(target: "sync", "Importing state from {} trie nodes", response.proof.len());
let proof_size = response.proof.len() as u64;
- let proof = match StorageProof::decode(&mut response.proof.as_ref()) {
+ let proof = match CompactProof::decode(&mut response.proof.as_ref()) {
Ok(proof) => proof,
Err(e) => {
debug!(target: "sync", "Error decoding proof: {:?}", e);
return ImportResult::BadResponse
},
};
- let (values, complete) =
- match self.client.verify_range_proof(self.target_root, proof, &self.last_key) {
- Err(e) => {
- debug!(target: "sync", "StateResponse failed proof verification: {:?}", e);
- return ImportResult::BadResponse
- },
- Ok(values) => values,
- };
+ let (values, completed) = match self.client.verify_range_proof(
+ self.target_root,
+ proof,
+ self.last_key.as_slice(),
+ ) {
+ Err(e) => {
+ debug!(
+ target: "sync",
+ "StateResponse failed proof verification: {:?}",
+ e,
+ );
+ return ImportResult::BadResponse
+ },
+ Ok(values) => values,
+ };
debug!(target: "sync", "Imported with {} keys", values.len());
- if let Some(last) = values.last().map(|(k, _)| k) {
- self.last_key = last.clone();
- }
+ let complete = completed == 0;
+ if !complete && !values.update_last_key(completed, &mut self.last_key) {
+ debug!(target: "sync", "Error updating key cursor, depth: {}", completed);
+ };
- for (key, value) in values {
- self.imported_bytes += key.len() as u64;
- self.state.push((key, value))
+ for values in values.0 {
+ let key_values = if values.state_root.is_empty() {
+ // Read child trie roots.
+ values
+ .key_values
+ .into_iter()
+ .filter(|key_value| {
+ if well_known_keys::is_child_storage_key(key_value.0.as_slice()) {
+ self.state
+ .entry(key_value.1.clone())
+ .or_default()
+ .1
+ .push(key_value.0.clone());
+ false
+ } else {
+ true
+ }
+ })
+ .collect()
+ } else {
+ values.key_values
+ };
+ let mut entry = self.state.entry(values.state_root).or_default();
+ if entry.0.len() > 0 && entry.1.len() > 1 {
+ // Already imported child_trie with same root.
+ // Warning this will not work with parallel download.
+ } else {
+ if entry.0.is_empty() {
+ for (key, _value) in key_values.iter() {
+ self.imported_bytes += key.len() as u64;
+ }
+
+ entry.0 = key_values;
+ } else {
+ for (key, value) in key_values {
+ self.imported_bytes += key.len() as u64;
+ entry.0.push((key, value))
+ }
+ }
+ }
}
self.imported_bytes += proof_size;
complete
} else {
- debug!(
- target: "sync",
- "Importing state from {:?} to {:?}",
- response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
- response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
- );
-
- if let Some(e) = response.entries.last() {
- self.last_key = e.key.clone();
+ let mut complete = true;
+ // if the trie is a child trie and one of its parent trie is empty,
+ // the parent cursor stays valid.
+ // Empty parent trie content only happens when all the response content
+ // is part of a single child trie.
+ if self.last_key.len() == 2 && response.entries[0].entries.len() == 0 {
+ // Do not remove the parent trie position.
+ self.last_key.pop();
+ } else {
+ self.last_key.clear();
}
- for StateEntry { key, value } in response.entries {
- self.imported_bytes += (key.len() + value.len()) as u64;
- self.state.push((key, value))
+ for state in response.entries {
+ debug!(
+ target: "sync",
+ "Importing state from {:?} to {:?}",
+ state.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
+ state.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
+ );
+
+ if !state.complete {
+ if let Some(e) = state.entries.last() {
+ self.last_key.push(e.key.clone());
+ }
+ complete = false;
+ }
+ let is_top = state.state_root.is_empty();
+ let entry = self.state.entry(state.state_root).or_default();
+ if entry.0.len() > 0 && entry.1.len() > 1 {
+ // Already imported child trie with same root.
+ } else {
+ let mut child_roots = Vec::new();
+ for StateEntry { key, value } in state.entries {
+ // Skip all child trie roots (they will be recalculated on import).
+ if is_top && well_known_keys::is_child_storage_key(key.as_slice()) {
+ child_roots.push((value, key));
+ } else {
+ self.imported_bytes += key.len() as u64;
+ entry.0.push((key, value))
+ }
+ }
+ for (root, storage_key) in child_roots {
+ self.state.entry(root).or_default().1.push(storage_key);
+ }
+ }
}
- response.complete
+ complete
};
if complete {
self.complete = true;
ImportResult::Import(
self.target_block,
self.target_header.clone(),
- ImportedState { block: self.target_block, state: std::mem::take(&mut self.state) },
+ ImportedState {
+ block: self.target_block.clone(),
+ state: std::mem::take(&mut self.state).into(),
+ },
)
} else {
ImportResult::Continue
@@ -142,7 +223,7 @@ impl StateSync {
pub fn next_request(&self) -> StateRequest {
StateRequest {
block: self.target_block.encode(),
- start: self.last_key.clone(),
+ start: self.last_key.clone().into_vec(),
no_proof: self.skip_proof,
}
}
@@ -164,7 +245,8 @@ impl StateSync {
/// Returns state sync estimated progress.
pub fn progress(&self) -> StateDownloadProgress {
- let percent_done = (*self.last_key.get(0).unwrap_or(&0u8) as u32) * 100 / 256;
+ let cursor = *self.last_key.get(0).and_then(|last| last.get(0)).unwrap_or(&0u8);
+ let percent_done = cursor as u32 * 100 / 256;
StateDownloadProgress { percentage: percent_done, size: self.imported_bytes }
}
}
diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto
index c5333c7dcdbf1..b51137d1d51d4 100644
--- a/client/network/src/schema/api.v1.proto
+++ b/client/network/src/schema/api.v1.proto
@@ -74,22 +74,32 @@ message BlockData {
message StateRequest {
// Block header hash.
bytes block = 1;
- // Start from this key. Equivalent to if omitted.
- bytes start = 2; // optional
+ // Start from this key.
+ // Multiple keys used for nested state start.
+ repeated bytes start = 2; // optional
// if 'true' indicates that response should contain raw key-values, rather than proof.
bool no_proof = 3;
}
message StateResponse {
- // A collection of keys-values. Only populated if `no_proof` is `true`
- repeated StateEntry entries = 1;
+ // A collection of keys-values states. Only populated if `no_proof` is `true`
+ repeated KeyValueStateEntry entries = 1;
// If `no_proof` is false in request, this contains proof nodes.
bytes proof = 2;
+}
+
+// A key value state.
+message KeyValueStateEntry {
+ // Root for this level, zero length bytes
+ // if top level.
+ bytes state_root = 1;
+ // A collection of keys-values.
+ repeated StateEntry entries = 2;
// Set to true when there are no more keys to return.
bool complete = 3;
}
-// A key-value pair
+// A key-value pair.
message StateEntry {
bytes key = 1;
bytes value = 2;
diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs
index d2e58ce955197..0d710c13af607 100644
--- a/client/network/src/state_request_handler.rs
+++ b/client/network/src/state_request_handler.rs
@@ -21,7 +21,7 @@ use crate::{
chain::Client,
config::ProtocolId,
request_responses::{IncomingRequest, OutgoingResponse, ProtocolConfig},
- schema::v1::{StateEntry, StateRequest, StateResponse},
+ schema::v1::{KeyValueStateEntry, StateEntry, StateRequest, StateResponse},
PeerId, ReputationChange,
};
use codec::{Decode, Encode};
@@ -66,7 +66,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
let mut s = String::new();
s.push_str("/");
s.push_str(protocol_id.as_ref());
- s.push_str("/state/1");
+ s.push_str("/state/2");
s
}
@@ -75,7 +75,7 @@ fn generate_protocol_name(protocol_id: &ProtocolId) -> String {
struct SeenRequestsKey {
peer: PeerId,
block: B::Hash,
- start: Vec,
+ start: Vec>,
}
#[allow(clippy::derive_hash_xor_eq)]
@@ -169,10 +169,10 @@ impl StateRequestHandler {
trace!(
target: LOG_TARGET,
- "Handling state request from {}: Block {:?}, Starting at {:?}, no_proof={}",
+ "Handling state request from {}: Block {:?}, Starting at {:x?}, no_proof={}",
peer,
request.block,
- sp_core::hexdisplay::HexDisplay::from(&request.start),
+ &request.start,
request.no_proof,
);
@@ -180,36 +180,45 @@ impl StateRequestHandler {
let mut response = StateResponse::default();
if !request.no_proof {
- let (proof, count) = self.client.read_proof_collection(
+ let (proof, _count) = self.client.read_proof_collection(
&BlockId::hash(block),
- &request.start,
+ request.start.as_slice(),
MAX_RESPONSE_BYTES,
)?;
response.proof = proof.encode();
- if count == 0 {
- response.complete = true;
- }
} else {
let entries = self.client.storage_collection(
&BlockId::hash(block),
- &request.start,
+ request.start.as_slice(),
MAX_RESPONSE_BYTES,
)?;
- response.entries =
- entries.into_iter().map(|(key, value)| StateEntry { key, value }).collect();
- if response.entries.is_empty() {
- response.complete = true;
- }
+ response.entries = entries
+ .into_iter()
+ .map(|(state, complete)| KeyValueStateEntry {
+ state_root: state.state_root,
+ entries: state
+ .key_values
+ .into_iter()
+ .map(|(key, value)| StateEntry { key, value })
+ .collect(),
+ complete,
+ })
+ .collect();
}
trace!(
target: LOG_TARGET,
- "StateResponse contains {} keys, {}, proof nodes, complete={}, from {:?} to {:?}",
+ "StateResponse contains {} keys, {}, proof nodes, from {:?} to {:?}",
response.entries.len(),
response.proof.len(),
- response.complete,
- response.entries.first().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
- response.entries.last().map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key)),
+ response.entries.get(0).and_then(|top| top
+ .entries
+ .first()
+ .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
+ response.entries.get(0).and_then(|top| top
+ .entries
+ .last()
+ .map(|e| sp_core::hexdisplay::HexDisplay::from(&e.key))),
);
if let Some(value) = self.seen_requests.get_mut(&key) {
// If this is the first time we have processed this request, we need to change
diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs
index fb0012aaf5baf..084b09fd65f8f 100644
--- a/client/network/test/src/lib.rs
+++ b/client/network/test/src/lib.rs
@@ -697,6 +697,8 @@ pub struct FullPeerConfig {
pub is_authority: bool,
/// Syncing mode
pub sync_mode: SyncMode,
+ /// Extra genesis storage.
+ pub extra_storage: Option,
/// Enable transaction indexing.
pub storage_chain: bool,
}
@@ -765,6 +767,11 @@ where
(Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks),
(None, false) => TestClientBuilder::with_default_backend(),
};
+ if let Some(storage) = config.extra_storage {
+ let genesis_extra_storage = test_client_builder.genesis_init_mut().extra_storage();
+ *genesis_extra_storage = storage;
+ }
+
if matches!(config.sync_mode, SyncMode::Fast { .. } | SyncMode::Warp) {
test_client_builder = test_client_builder.set_no_genesis();
}
diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs
index f3af7f8ff6fc3..ff62b5476d1e6 100644
--- a/client/network/test/src/sync.rs
+++ b/client/network/test/src/sync.rs
@@ -1110,11 +1110,44 @@ fn syncs_state() {
sp_tracing::try_init_simple();
for skip_proofs in &[false, true] {
let mut net = TestNet::new(0);
- net.add_full_peer_with_config(Default::default());
- net.add_full_peer_with_config(FullPeerConfig {
- sync_mode: SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false },
- ..Default::default()
- });
+ let mut genesis_storage: sp_core::storage::Storage = Default::default();
+ genesis_storage.top.insert(b"additional_key".to_vec(), vec![1]);
+ let mut child_data: std::collections::BTreeMap, Vec> = Default::default();
+ for i in 0u8..16 {
+ child_data.insert(vec![i; 5], vec![i; 33]);
+ }
+ let child1 = sp_core::storage::StorageChild {
+ data: child_data.clone(),
+ child_info: sp_core::storage::ChildInfo::new_default(b"child1"),
+ };
+ let child3 = sp_core::storage::StorageChild {
+ data: child_data.clone(),
+ child_info: sp_core::storage::ChildInfo::new_default(b"child3"),
+ };
+ for i in 22u8..33 {
+ child_data.insert(vec![i; 5], vec![i; 33]);
+ }
+ let child2 = sp_core::storage::StorageChild {
+ data: child_data.clone(),
+ child_info: sp_core::storage::ChildInfo::new_default(b"child2"),
+ };
+ genesis_storage
+ .children_default
+ .insert(child1.child_info.storage_key().to_vec(), child1);
+ genesis_storage
+ .children_default
+ .insert(child2.child_info.storage_key().to_vec(), child2);
+ genesis_storage
+ .children_default
+ .insert(child3.child_info.storage_key().to_vec(), child3);
+ let mut config_one = FullPeerConfig::default();
+ config_one.extra_storage = Some(genesis_storage.clone());
+ net.add_full_peer_with_config(config_one);
+ let mut config_two = FullPeerConfig::default();
+ config_two.extra_storage = Some(genesis_storage);
+ config_two.sync_mode =
+ SyncMode::Fast { skip_proofs: *skip_proofs, storage_chain_mode: false };
+ net.add_full_peer_with_config(config_two);
net.peer(0).push_blocks(64, false);
// Wait for peer 1 to sync header chain.
net.block_until_sync();
diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs
index 4e3cb0aaf234b..6ce2feb050759 100644
--- a/client/service/src/client/client.rs
+++ b/client/service/src/client/client.rs
@@ -64,7 +64,10 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError};
use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender};
use sp_core::{
convert_hash,
- storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey},
+ storage::{
+ well_known_keys, ChildInfo, ChildType, PrefixedStorageKey, StorageChild, StorageData,
+ StorageKey,
+ },
ChangesTrieConfiguration, NativeOrEncoded,
};
#[cfg(feature = "test-helpers")]
@@ -78,11 +81,12 @@ use sp_runtime::{
BuildStorage, Justification, Justifications,
};
use sp_state_machine::{
- key_changes, key_changes_proof, prove_child_read, prove_range_read_with_size, prove_read,
- read_range_proof_check, Backend as StateBackend, ChangesTrieAnchorBlockId,
- ChangesTrieConfigurationRange, ChangesTrieRootsStorage, ChangesTrieStorage, DBValue,
+ key_changes, key_changes_proof, prove_child_read, prove_range_read_with_child_with_size,
+ prove_read, read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend,
+ ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage,
+ ChangesTrieStorage, DBValue, KeyValueStates, KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH,
};
-use sp_trie::StorageProof;
+use sp_trie::{CompactProof, StorageProof};
use std::{
collections::{BTreeMap, HashMap, HashSet},
marker::PhantomData,
@@ -824,10 +828,37 @@ where
Some((main_sc, child_sc))
},
sc_consensus::StorageChanges::Import(changes) => {
- let storage = sp_storage::Storage {
- top: changes.state.into_iter().collect(),
- children_default: Default::default(),
- };
+ let mut storage = sp_storage::Storage::default();
+ for state in changes.state.0.into_iter() {
+ if state.parent_storage_keys.len() == 0 && state.state_root.len() == 0 {
+ for (key, value) in state.key_values.into_iter() {
+ storage.top.insert(key, value);
+ }
+ } else {
+ for parent_storage in state.parent_storage_keys {
+ let storage_key = PrefixedStorageKey::new_ref(&parent_storage);
+ let storage_key =
+ match ChildType::from_prefixed_key(&storage_key) {
+ Some((ChildType::ParentKeyId, storage_key)) =>
+ storage_key,
+ None =>
+ return Err(Error::Backend(
+ "Invalid child storage key.".to_string(),
+ )),
+ };
+ let entry = storage
+ .children_default
+ .entry(storage_key.to_vec())
+ .or_insert_with(|| StorageChild {
+ data: Default::default(),
+ child_info: ChildInfo::new_default(storage_key),
+ });
+ for (key, value) in state.key_values.iter() {
+ entry.data.insert(key.clone(), value.clone());
+ }
+ }
+ }
+ }
let state_root = operation.op.reset_storage(storage)?;
if state_root != *import_headers.post().state_root() {
@@ -1347,62 +1378,153 @@ where
fn read_proof_collection(
&self,
id: &BlockId,
- start_key: &[u8],
+ start_key: &[Vec],
size_limit: usize,
- ) -> sp_blockchain::Result<(StorageProof, u32)> {
+ ) -> sp_blockchain::Result<(CompactProof, u32)> {
let state = self.state_at(id)?;
- Ok(prove_range_read_with_size::<_, HashFor>(
- state,
- None,
- None,
- size_limit,
- Some(start_key),
- )?)
+ let root = state.storage_root(std::iter::empty()).0;
+
+ let (proof, count) = prove_range_read_with_child_with_size::<_, HashFor>(
+ state, size_limit, start_key,
+ )?;
+ let proof = sp_trie::encode_compact::>>(proof, root)
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
+ Ok((proof, count))
}
fn storage_collection(
&self,
id: &BlockId,
- start_key: &[u8],
+ start_key: &[Vec],
size_limit: usize,
- ) -> sp_blockchain::Result, Vec)>> {
+ ) -> sp_blockchain::Result> {
+ if start_key.len() > MAX_NESTED_TRIE_DEPTH {
+ return Err(Error::Backend("Invalid start key.".to_string()))
+ }
let state = self.state_at(id)?;
- let mut current_key = start_key.to_vec();
- let mut total_size = 0;
- let mut entries = Vec::new();
- while let Some(next_key) = state
- .next_storage_key(¤t_key)
- .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
- {
- let value = state
- .storage(next_key.as_ref())
+ let child_info = |storage_key: &Vec| -> sp_blockchain::Result {
+ let storage_key = PrefixedStorageKey::new_ref(&storage_key);
+ match ChildType::from_prefixed_key(&storage_key) {
+ Some((ChildType::ParentKeyId, storage_key)) =>
+ Ok(ChildInfo::new_default(storage_key)),
+ None => Err(Error::Backend("Invalid child storage key.".to_string())),
+ }
+ };
+ let mut current_child = if start_key.len() == 2 {
+ let start_key = start_key.get(0).expect("checked len");
+ if let Some(child_root) = state
+ .storage(&start_key)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
- .unwrap_or_default();
- let size = value.len() + next_key.len();
- if total_size + size > size_limit && !entries.is_empty() {
+ {
+ Some((child_info(start_key)?, child_root))
+ } else {
+ return Err(Error::Backend("Invalid root start key.".to_string()))
+ }
+ } else {
+ None
+ };
+ let mut current_key = start_key.last().map(Clone::clone).unwrap_or(Vec::new());
+ let mut total_size = 0;
+ let mut result = vec![(
+ KeyValueStorageLevel {
+ state_root: Vec::new(),
+ key_values: Vec::new(),
+ parent_storage_keys: Vec::new(),
+ },
+ false,
+ )];
+
+ let mut child_roots = HashSet::new();
+ loop {
+ let mut entries = Vec::new();
+ let mut complete = true;
+ let mut switch_child_key = None;
+ while let Some(next_key) = if let Some(child) = current_child.as_ref() {
+ state
+ .next_child_storage_key(&child.0, ¤t_key)
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
+ } else {
+ state
+ .next_storage_key(¤t_key)
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
+ } {
+ let value = if let Some(child) = current_child.as_ref() {
+ state
+ .child_storage(&child.0, next_key.as_ref())
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
+ .unwrap_or_default()
+ } else {
+ state
+ .storage(next_key.as_ref())
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
+ .unwrap_or_default()
+ };
+ let size = value.len() + next_key.len();
+ if total_size + size > size_limit && !entries.is_empty() {
+ complete = false;
+ break
+ }
+ total_size += size;
+
+ if current_child.is_none() &&
+ sp_core::storage::well_known_keys::is_child_storage_key(next_key.as_slice())
+ {
+ if !child_roots.contains(value.as_slice()) {
+ child_roots.insert(value.clone());
+ switch_child_key = Some((next_key.clone(), value.clone()));
+ entries.push((next_key.clone(), value));
+ break
+ }
+ }
+ entries.push((next_key.clone(), value));
+ current_key = next_key;
+ }
+ if let Some((child, child_root)) = switch_child_key.take() {
+ result[0].0.key_values.extend(entries.into_iter());
+ current_child = Some((child_info(&child)?, child_root));
+ current_key = Vec::new();
+ } else if let Some((child, child_root)) = current_child.take() {
+ current_key = child.into_prefixed_storage_key().into_inner();
+ result.push((
+ KeyValueStorageLevel {
+ state_root: child_root,
+ key_values: entries,
+ parent_storage_keys: Vec::new(),
+ },
+ complete,
+ ));
+ if !complete {
+ break
+ }
+ } else {
+ result[0].0.key_values.extend(entries.into_iter());
+ result[0].1 = complete;
break
}
- total_size += size;
- entries.push((next_key.clone(), value));
- current_key = next_key;
}
- Ok(entries)
+ Ok(result)
}
fn verify_range_proof(
&self,
root: Block::Hash,
- proof: StorageProof,
- start_key: &[u8],
- ) -> sp_blockchain::Result<(Vec<(Vec, Vec)>, bool)> {
- Ok(read_range_proof_check::>(
- root,
- proof,
- None,
- None,
- None,
- Some(start_key),
- )?)
+ proof: CompactProof,
+ start_key: &[Vec],
+ ) -> sp_blockchain::Result<(KeyValueStates, usize)> {
+ let mut db = sp_state_machine::MemoryDB::>::new(&[]);
+ let _ = sp_trie::decode_compact::>, _, _>(
+ &mut db,
+ proof.iter_compact_encoded_nodes(),
+ Some(&root),
+ )
+ .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?;
+ let proving_backend = sp_state_machine::TrieBackend::new(db, root);
+ let state = read_range_proof_check_with_child_on_proving_backend::>(
+ &proving_backend,
+ start_key,
+ )?;
+
+ Ok(state)
}
}
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs
index a724cf5c9a0b4..b0178021f3130 100644
--- a/primitives/state-machine/src/lib.rs
+++ b/primitives/state-machine/src/lib.rs
@@ -172,7 +172,7 @@ mod std_reexport {
};
pub use sp_trie::{
trie_types::{Layout, TrieDBMut},
- DBValue, MemoryDB, StorageProof, TrieMut,
+ CompactProof, DBValue, MemoryDB, StorageProof, TrieMut,
};
}
@@ -181,15 +181,20 @@ mod execution {
use super::*;
use codec::{Codec, Decode, Encode};
use hash_db::Hasher;
+ use smallvec::SmallVec;
use sp_core::{
hexdisplay::HexDisplay,
- storage::ChildInfo,
+ storage::{ChildInfo, ChildType, PrefixedStorageKey},
traits::{CodeExecutor, ReadRuntimeVersionExt, RuntimeCode, SpawnNamed},
NativeOrEncoded, NeverNativeValue,
};
use sp_externalities::Extensions;
- use std::{collections::HashMap, fmt, panic::UnwindSafe, result};
- use tracing::{trace, warn};
+ use std::{
+ collections::{HashMap, HashSet},
+ fmt,
+ panic::UnwindSafe,
+ result,
+ };
const PROOF_CLOSE_TRANSACTION: &str = "\
Closing a transaction that was started in this function. Client initiated transactions
@@ -742,6 +747,254 @@ mod execution {
prove_read_on_trie_backend(trie_backend, keys)
}
+ /// State machine only allows a single level
+ /// of child trie.
+ pub const MAX_NESTED_TRIE_DEPTH: usize = 2;
+
+ /// Multiple key value state.
+ /// States are ordered by root storage key.
+ #[derive(PartialEq, Eq, Clone)]
+ pub struct KeyValueStates(pub Vec);
+
+ /// A key value state at any storage level.
+ #[derive(PartialEq, Eq, Clone)]
+ pub struct KeyValueStorageLevel {
+ /// State root of the level, for
+ /// the top trie it is an empty byte array.
+ pub state_root: Vec,
+ /// Storage of parents, empty for top root or
+ /// when exporting (building proof).
+ pub parent_storage_keys: Vec>,
+ /// Pair of key and values from this state.
+ pub key_values: Vec<(Vec, Vec)>,
+ }
+
+ impl From for KeyValueStates
+ where
+ I: IntoIterator- , (Vec<(Vec, Vec)>, Vec>))>,
+ {
+ fn from(b: I) -> Self {
+ let mut result = Vec::new();
+ for (state_root, (key_values, storage_paths)) in b.into_iter() {
+ result.push(KeyValueStorageLevel {
+ state_root,
+ key_values,
+ parent_storage_keys: storage_paths,
+ })
+ }
+ KeyValueStates(result)
+ }
+ }
+
+ impl KeyValueStates {
+ /// Return total number of key values in states.
+ pub fn len(&self) -> usize {
+ self.0.iter().fold(0, |nb, state| nb + state.key_values.len())
+ }
+
+ /// Update last keys accessed from this state.
+ pub fn update_last_key(
+ &self,
+ stopped_at: usize,
+ last: &mut SmallVec<[Vec; 2]>,
+ ) -> bool {
+ if stopped_at == 0 || stopped_at > MAX_NESTED_TRIE_DEPTH {
+ return false
+ }
+ match stopped_at {
+ 1 => {
+ let top_last =
+ self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+ if let Some(top_last) = top_last {
+ match last.len() {
+ 0 => {
+ last.push(top_last);
+ return true
+ },
+ 2 => {
+ last.pop();
+ },
+ _ => (),
+ }
+ // update top trie access.
+ last[0] = top_last;
+ return true
+ } else {
+ // No change in top trie accesses.
+ // Indicates end of reading of a child trie.
+ last.truncate(1);
+ return true
+ }
+ },
+ 2 => {
+ let top_last =
+ self.0.get(0).and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+ let child_last =
+ self.0.last().and_then(|s| s.key_values.last().map(|kv| kv.0.clone()));
+
+ if let Some(child_last) = child_last {
+ if last.len() == 0 {
+ if let Some(top_last) = top_last {
+ last.push(top_last)
+ } else {
+ return false
+ }
+ } else if let Some(top_last) = top_last {
+ last[0] = top_last;
+ }
+ if last.len() == 2 {
+ last.pop();
+ }
+ last.push(child_last);
+ return true
+ } else {
+ // stopped at level 2, so child last is defined.
+ return false
+ }
+ },
+ _ => (),
+ }
+ false
+ }
+ }
+
+ /// Generate range storage read proof, with child tries
+ /// content.
+ /// A size limit is applied to the proof with the
+ /// exception that `start_at` and its following element
+ /// are always part of the proof.
+ /// If a key different than `start_at` is a child trie root,
+ /// the child trie content will be included in the proof.
+ pub fn prove_range_read_with_child_with_size(
+ backend: B,
+ size_limit: usize,
+ start_at: &[Vec],
+ ) -> Result<(StorageProof, u32), Box>
+ where
+ B: Backend,
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ let trie_backend = backend
+ .as_trie_backend()
+ .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?;
+ prove_range_read_with_child_with_size_on_trie_backend(trie_backend, size_limit, start_at)
+ }
+
+ /// Generate range storage read proof, with child tries
+ /// content.
+ /// See `prove_range_read_with_child_with_size`.
+ pub fn prove_range_read_with_child_with_size_on_trie_backend
(
+ trie_backend: &TrieBackend,
+ size_limit: usize,
+ start_at: &[Vec],
+ ) -> Result<(StorageProof, u32), Box>
+ where
+ S: trie_backend_essence::TrieBackendStorage,
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ if start_at.len() > MAX_NESTED_TRIE_DEPTH {
+ return Err(Box::new("Invalid start of range."))
+ }
+
+ let proving_backend = proving_backend::ProvingBackend::::new(trie_backend);
+ let mut count = 0;
+
+ let mut child_roots = HashSet::new();
+ let (mut child_key, mut start_at) = if start_at.len() == 2 {
+ let storage_key = start_at.get(0).expect("Checked length.").clone();
+ if let Some(state_root) = proving_backend
+ .storage(&storage_key)
+ .map_err(|e| Box::new(e) as Box)?
+ {
+ child_roots.insert(state_root.clone());
+ } else {
+ return Err(Box::new("Invalid range start child trie key."))
+ }
+
+ (Some(storage_key), start_at.get(1).cloned())
+ } else {
+ (None, start_at.get(0).cloned())
+ };
+
+ loop {
+ let (child_info, depth) = if let Some(storage_key) = child_key.as_ref() {
+ let storage_key = PrefixedStorageKey::new_ref(storage_key);
+ (
+ Some(match ChildType::from_prefixed_key(&storage_key) {
+ Some((ChildType::ParentKeyId, storage_key)) =>
+ ChildInfo::new_default(storage_key),
+ None => return Err(Box::new("Invalid range start child trie key.")),
+ }),
+ 2,
+ )
+ } else {
+ (None, 1)
+ };
+
+ let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
+ let mut switch_child_key = None;
+ let mut first = start_at.is_some();
+ let completed = proving_backend
+ .apply_to_key_values_while(
+ child_info.as_ref(),
+ None,
+ start_at_ref,
+ |key, value| {
+ if first {
+ if start_at_ref
+ .as_ref()
+ .map(|start| &key.as_slice() > start)
+ .unwrap_or(true)
+ {
+ first = false;
+ }
+ }
+ if first {
+ true
+ } else if depth < MAX_NESTED_TRIE_DEPTH &&
+ sp_core::storage::well_known_keys::is_child_storage_key(
+ key.as_slice(),
+ ) {
+ count += 1;
+ if !child_roots.contains(value.as_slice()) {
+ child_roots.insert(value);
+ switch_child_key = Some(key);
+ false
+ } else {
+ // do not add two child tries with the same root
+ true
+ }
+ } else if proving_backend.estimate_encoded_size() <= size_limit {
+ count += 1;
+ true
+ } else {
+ false
+ }
+ },
+ false,
+ )
+ .map_err(|e| Box::new(e) as Box)?;
+
+ if switch_child_key.is_none() {
+ if depth == 1 {
+ break
+ } else {
+ if completed {
+ start_at = child_key.take();
+ } else {
+ break
+ }
+ }
+ } else {
+ child_key = switch_child_key;
+ start_at = None;
+ }
+ }
+ Ok((proving_backend.extract_proof(), count))
+ }
+
/// Generate range storage read proof.
pub fn prove_range_read_with_size<B, H>(
backend: B,
@@ -884,7 +1137,25 @@ mod execution {
Ok(result)
}
- /// Check child storage range proof, generated by `prove_range_read` call.
+ /// Check storage range proof with child trie included, generated by
+ /// `prove_range_read_with_child_with_size` call.
+ ///
+ /// Returns the key-value contents and the depth of the pending state iteration
+ /// (0 if completed).
+ pub fn read_range_proof_check_with_child<H>(
+ root: H::Out,
+ proof: StorageProof,
+ start_at: &[Vec<u8>],
+ ) -> Result<(KeyValueStates, usize), Box<dyn Error>>
+ where
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ let proving_backend = create_proof_check_backend::<H>(root, proof)?;
+ read_range_proof_check_with_child_on_proving_backend(&proving_backend, start_at)
+ }
+
+ /// Check child storage range proof, generated by `prove_range_read_with_size` call.
pub fn read_range_proof_check<H>(
root: H::Out,
proof: StorageProof,
@@ -991,6 +1262,130 @@ mod execution {
Err(e) => Err(Box::new(e) as Box),
}
}
+
+ /// Check storage range proof on pre-created proving backend.
+ ///
+ /// See `read_range_proof_check_with_child`.
+ pub fn read_range_proof_check_with_child_on_proving_backend<H>(
+ proving_backend: &TrieBackend<MemoryDB<H>, H>,
+ start_at: &[Vec<u8>],
+ ) -> Result<(KeyValueStates, usize), Box<dyn Error>>
+ where
+ H: Hasher,
+ H::Out: Ord + Codec,
+ {
+ let mut result = vec![KeyValueStorageLevel {
+ state_root: Default::default(),
+ key_values: Default::default(),
+ parent_storage_keys: Default::default(),
+ }];
+ if start_at.len() > MAX_NESTED_TRIE_DEPTH {
+ return Err(Box::new("Invalid start of range."))
+ }
+
+ let mut child_roots = HashSet::new();
+ let (mut child_key, mut start_at) = if start_at.len() == 2 {
+ let storage_key = start_at.get(0).expect("Checked length.").clone();
+ let child_key = if let Some(state_root) = proving_backend
+ .storage(&storage_key)
+ .map_err(|e| Box::new(e) as Box<dyn Error>)?
+ {
+ child_roots.insert(state_root.clone());
+ Some((storage_key, state_root))
+ } else {
+ return Err(Box::new("Invalid range start child trie key."))
+ };
+
+ (child_key, start_at.get(1).cloned())
+ } else {
+ (None, start_at.get(0).cloned())
+ };
+
+ let completed = loop {
+ let (child_info, depth) = if let Some((storage_key, state_root)) = child_key.as_ref() {
+ result.push(KeyValueStorageLevel {
+ state_root: state_root.clone(),
+ key_values: Default::default(),
+ parent_storage_keys: Default::default(),
+ });
+
+ let storage_key = PrefixedStorageKey::new_ref(storage_key);
+ (
+ Some(match ChildType::from_prefixed_key(&storage_key) {
+ Some((ChildType::ParentKeyId, storage_key)) =>
+ ChildInfo::new_default(storage_key),
+ None => return Err(Box::new("Invalid range start child trie key.")),
+ }),
+ 2,
+ )
+ } else {
+ (None, 1)
+ };
+
+ let values = if child_info.is_some() {
+ &mut result.last_mut().expect("Added above").key_values
+ } else {
+ &mut result[0].key_values
+ };
+ let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
+ let mut switch_child_key = None;
+ let mut first = start_at.is_some();
+ let completed = proving_backend
+ .apply_to_key_values_while(
+ child_info.as_ref(),
+ None,
+ start_at_ref,
+ |key, value| {
+ if first {
+ if start_at_ref
+ .as_ref()
+ .map(|start| &key.as_slice() > start)
+ .unwrap_or(true)
+ {
+ first = false;
+ }
+ }
+ if !first {
+ values.push((key.to_vec(), value.to_vec()));
+ }
+ if first {
+ true
+ } else if depth < MAX_NESTED_TRIE_DEPTH &&
+ sp_core::storage::well_known_keys::is_child_storage_key(
+ key.as_slice(),
+ ) {
+ if child_roots.contains(value.as_slice()) {
+ // Do not add two child tries with the same root.
+ true
+ } else {
+ child_roots.insert(value.clone());
+ switch_child_key = Some((key, value));
+ false
+ }
+ } else {
+ true
+ }
+ },
+ true,
+ )
+ .map_err(|e| Box::new(e) as Box<dyn Error>)?;
+
+ if switch_child_key.is_none() {
+ if !completed {
+ break depth
+ }
+ if depth == 1 {
+ break 0
+ } else {
+ start_at = child_key.take().map(|entry| entry.0);
+ }
+ } else {
+ child_key = switch_child_key;
+ start_at = None;
+ }
+ };
+ Ok((KeyValueStates(result), completed))
+ }
}
#[cfg(test)]
@@ -1574,7 +1969,7 @@ mod tests {
assert_eq!(
local_result1.into_iter().collect::<Vec<_>>(),
- vec![(b"value3".to_vec(), Some(vec![142]))],
+ vec![(b"value3".to_vec(), Some(vec![142; 33]))],
);
assert_eq!(local_result2.into_iter().collect::<Vec<_>>(), vec![(b"value2".to_vec(), None)]);
assert_eq!(local_result3.into_iter().collect::<Vec<_>>(), vec![(b"dummy".to_vec(), None)]);
@@ -1678,7 +2073,7 @@ mod tests {
let remote_root = remote_backend.storage_root(::std::iter::empty()).0;
let (proof, count) =
prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap();
- // Alwasys contains at least some nodes.
+ // Always contains at least some nodes.
assert_eq!(proof.into_memory_db::<BlakeTwo256>().drain().len(), 3);
assert_eq!(count, 1);
@@ -1723,6 +2118,45 @@ mod tests {
assert_eq!(completed, true);
}
+ #[test]
+ fn prove_range_with_child_works() {
+ let remote_backend = trie_backend::tests::test_trie();
+ let remote_root = remote_backend.storage_root(::std::iter::empty()).0;
+ let mut start_at = smallvec::SmallVec::<[Vec<u8>; 2]>::new();
+ let trie_backend = remote_backend.as_trie_backend().unwrap();
+ let max_iter = 1000;
+ let mut nb_loop = 0;
+ loop {
+ nb_loop += 1;
+ if max_iter == nb_loop {
+ panic!("Too many loop in prove range");
+ }
+ let (proof, count) = prove_range_read_with_child_with_size_on_trie_backend(
+ trie_backend,
+ 1,
+ start_at.as_slice(),
+ )
+ .unwrap();
+ // Always contains at least some nodes.
+ assert!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len() > 0);
+ assert!(count < 3); // when entering a child trie, the parent and the first child key are both included.
+
+ let (result, completed_depth) = read_range_proof_check_with_child::<BlakeTwo256>(
+ remote_root,
+ proof.clone(),
+ start_at.as_slice(),
+ )
+ .unwrap();
+
+ if completed_depth == 0 {
+ break
+ }
+ assert!(result.update_last_key(completed_depth, &mut start_at));
+ }
+
+ assert_eq!(nb_loop, 10);
+ }
+
#[test]
fn compact_multiple_child_trie() {
// this root will be queried
diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs
index 7cb725a80503d..7f9a02e055251 100644
--- a/primitives/state-machine/src/trie_backend.rs
+++ b/primitives/state-machine/src/trie_backend.rs
@@ -281,8 +281,8 @@ pub mod tests {
{
let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace());
let mut trie = TrieDBMut::new(&mut mdb, &mut root);
- trie.insert(b"value3", &[142]).expect("insert failed");
- trie.insert(b"value4", &[124]).expect("insert failed");
+ trie.insert(b"value3", &[142; 33]).expect("insert failed");
+ trie.insert(b"value4", &[124; 33]).expect("insert failed");
};
{
@@ -319,7 +319,7 @@ pub mod tests {
test_trie
.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3")
.unwrap(),
- Some(vec![142u8]),
+ Some(vec![142u8; 33]),
);
// Change cache entry to check that caching is active.
test_trie
diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs
index bcfe93b6f7975..da92e0f37983c 100644
--- a/test-utils/runtime/client/src/lib.rs
+++ b/test-utils/runtime/client/src/lib.rs
@@ -137,6 +137,11 @@ impl GenesisParameters {
pub fn set_wasm_code(&mut self, code: Vec<u8>) {
self.wasm_code = Some(code);
}
+
+ /// Access extra genesis storage.
+ pub fn extra_storage(&mut self) -> &mut Storage {
+ &mut self.extra_storage
+ }
}
impl substrate_test_client::GenesisInit for GenesisParameters {