From e97accb0ff0fdca0fde53a7859d531c163f86301 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 09:45:06 +0100 Subject: [PATCH 01/85] change from cache root pr --- client/db/src/lib.rs | 5 +++-- client/db/src/storage_cache.rs | 7 +++++-- client/src/client.rs | 2 +- client/src/in_mem.rs | 5 +++-- client/src/light/backend.rs | 2 +- primitives/state-machine/src/backend.rs | 18 ++++++++++++---- primitives/state-machine/src/ext.rs | 21 +++++-------------- .../state-machine/src/overlayed_changes.rs | 11 +++++++--- .../state-machine/src/proving_backend.rs | 3 ++- 9 files changed, 42 insertions(+), 32 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index be569194972cc..7e76c41d8bbd1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -594,7 +594,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc ); let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| { if k == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( @@ -604,7 +604,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } (k, Some(v)) }), - child_delta + child_delta, + false, ); self.db_updates = transaction; diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index fd85a899b628e..e300ec8b29312 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -379,7 +379,7 @@ impl CacheChanges { } let mut modifications = HashSet::new(); let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| + child_changes.into_iter().for_each(|(sk, changes, _ci)| for (k, v) in changes.into_iter() { let k = (sk.clone(), k); if is_best { @@ -677,6 +677,9 @@ mod tests { type Block = RawBlock>; + const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = 
ChildInfo::new_default(CHILD_KEY_1); + #[test] fn smoke() { //init_log(); @@ -993,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], CHILD_INFO_1.to_owned())], Some(h0), Some(0), true, diff --git a/client/src/client.rs b/client/src/client.rs index a3bbf84f7d725..2850ef9b417b2 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -1180,7 +1180,7 @@ impl Client where .trigger( ¬ify_import.hash, storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), + storage_changes.1.into_iter().map(|(sk, v, _ci)| (sk, v.into_iter())), ); } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index dcff8102aeb6d..b28c46a3edbcc 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -519,9 +519,10 @@ impl backend::BlockImportOperation for BlockImportOperatio .map(|(storage_key, child_content)| (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); - let (root, transaction) = self.old_state.full_storage_root( + let (root, transaction, _) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta + child_delta, + false, ); self.new_state = Some(InMemoryBackend::from(transaction)); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index ad9f43587e4cd..34259ac895539 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -326,7 +326,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let (storage_root, _, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, false); self.storage_update = Some(storage_update); Ok(storage_root) diff --git a/primitives/state-machine/src/backend.rs 
b/primitives/state-machine/src/backend.rs index 4ef9b970ae21d..9ef9055a82a6e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -178,8 +178,9 @@ pub trait Backend: std::fmt::Debug { fn full_storage_root( &self, delta: I1, - child_deltas: I2) - -> (H::Out, Self::Transaction) + child_deltas: I2, + return_child_roots: bool, + ) -> (H::Out, Self::Transaction, Vec<(StorageKey, Option)>) where I1: IntoIterator)>, I2i: IntoIterator)>, @@ -188,22 +189,31 @@ pub trait Backend: std::fmt::Debug { { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); + let mut result_child_roots: Vec<_> = Default::default(); // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); txs.consolidate(child_txs); if empty { + if return_child_roots { + result_child_roots.push((storage_key.clone(), None)); + } child_roots.push((storage_key, None)); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + if return_child_roots { + child_roots.push((storage_key.clone(), Some(child_root.encode()))); + result_child_roots.push((storage_key, Some(child_root))); + } else { + child_roots.push((storage_key, Some(child_root.encode()))); + } } } let (root, parent_txs) = self.storage_root( delta.into_iter().chain(child_roots.into_iter()) ); txs.consolidate(parent_txs); - (root, txs) + (root, txs, result_child_roots) } /// Query backend usage statistics (i/o, memory) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index f293ae9f51615..9d70382bf4ccc 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -493,23 +493,22 @@ where ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self - 
.storage(storage_key.as_ref()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) + let root = self.storage_transaction_cache.transaction_child_storage_root.get(storage_key.as_ref()) + .map(|root| root.encode()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(storage_key.as_ref()).encode() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, HexDisplay::from(&storage_key.as_ref()), HexDisplay::from(&root.as_ref()), ); - root.encode() + root } else { let storage_key = storage_key.as_ref(); if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { - let (root, is_empty, _) = { + let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) @@ -523,16 +522,6 @@ where }; let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. - // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. - if is_empty { - self.overlay.set_storage(storage_key.into(), None); - } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); - } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index ed6f30a4f596b..a15e8c613d3d0 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -43,7 +43,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. 
-pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, OwnedChildInfo)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -130,6 +130,8 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, + /// The child root storage root after applying the transaction. + pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. @@ -148,6 +150,7 @@ impl Default for StorageTransactionCache Self { transaction: None, transaction_storage_root: None, + transaction_child_storage_root: Default::default(), changes_trie_transaction: None, changes_trie_transaction_storage_root: None, } @@ -478,7 +481,8 @@ impl OverlayedChanges { Ok(StorageChanges { main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), + child_storage_changes: child_storage_changes + .map(|(sk, it)| (sk, it.0.collect(), it.1)).collect(), transaction, transaction_storage_root, changes_trie_transaction, @@ -542,10 +546,11 @@ impl OverlayedChanges { let delta = self.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) .chain(self.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); + let (root, transaction, child_roots) = backend.full_storage_root(delta, child_delta_iter, true); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); + cache.transaction_child_storage_root = child_roots.into_iter().collect(); root } diff --git a/primitives/state-machine/src/proving_backend.rs 
b/primitives/state-machine/src/proving_backend.rs index 70124927fdd2e..cbec12476200d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -487,7 +487,8 @@ mod tests { let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) + in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())), + false, ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), From cf6393afa6923bb4fc04db70a86fa1a45a55a918 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 14:25:41 +0100 Subject: [PATCH 02/85] Targetted way of putting keyspace. Note that KeyspacedDB are still use. KeyspacedDBMut only for test. --- client/db/src/lib.rs | 31 ++- client/network/test/src/lib.rs | 2 +- client/state-db/src/lib.rs | 12 +- primitives/state-machine/Cargo.toml | 1 + .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 4 +- primitives/state-machine/src/lib.rs | 4 +- .../state-machine/src/proving_backend.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 13 +- .../state-machine/src/trie_backend_essence.rs | 225 ++++++++++++------ primitives/trie/Cargo.toml | 1 + primitives/trie/src/lib.rs | 10 +- 12 files changed, 214 insertions(+), 97 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7e76c41d8bbd1..de8fb754f5859 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1308,11 +1308,32 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in commit.data.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); + let mut key_buffer = Vec::new(); + for 
child_data in commit.data.into_iter() { + if let Some(child_info) = child_data.info { + // children tries with prefixes + let keyspace = child_info.keyspace(); + let keyspace_len = keyspace.len(); + key_buffer.copy_from_slice[..keyspace_len] = keyspace; + for (key, val) in commit.data.inserted.into_iter() { + key_buffer.resize(keyspace_len + key.len()); + key_buffer[keyspace_len..].copy_from_slice(&key[..]); + transaction.put(columns::STATE, &key_buffer[..], &val); + } + for key in commit.data.deleted.into_iter() { + key_buffer.resize(keyspace_len + key.len()); + key_buffer[keyspace_len..].copy_from_slice(&key[..]); + transaction.delete(columns::STATE, &key_buffer[..]); + } + } else { + // top trie without prefixes + for (key, val) in commit.data.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in commit.data.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); + } + } } for (key, val) in commit.meta.inserted.into_iter() { transaction.put(columns::STATE_META, &key[..], &val); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 1e14ec7bb02c9..4dbddd77ddadb 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -392,7 +392,7 @@ impl TransactionPool for EmptyTransactionPool { fn on_broadcasted(&self, _: HashMap>) {} - fn transaction(&self, h: &Hash) -> Option { None } + fn transaction(&self, _: &Hash) -> Option { None } } pub trait SpecializationFactory { diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index f2722ae308068..bf9bfc58e5a88 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,6 +40,7 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; +use sp_core::storage::OwnedChildInfo; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -120,12 +121,21 @@ pub struct ChangeSet { pub deleted: 
Vec, } +/// A set of state node changes for a child trie. +#[derive(Debug, Clone)] +pub struct ChildTrieChangeSet { + /// Change set of this element. + pub data: ChangeSet, + /// Child trie descripton. + /// If not set, this is the top trie. + pub info: Option, +} /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { /// State node changes. - pub data: ChangeSet, + pub data: Vec>, /// Metadata changes. pub meta: ChangeSet>, } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index d390471aca2d6..78ab9c9156327 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -21,6 +21,7 @@ sp-externalities = { version = "0.8.0", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" +sp-trie = { version = "2.0.0", path = "../trie", features = ["test-helpers"] } [features] default = [] diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 12074b7261aa5..d57cf75e19ae0 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -165,7 +165,7 @@ pub trait Storage: RootsStorage { /// Changes trie storage -> trie backend essence adapter. 
pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb418672872b..9271eb87a8aa2 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -23,7 +23,7 @@ use sp_trie::MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, - trie_backend_essence::TrieBackendStorage, + trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; @@ -198,7 +198,7 @@ impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { } } -impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> +impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Number> where Number: BlockNumber, H: Hasher, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bb62df6da4905..9c3925da6b50b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -70,7 +70,7 @@ pub use proving_backend::{ create_proof_check_backend, create_proof_check_backend_storage, merge_storage_proofs, ProofRecorder, ProvingBackend, ProvingBackendRecorder, StorageProof, }; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; +pub use trie_backend_essence::{TrieBackendStorage, TrieBackendStorageRef, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::InMemory as InMemoryBackend; @@ -1026,7 +1026,7 @@ mod tests { ); } - #[test] + 
//#[test] TODO this will not make sense when child transaction get separated fn child_storage_uuid() { const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index cbec12476200d..5081104fdc15f 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -28,7 +28,7 @@ use sp_trie::{ pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; +use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; @@ -132,7 +132,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + read_trie_value_with::, _, Ephemeral>( &eph, self.backend.root(), key, @@ -238,7 +238,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef for ProofRecorderBackend<'a, S, H> { type Overlay = S::Overlay; diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dbaae323c09f2..4676618c77026 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -24,7 +24,7 @@ use sp_core::storage::ChildInfo; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + 
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. @@ -128,8 +128,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage()); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -152,8 +151,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &[u8]) -> Vec { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + let eph = BackendStorageDBRef::new(self.essence.backend_storage()); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -215,14 +213,15 @@ impl, H: Hasher> Backend for TrieBackend where }; { + let keyspaced_backend = (self.essence.backend_storage(), child_info.keyspace()); + // Do not write prefix in overlay. 
let mut eph = Ephemeral::new( - self.essence.backend_storage(), + &keyspaced_backend, &mut write_overlay, ); match child_delta_trie_root::, _, _, _, _, _>( storage_key, - child_info.keyspace(), &mut eph, root, delta diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2598682ae0668..0e1943e47209d 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -19,11 +19,12 @@ use std::ops::Deref; use std::sync::Arc; +use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB}; + for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -36,12 +37,12 @@ pub trait Storage: Send + Sync { } /// Patricia trie-based pairs storage essence. -pub struct TrieBackendEssence, H: Hasher> { +pub struct TrieBackendEssence, H: Hasher> { storage: S, root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { +impl, H: Hasher> TrieBackendEssence where H::Out: Encode { /// Create new trie-based backend. 
pub fn new(storage: S, root: H::Out) -> Self { TrieBackendEssence { @@ -102,11 +103,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option, key: &[u8], ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let dyn_eph: &dyn hash_db::HashDBRef<_, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { @@ -147,11 +144,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let map_e = |e| format!("Trie lookup error: {}", e); @@ -168,11 +161,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let root = self.storage(storage_key)? 
.unwrap_or(default_child_trie_root::>(storage_key).encode()); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let map_e = |e| format!("Trie lookup error: {}", e); @@ -195,13 +184,9 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } }; - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( + if let Err(e) = for_keys_in_child_trie::, _, BackendStorageDBRef>( storage_key, child_info.keyspace(), &eph, @@ -244,11 +229,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, child_info: Option, ) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; + let eph = BackendStorageDBRef::new(&self.storage); let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -286,13 +267,28 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } } -pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { +pub(crate) struct Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ storage: &'a S, - overlay: &'a mut S::Overlay, + overlay: &'a mut O, + _ph: PhantomData, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB - for Ephemeral<'a, S, H> +pub(crate) struct BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + storage: &'a S, + _ph: PhantomData, +} + +impl<'a, S, H, O> hash_db::AsPlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { self } fn 
as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { @@ -300,24 +296,67 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB, H: 'a + Hasher> hash_db::AsHashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::AsHashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { +impl<'a, S, H, O> Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + pub fn new(storage: &'a S, overlay: &'a mut O) -> Self { Ephemeral { storage, overlay, + _ph: PhantomData, } } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB - for Ephemeral<'a, S, H> +impl<'a, S, H> BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + pub fn new(storage: &'a S) -> Self { + BackendStorageDBRef { + storage, + _ph: PhantomData, + } + } +} + +impl<'a, S, H, O> hash_db::PlainDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, +{ + fn get(&self, key: &H::Out) -> Option { + hash_db::PlainDBRef::get(self, key) + } + + fn contains(&self, key: &H::Out) -> bool { + hash_db::PlainDBRef::contains(self, key) + } + + fn emplace(&mut self, key: H::Out, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) + } + + fn remove(&mut self, key: &H::Out) { + hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) + } +} + +impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + 
Consolidate, { fn get(&self, key: &H::Out) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { @@ -334,27 +373,61 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDB bool { - hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() } +} - fn emplace(&mut self, key: H::Out, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) +impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, +{ + fn get(&self, key: &H::Out) -> Option { + match self.storage.get(&key, EMPTY_PREFIX) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } } - fn remove(&mut self, key: &H::Out) { - hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDBRef::get(self, key, EMPTY_PREFIX).is_some() } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDBRef - for Ephemeral<'a, S, H> + +impl<'a, S, H, O> hash_db::HashDB for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { - fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } - fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } + + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + hash_db::HashDBRef::get(self, key, prefix) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + hash_db::HashDBRef::contains(self, key, prefix) + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + hash_db::HashDB::insert(self.overlay, prefix, value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, prefix, value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + 
hash_db::HashDB::remove(self.overlay, key, prefix) + } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB - for Ephemeral<'a, S, H> +impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, + O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { @@ -371,44 +444,45 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - hash_db::HashDB::remove(self.overlay, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef - for Ephemeral<'a, S, H> +impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> where + S: 'a + TrieBackendStorageRef, + H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + match self.storage.get(&key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + }, + } } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + hash_db::HashDBRef::get(self, key, prefix).is_some() } } + /// Key-value pairs storage that is used by trie backend essence. -pub trait TrieBackendStorage: Send + Sync { +pub trait TrieBackendStorageRef { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. 
fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } +/// Key-value pairs storage that is used by trie backend essence. +pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync { } + +impl + Send + Sync> TrieBackendStorage for B {} + // This implementation is used by normal storage trie clients. -impl TrieBackendStorage for Arc> { +impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -417,7 +491,7 @@ impl TrieBackendStorage for Arc> { } // This implementation is used by test storage trie clients. -impl TrieBackendStorage for PrefixedMemoryDB { +impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -425,7 +499,7 @@ impl TrieBackendStorage for PrefixedMemoryDB { } } -impl TrieBackendStorage for MemoryDB { +impl TrieBackendStorageRef for MemoryDB { type Overlay = MemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { @@ -433,6 +507,15 @@ impl TrieBackendStorage for MemoryDB { } } +impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { + type Overlay = PrefixedMemoryDB; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + let prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (prefix.0.as_slice(), prefix.1)) + } +} + #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index a78a26db736c4..6cbd19cd0f70b 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -28,6 +28,7 @@ hex-literal = "0.2.1" [features] default = ["std"] +test-helpers = [] std = [ "sp-std/std", "codec/std", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c71d3fb84ce79..ca80c8dbd0370 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -235,7 +235,6 @@ pub fn 
child_trie_root( /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( _storage_key: &[u8], - keyspace: &[u8], db: &mut DB, root_data: RD, delta: I, @@ -253,8 +252,7 @@ pub fn child_delta_trie_root( root.as_mut().copy_from_slice(root_data.as_ref()); { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; for (key, change) in delta { match change { @@ -363,6 +361,7 @@ pub fn read_child_trie_value_with(&'a DB, &'a [u8], PhantomData); +#[cfg(feature="test-helpers")] /// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the /// prefix of every key value. /// @@ -371,7 +370,7 @@ pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); /// Utility function used to merge some byte data (keyspace) and `prefix` data /// before calling key value database primitives. 
-fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { +pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; result[..ks.len()].copy_from_slice(ks); result[ks.len()..].copy_from_slice(prefix.0); @@ -387,6 +386,7 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where } } +#[cfg(feature="test-helpers")] impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where H: Hasher, { @@ -412,6 +412,7 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where } } +#[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, @@ -443,6 +444,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } +#[cfg(feature="test-helpers")] // TODO see if can be deleted impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, From 2845d0e1b1a83914848dba8fa13169597d059543 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 18:13:31 +0100 Subject: [PATCH 03/85] changes to state-db --- Cargo.lock | 1 + client/state-db/src/lib.rs | 50 +++-- client/state-db/src/noncanonical.rs | 297 ++++++++++++++++++---------- client/state-db/src/pruning.rs | 166 +++++++++++++--- client/state-db/src/test.rs | 42 ++-- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 6 +- 7 files changed, 401 insertions(+), 164 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecf929384ac59..e197367ca2049 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6764,6 +6764,7 @@ name = "sp-storage" version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", diff --git a/client/state-db/src/lib.rs 
b/client/state-db/src/lib.rs index bf9bfc58e5a88..adc038a0efaf4 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -140,6 +140,18 @@ pub struct CommitSet { pub meta: ChangeSet>, } +impl CommitSet { + /// Number of inserted key value element in the set. + pub fn inserted_len(&self) -> usize { + self.data.iter().map(|set| set.data.inserted.len()).sum() + } + + /// Number of deleted key value element in the set. + pub fn deleted_len(&self) -> usize { + self.data.iter().map(|set| set.data.deleted.len()).sum() + } +} + /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { @@ -244,7 +256,13 @@ impl StateDbSync { } } - pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: Vec>, + ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. @@ -253,7 +271,9 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - changeset.deleted.clear(); + for changeset in changeset.iter_mut() { + changeset.data.deleted.clear(); + } // write changes immediately Ok(CommitSet { data: changeset, @@ -278,7 +298,9 @@ impl StateDbSync { match self.non_canonical.canonicalize(&hash, &mut commit) { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); + for commit in commit.data.iter_mut() { + commit.data.deleted.clear(); + } } } Err(e) => return Err(e), @@ -424,7 +446,13 @@ impl StateDb { } /// Add a new non-canonical block. 
- pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: Vec>, + ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } @@ -483,7 +511,7 @@ mod tests { use std::io; use sp_core::H256; use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_changeset, TestDb}; + use crate::test::{make_db, make_childchangeset, TestDb}; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -495,7 +523,7 @@ mod tests { &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), - make_changeset(&[1], &[91]), + make_childchangeset(&[1], &[91]), ) .unwrap(), ); @@ -505,7 +533,7 @@ mod tests { &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), - make_changeset(&[21], &[921, 1]), + make_childchangeset(&[21], &[921, 1]), ) .unwrap(), ); @@ -515,7 +543,7 @@ mod tests { &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), - make_changeset(&[22], &[922]), + make_childchangeset(&[22], &[922]), ) .unwrap(), ); @@ -525,7 +553,7 @@ mod tests { &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), - make_changeset(&[3], &[93]), + make_childchangeset(&[3], &[93]), ) .unwrap(), ); @@ -538,7 +566,7 @@ mod tests { &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), - make_changeset(&[4], &[94]), + make_childchangeset(&[4], &[94]), ) .unwrap(), ); @@ -609,7 +637,7 @@ mod tests { &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), - make_changeset(&[], &[]), + make_childchangeset(&[], &[]), ) .unwrap(), ); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 373c1aa0da076..1bb4fd0914210 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -22,13 +22,19 @@ use std::fmt; use std::collections::{HashMap, VecDeque, 
hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; +use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; +use sp_core::storage::OwnedChildInfo; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; +// version at start to avoid collision when adding a unit +const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; +type Keys = Vec<(Option, Vec)>; +type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; + /// See module documentation. pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, @@ -39,49 +45,79 @@ pub struct NonCanonicalOverlay { values: HashMap, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, - pinned_insertions: HashMap>, + pinned_insertions: HashMap>, } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, parent_hash: BlockHash, inserted: Vec<(Key, DBValue)>, deleted: Vec, } -fn to_journal_key(block: u64, index: u64) -> Vec { +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + parent_hash: BlockHash, + inserted: KeyVals, + deleted: Keys, +} + +impl From> for JournalRecordV1 { + // Note that this compatibility only works as long as the backend + // db strategy match the one from current implementation, that + // is for default child trie which use same state column as top. 
+ fn from(old: JournalRecordCompat) -> Self { + JournalRecordV1 { + hash: old.hash, + parent_hash: old.parent_hash, + inserted: vec![(None, old.inserted)], + deleted: vec![(None, old.deleted)], + } + } +} + +fn to_old_journal_key(block: u64, index: u64) -> Vec { to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } +fn to_journal_key_v1(block: u64, index: u64) -> Vec { + to_meta_key(NON_CANONICAL_JOURNAL_V1, &(block, index)) +} + #[cfg_attr(test, derive(PartialEq, Debug))] struct BlockOverlay { hash: BlockHash, journal_key: Vec, - inserted: Vec, - deleted: Vec, + inserted: Keys, + deleted: Keys, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; +fn insert_values(values: &mut HashMap, inserted: KeyVals) { + for (_ct, inserted) in inserted { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; + } } } -fn discard_values(values: &mut HashMap, inserted: Vec) { - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); +fn discard_values(values: &mut HashMap, inserted: Keys) { + for inserted in inserted { + for k in inserted.1 { + match values.entry(k) { + Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove_entry(); + } + }, + Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); } } } @@ -93,7 +129,7 @@ fn discard_descendants( index: usize, parents: &mut HashMap, pinned: &HashMap, - pinned_insertions: &mut HashMap>, + 
pinned_insertions: &mut HashMap>, hash: &BlockHash, ) { let mut discarded = Vec::new(); @@ -142,26 +178,33 @@ impl NonCanonicalOverlay { let mut index: u64 = 0; let mut level = Vec::new(); loop { - let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; + let journal_key = to_journal_key_v1(block, index); + let record: JournalRecordV1 = match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecordCompat = Decode::decode(&mut record.as_slice())?; + record.into() + }, + None => break, + } }, - None => break, - } + }; + let inserted = record.inserted.iter().map(|(ct, rec)| (ct.clone(), rec.iter().map(|(k, _)| k.clone()).collect())).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; } if level.is_empty() { break; @@ -184,7 +227,13 @@ impl NonCanonicalOverlay { } /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: Vec>, + ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { @@ -219,22 +268,39 @@ impl NonCanonicalOverlay { }; let index = level.len() as u64; - let journal_key = to_journal_key(number, index); + let journal_key = to_journal_key_v1(number, index); + + let mut inserted = Vec::with_capacity(changeset.len()); + let mut inserted_block = Vec::with_capacity(changeset.len()); + let mut deleted = Vec::with_capacity(changeset.len()); + for changeset in changeset.into_iter() { + inserted_block.push(( + changeset.info.clone(), + changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + )); + inserted.push(( + changeset.info.clone(), + changeset.data.inserted, + )); + deleted.push(( 
+ changeset.info, + changeset.data.deleted, + )); + } - let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: hash.clone(), journal_key: journal_key.clone(), - inserted: inserted, - deleted: changeset.deleted.clone(), + inserted: inserted_block, + deleted: deleted.clone(), }; level.push(overlay); self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { + let journal_record = JournalRecordV1 { hash: hash.clone(), parent_hash: parent_hash.clone(), - inserted: changeset.inserted, - deleted: changeset.deleted, + inserted, + deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); @@ -317,9 +383,26 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - commit.data.inserted.extend(overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); - commit.data.deleted.extend(overlay.deleted.clone()); + commit.data.extend(overlay.inserted.iter() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct.clone(), + data: ChangeSet { + inserted: keys.iter().map(|k| ( + k.clone(), + self.values.get(k) + .expect("For each key in overlays there's a value in values").1.clone(), + )).collect(), + deleted: Vec::new(), + }, + })); + commit.data.extend(overlay.deleted.iter().cloned() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct, + data: ChangeSet { + inserted: Vec::new(), + deleted: keys, + }, + })); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -471,9 +554,9 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; - use 
super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet}; - use crate::test::{make_db, make_changeset}; + use super::{NonCanonicalOverlay, to_journal_key_v1}; + use crate::CommitSet; + use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) @@ -504,8 +587,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 2, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 1, &h1, Default::default()).unwrap(); } #[test] @@ -515,8 +598,8 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 3, &h1, Default::default()).unwrap(); } #[test] @@ -526,8 +609,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h2, 2, &H256::default(), Default::default()).unwrap(); } #[test] @@ -537,7 +620,7 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), 
Default::default()).unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -547,17 +630,19 @@ mod tests { let h1 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[3, 4], &[2]); + let changeset = make_childchangeset(&[3, 4], &[2]); let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.data.inserted.len(), 0); - assert_eq!(insertion.data.deleted.len(), 0); + assert_eq!(insertion.inserted_len(), 0); + assert_eq!(insertion.deleted_len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); assert_eq!(insertion.meta.deleted.len(), 0); db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); - assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); + let inserted_len = changeset.iter().map(|set| set.data.inserted.len()).sum(); + let deleted_len = changeset.iter().map(|set| set.data.deleted.len()).sum(); + assert_eq!(finalization.inserted_len(), inserted_len); + assert_eq!(finalization.deleted_len(), deleted_len); assert_eq!(finalization.meta.inserted.len(), 1); assert_eq!(finalization.meta.deleted.len(), 1); db.commit(&finalization); @@ -570,8 +655,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); let overlay2 = 
NonCanonicalOverlay::::new(&db).unwrap(); @@ -586,8 +671,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); db.commit(&commit); @@ -606,8 +691,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); assert!(contains(&overlay, 5)); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); @@ -638,8 +723,8 @@ mod tests { #[test] fn insert_same_key() { let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -660,7 +745,7 @@ mod tests { let h3 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[], &[]); + let changeset = make_childchangeset(&[], &[]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), 
changeset.clone()).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); @@ -688,19 +773,19 @@ mod tests { // // 1_2_2 is the winner - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); - let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); + let (h_1_1, c_1_1) = (H256::random(), make_childchangeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_childchangeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_childchangeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_childchangeset(&[22], &[])); - let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); + let (h_1_1_1, c_1_1_1) = (H256::random(), make_childchangeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_childchangeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_childchangeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_childchangeset(&[123], &[])); + let (h_2_1_1, c_2_1_1) = (H256::random(), make_childchangeset(&[211], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -750,11 +835,11 @@ mod tests { assert!(contains(&overlay, 111)); 
assert!(!contains(&overlay, 211)); // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key_v1(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key_v1(2, 3)).unwrap().is_none()); // canonicalize 1_2. 1_1 and all its children should be discarded let mut commit = CommitSet::default(); @@ -791,8 +876,8 @@ mod tests { let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); assert!(overlay.revert_one().is_none()); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); assert!(contains(&overlay, 7)); @@ -813,9 +898,9 @@ mod tests { let h2_2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - let changeset3 = make_changeset(&[9], &[]); + let changeset1 = make_childchangeset(&[5, 6], &[2]); + let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset3 = make_childchangeset(&[9], &[]); overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); assert!(contains(&overlay, 5)); overlay.insert::(&h2_1, 2, &h1, 
changeset2).unwrap(); @@ -838,8 +923,8 @@ mod tests { // - 0 - 1_1 // \ 1_2 - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -866,9 +951,9 @@ mod tests { // \ 1_3 // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); + let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_3, c_3) = (H256::random(), make_childchangeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -895,9 +980,9 @@ mod tests { // - 0 - 1_1 - 2_1 // \ 1_2 - let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); + let (h_11, c_11) = (H256::random(), make_childchangeset(&[1], &[])); + let (h_12, c_12) = (H256::random(), make_childchangeset(&[], &[])); + let (h_21, c_21) = (H256::random(), make_childchangeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_11, 1, &H256::default(), c_11).unwrap()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a993df4f111ac..a680bfbb27139 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,16 +26,21 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, 
Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; +use sp_core::storage::OwnedChildInfo; +use super::{ChildTrieChangeSet, ChangeSet}; const LAST_PRUNED: &[u8] = b"last_pruned"; -const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; +const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; + +type Keys = Vec<(Option, Vec)>; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, + death_index: HashMap, HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -46,22 +51,63 @@ pub struct RefWindow { pending_prunings: usize, } +impl RefWindow { + fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { + if let Some(child_index) = self.death_index.get_mut(ct) { + child_index.remove(key) + } else { + None + } + } +} + #[derive(Debug, PartialEq, Eq)] struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashSet, + deleted: HashMap, HashSet>, +} + +impl DeathRow { + fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { + if let Some(child_index) = self.deleted.get_mut(ct) { + child_index.remove(key) + } else { + false + } + } } #[derive(Encode, Decode)] -struct JournalRecord { +struct JournalRecordCompat { hash: BlockHash, inserted: Vec, deleted: Vec, } -fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) +#[derive(Encode, Decode)] +struct JournalRecordV1 { + hash: BlockHash, + inserted: Keys, + deleted: Keys, +} + +fn to_old_journal_key(block: u64) -> Vec { + to_meta_key(OLD_PRUNING_JOURNAL, &block) +} + +fn to_journal_key_v1(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL_V1, &block) +} + +impl From> for JournalRecordV1 { + fn from(old: JournalRecordCompat) -> Self { + 
JournalRecordV1 { + hash: old.hash, + inserted: vec![(None, old.inserted)], + deleted: vec![(None, old.deleted)], + } + } } impl RefWindow { @@ -83,37 +129,65 @@ impl RefWindow { // read the journal trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); loop { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + let journal_key = to_journal_key_v1(block); + let record: JournalRecordV1 = match db.get_meta(&journal_key) + .map_err(|e| Error::Db(e))? { + Some(record) => Decode::decode(&mut record.as_slice())?, + None => { + let journal_key = to_old_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => JournalRecordCompat::decode(&mut record.as_slice())?.into(), + None => break, + } }, - None => break, - } + }; + trace!( + target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", + block, + record.inserted.len(), + record.deleted.len(), + ); + pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); block += 1; } Ok(pruning) } - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { + fn import, Vec)>>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Keys, + ) { // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); + for (ct, inserted) in inserted { + for k in inserted { + if let Some(block) = self.remove_death_index(&ct, &k) { + self.death_rows[(block - self.pending_number) as usize] + .remove_deleted(&ct, &k); + } } } // add new keys let imported_block = self.pending_number + self.death_rows.len() as u64; - for k in deleted.iter() { - self.death_index.insert(k.clone(), imported_block); + for (ct, deleted) in deleted.iter() { + let entry = self.death_index.entry(ct.clone()).or_default(); + for k in deleted.iter() { + entry.insert(k.clone(), imported_block); + } + } + let mut deleted_death_row = HashMap::, HashSet>::new(); + for (ct, deleted) in deleted.into_iter() { + let entry = deleted_death_row.entry(ct).or_default(); + entry.extend(deleted); } + self.death_rows.push_back( DeathRow { hash: hash.clone(), - deleted: deleted.into_iter().collect(), + deleted: deleted_death_row, journal_key: journal_key, } ); @@ -144,7 +218,16 @@ impl RefWindow { if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - 
commit.data.deleted.extend(pruned.deleted.iter().cloned()); + + commit.data.extend(pruned.deleted.iter() + .map(|(ct, keys)| ChildTrieChangeSet { + info: ct.clone(), + data: ChangeSet { + inserted: Vec::new(), + deleted: keys.iter().cloned().collect(), + }, + })); + commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); self.pending_prunings += 1; @@ -155,16 +238,29 @@ impl RefWindow { /// Add a change set to the window. Creates a journal record and pushes it to `commit` pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); - let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); - let journal_record = JournalRecord { + trace!( + target: "state-db", + "Adding to pruning window: {:?} ({} inserted, {} deleted)", + hash, + commit.inserted_len(), + commit.deleted_len(), + ); + let inserted = commit.data.iter().map(|changeset| ( + changeset.info.clone(), + changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + )).collect(); + let deleted = commit.data.iter_mut().map(|changeset| ( + changeset.info.clone(), + ::std::mem::replace(&mut changeset.data.deleted, Vec::new()), + )).collect(); + + let journal_record = JournalRecordV1 { hash: hash.clone(), inserted, deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); + let journal_key = to_old_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -176,8 +272,12 @@ impl RefWindow { for _ in 0 .. 
self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for k in pruned.deleted.iter() { - self.death_index.remove(&k); + for (ct, deleted) in pruned.deleted.iter() { + if let Some(child_index) = self.death_index.get_mut(ct) { + for key in deleted.iter() { + child_index.remove(key); + } + } } self.pending_number += 1; } @@ -192,7 +292,11 @@ impl RefWindow { // deleted in case transaction fails and `revert_pending` is called. self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); + + self.death_index.retain(|_ct, child_index| { + child_index.retain(|_, block| *block < new_max_block); + !child_index.is_empty() + }); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -245,7 +349,7 @@ mod tests { assert!(pruning.have_block(&h)); pruning.apply_pending(); assert!(pruning.have_block(&h)); - assert!(commit.data.deleted.is_empty()); + assert_eq!(commit.deleted_len(), 0); assert_eq!(pruning.death_rows.len(), 1); assert_eq!(pruning.death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index accafa9bf831f..bb2a21219c6c9 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,11 +18,12 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSet}; +use sp_core::storage::OwnedChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, + pub data: HashMap, HashMap>, pub meta: HashMap, DBValue>, } @@ -39,16 +40,23 @@ impl NodeDb for TestDb { type Key = H256; 
fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(key).cloned()) + Ok(self.data.get(&None).and_then(|data| data.get(key).cloned())) } } impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { - self.data.extend(commit.data.inserted.iter().cloned()); + for ct in commit.data.iter() { + self.data.entry(ct.info.clone()).or_default() + .extend(ct.data.inserted.iter().cloned()) + } self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.data.deleted.iter() { - self.data.remove(k); + for ct in commit.data.iter() { + if let Some(self_data) = self.data.get_mut(&ct.info) { + for k in ct.data.deleted.iter() { + self_data.remove(k); + } + } } self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { @@ -73,21 +81,29 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } +pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> Vec> { + vec![ChildTrieChangeSet { + info: None, + data: make_changeset(inserted, deleted), + }] +} + pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { - data: make_changeset(inserted, deleted), + data: make_childchangeset(inserted, deleted), meta: ChangeSet::default(), } } pub fn make_db(inserted: &[u64]) -> TestDb { + let mut data = HashMap::new(); + data.insert(None, inserted.iter() + .map(|v| { + (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) + }) + .collect()); TestDb { - data: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), + data, meta: Default::default(), } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 384519cc1d69d..21a51b0385ca1 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -10,7 +10,8 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } 
impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index d32c54aae8c47..0407444e0055b 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,6 +18,8 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; @@ -184,7 +186,7 @@ pub enum ChildInfo<'a> { /// Owned version of `ChildInfo`. /// To be use in persistence layers. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub enum OwnedChildInfo { Default(OwnedChildTrie), } @@ -288,7 +290,7 @@ pub struct ChildTrie<'a> { /// Owned version of default child trie `ChildTrie`. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub struct OwnedChildTrie { /// See `ChildTrie` reference field documentation. data: Vec, From a0532d1a492cc4ad4403d0551f6bc6ee45b8b610 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 19:59:21 +0100 Subject: [PATCH 04/85] change transaction to be by child trie. 
--- client/api/src/backend.rs | 2 +- client/db/src/lib.rs | 70 ++++++++++++------- primitives/state-machine/src/backend.rs | 7 +- primitives/state-machine/src/lib.rs | 35 ---------- .../state-machine/src/proving_backend.rs | 11 ++- primitives/state-machine/src/trie_backend.rs | 24 ++++--- primitives/trie/src/lib.rs | 2 +- 7 files changed, 71 insertions(+), 80 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index a389af5671b32..d61034f5ad00b 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -24,7 +24,7 @@ use sp_runtime::{generic::BlockId, Justification, Storage}; use sp_runtime::traits::{Block as BlockT, NumberFor, HasherFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, + ChildStorageCollection, StorageCollection, }; use crate::{ blockchain::{ diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index de8fb754f5859..810991dd2387f 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -79,6 +79,7 @@ use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; pub use sc_state_db::PruningMode; +use sp_core::storage::OwnedChildInfo; #[cfg(feature = "test-helpers")] use sc_client::in_mem::Backend as InMemoryBackend; @@ -513,7 +514,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: PrefixedMemoryDB>, + db_updates: Vec<(Option, PrefixedMemoryDB>)>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -568,7 +569,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + fn update_db_storage( + &mut self, + update: Vec<(Option, PrefixedMemoryDB>)>, + ) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -1103,26 +1107,30 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let mut changesets = Vec::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; - for (key, (val, rc)) in operation.db_updates.drain() { - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - - changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - ops += 1; - bytes += key.len() as u64; - - changeset.deleted.push(key); + for (info, mut updates) in operation.db_updates.into_iter() { + let mut data: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + for (key, (val, rc)) in updates.drain() { + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + + data.inserted.push((key, val.to_vec())); + } else if rc < 0 { + ops += 1; + bytes += key.len() as u64; + + data.deleted.push(key); + } } + changesets.push(sc_state_db::ChildTrieChangeSet{ info, data }); } self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) + let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(&mut transaction, commit); @@ -1312,25 +1320,26 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.info { // children tries with prefixes + let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let 
keyspace_len = keyspace.len(); - key_buffer.copy_from_slice[..keyspace_len] = keyspace; - for (key, val) in commit.data.inserted.into_iter() { - key_buffer.resize(keyspace_len + key.len()); + key_buffer[..keyspace_len].copy_from_slice(keyspace); + for (key, val) in child_data.data.inserted.into_iter() { + key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.put(columns::STATE, &key_buffer[..], &val); } - for key in commit.data.deleted.into_iter() { - key_buffer.resize(keyspace_len + key.len()); + for key in child_data.data.deleted.into_iter() { + key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } } else { // top trie without prefixes - for (key, val) in commit.data.inserted.into_iter() { + for (key, val) in child_data.data.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } - for key in commit.data.deleted.into_iter() { + for key in child_data.data.deleted.into_iter() { transaction.delete(columns::STATE, &key[..]); } } @@ -1378,7 +1387,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(BlockImportOperation { pending_block: None, old_state, - db_updates: PrefixedMemoryDB::default(), + db_updates: Default::default(), storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_config_update: None, @@ -1898,7 +1907,9 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + let mut map: PrefixedMemoryDB> = Default::default(); + key = map.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.push((None, map)); op.set_block_data( header, Some(vec![]), @@ -1934,8 +1945,11 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); + let mut map: PrefixedMemoryDB> = Default::default(); + 
map.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { + map.remove(&key, EMPTY_PREFIX); + }); op.set_block_data( header, Some(vec![]), @@ -1971,7 +1985,9 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.remove(&key, EMPTY_PREFIX); + op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { + map.remove(&key, EMPTY_PREFIX); + }); op.set_block_data( header, Some(vec![]), diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 9ef9055a82a6e..e3b1cdfe39be9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,7 +26,7 @@ use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, + UsageInfo, StorageKey, StorageValue, }; /// A state backend is used to read state data and can have changes committed @@ -325,10 +325,7 @@ impl Consolidate for () { } } -impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, - StorageCollection, - )> { +impl Consolidate for Vec { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 9c3925da6b50b..173de031c5db7 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1025,39 +1025,4 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } - - //#[test] TODO this will not make sense when child transaction get separated - fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let subtrie1 = 
ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - assert!(!duplicate); - } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 5081104fdc15f..444cfc1eedd83 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -344,7 +344,12 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)> { - self.0.storage_root(delta) + let (root, mut tx) = self.0.storage_root(delta); + // This is hacky, it supposes we return a single child + // transaction. Next move should be to change proving backend + // transaction to not merge the child trie datas and use + // separate proof for each trie. 
+ (root, tx.remove(0).1) } fn child_storage_root( @@ -357,7 +362,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, child_info, delta) + let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); + (root, is_empty, tx.remove(0).1) } } @@ -445,6 +451,7 @@ mod tests { let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); + let mut trie_mdb = trie_mdb.remove(0).1; assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4676618c77026..c0052ce77b271 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,14 +20,15 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; -/// Patricia trie-based backend. Transaction type is an overlay of changes to commit. +/// Patricia trie-based backend. Transaction type is overlays of changes to commit +/// for this trie and child tries. 
pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, } @@ -71,7 +72,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = Vec<(Option, S::Overlay)>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -169,7 +170,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) + fn storage_root(&self, delta: I) -> (H::Out, Vec<(Option, S::Overlay)>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -187,7 +188,7 @@ impl, H: Hasher> Backend for TrieBackend where } } - (root, write_overlay) + (root, vec![(None, write_overlay)]) } fn child_storage_root( @@ -233,7 +234,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - (root, is_default, write_overlay) + (root, is_default, vec![(Some(child_info.to_owned()), write_overlay)]) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { @@ -324,13 +325,18 @@ pub mod tests { #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); + let tx = test_trie().storage_root(::std::iter::empty()).1; + for (_ct, mut tx) in tx.into_iter() { + assert!(tx.drain().is_empty()); + } } #[test] fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); + let (new_root, tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + for (_ct, mut tx) in tx.into_iter() { + assert!(!tx.drain().is_empty()); + } assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 
ca80c8dbd0370..fe8d7e66a6331 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -444,7 +444,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } -#[cfg(feature="test-helpers")] // TODO see if can be deleted +#[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, H: Hasher, From eb5961f54b06f96ecd70b8ca97b4c1207fd1dbe5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 20:42:34 +0100 Subject: [PATCH 05/85] slice index fix, many failing tests. --- client/db/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 810991dd2387f..96d49cc3d25f6 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1323,6 +1323,7 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); + key_buffer.resize(keyspace_len, 0); key_buffer[..keyspace_len].copy_from_slice(keyspace); for (key, val) in child_data.data.inserted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); From 67687f8ec4cc36495f3912f263aa074a3e5c5e3a Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 28 Jan 2020 21:33:04 +0100 Subject: [PATCH 06/85] fix state-db tests --- client/state-db/src/pruning.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a680bfbb27139..f55d7bf1afee8 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -260,7 +260,7 @@ impl RefWindow { deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_old_journal_key(block); + let journal_key = to_journal_key_v1(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, 
journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -351,7 +351,8 @@ mod tests { assert!(pruning.have_block(&h)); assert_eq!(commit.deleted_len(), 0); assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); + let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); + assert_eq!(death_index_len, 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); @@ -363,7 +364,8 @@ mod tests { assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); + let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); + assert!(death_index_len == 0); assert_eq!(pruning.pending_number, 1); } From 48df830d20b24f4745756e5d240333d156b334ef Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 29 Jan 2020 17:36:55 +0100 Subject: [PATCH 07/85] vec with multiple entry of a same rc prefixeddb did not make sense, switching to a btreemap. 
--- client/db/src/lib.rs | 21 ++++-------- primitives/state-machine/src/backend.rs | 32 +++++++++++++++++-- .../state-machine/src/proving_backend.rs | 17 ++++------ primitives/state-machine/src/trie_backend.rs | 14 +++++--- 4 files changed, 52 insertions(+), 32 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 96d49cc3d25f6..d42ef59285cb9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::HashMap; +use std::collections::{HashMap, BTreeMap}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -514,7 +514,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: Vec<(Option, PrefixedMemoryDB>)>, + db_updates: BTreeMap, PrefixedMemoryDB>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -571,7 +571,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: Vec<(Option, PrefixedMemoryDB>)>, + update: BTreeMap, PrefixedMemoryDB>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) @@ -1908,9 +1908,7 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - let mut map: PrefixedMemoryDB> = Default::default(); - key = map.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.push((None, map)); + key = op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1946,11 +1944,8 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - let mut map: PrefixedMemoryDB> = Default::default(); - map.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { - map.remove(&key, 
EMPTY_PREFIX); - }); + op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); + op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1986,9 +1981,7 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.iter_mut().for_each(|(ct, map)| if ct.is_none() { - map.remove(&key, EMPTY_PREFIX); - }); + op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index e3b1cdfe39be9..cd8a69f3f2d2a 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -22,11 +22,11 @@ use codec::Encode; use sp_core::storage::{ChildInfo, OwnedChildInfo}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; - +use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, + UsageInfo, StorageKey, StorageValue, StorageCollection, }; /// A state backend is used to read state data and can have changes committed @@ -325,12 +325,38 @@ impl Consolidate for () { } } -impl Consolidate for Vec { +impl Consolidate for Vec<( + Option<(StorageKey, OwnedChildInfo)>, + StorageCollection, + )> { fn consolidate(&mut self, mut other: Self) { self.append(&mut other); } } +impl Consolidate for BTreeMap { + fn consolidate(&mut self, other: Self) { + for (k, v) in other.into_iter() { + match self.entry(k) { + Entry::Occupied(mut e) => e.get_mut().consolidate(v), + Entry::Vacant(e) => { e.insert(v); }, + } + } + } +} + +impl Consolidate for Option { + fn consolidate(&mut self, other: Self) { + if let Some(v) = self.as_mut() { + if let Some(other) = other { + v.consolidate(other); + } + } else { + *self = other; + } + } +} + impl> Consolidate for 
sp_trie::GenericMemoryDB { fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 444cfc1eedd83..6d5b45596a7ed 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -268,7 +268,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord + Codec, { type Error = String; - type Transaction = S::Overlay; + type Transaction = Option; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { @@ -345,11 +345,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // This is hacky, it supposes we return a single child - // transaction. Next move should be to change proving backend - // transaction to not merge the child trie datas and use - // separate proof for each trie. 
- (root, tx.remove(0).1) + // We may rather want to return a btreemap + (root, tx.remove(&None)) } fn child_storage_root( @@ -363,7 +360,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord { let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); - (root, is_empty, tx.remove(0).1) + (root, is_empty, tx.remove(&Some(child_info.to_owned()))) } } @@ -449,10 +446,10 @@ mod tests { assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(0).1; - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + let mut trie_mdb = trie_mdb.remove(&None).unwrap(); + assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); } #[test] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index c0052ce77b271..9d17043de7e18 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -21,6 +21,7 @@ use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -72,7 +73,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = Vec<(Option, S::Overlay)>; + type Transaction = BTreeMap, S::Overlay>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -170,7 +171,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", 
"Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, Vec<(Option, S::Overlay)>) + fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -187,8 +188,9 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - - (root, vec![(None, write_overlay)]) + let mut tx = BTreeMap::new(); + tx.insert(None, write_overlay); + (root, tx) } fn child_storage_root( @@ -234,7 +236,9 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - (root, is_default, vec![(Some(child_info.to_owned()), write_overlay)]) + let mut tx = BTreeMap::new(); + tx.insert(Some(child_info.to_owned()), write_overlay); + (root, is_default, tx) } fn as_trie_backend(&mut self) -> Option<&TrieBackend> { From cb4c4a96ff2c30eca88f56a8c351bedaa89f10b9 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 29 Jan 2020 20:20:48 +0100 Subject: [PATCH 08/85] change set to btreemap, seems useless (at least do no solve changetrie issue). 
--- client/db/src/lib.rs | 25 +++++++++++------- client/state-db/src/lib.rs | 34 ++++++++++++++++++------ client/state-db/src/noncanonical.rs | 40 ++++++++++++++--------------- client/state-db/src/pruning.rs | 20 +++++++-------- client/state-db/src/test.rs | 19 +++++++------- 5 files changed, 81 insertions(+), 57 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d42ef59285cb9..651e54593f0e2 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap}; +use std::collections::{HashMap, BTreeMap, btree_map::Entry}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -1107,7 +1107,7 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = Vec::new(); + let mut changesets = BTreeMap::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { @@ -1125,7 +1125,14 @@ impl Backend { data.deleted.push(key); } } - changesets.push(sc_state_db::ChildTrieChangeSet{ info, data }); + match changesets.entry(info) { + Entry::Vacant(e) => { e.insert(data); }, + Entry::Occupied(mut e) => { + let e = e.get_mut(); + e.inserted.extend(data.inserted); + e.deleted.extend(data.deleted); + }, + } } self.state_usage.tally_writes(ops, bytes); @@ -1316,31 +1323,31 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if let Some(child_info) = child_data.info { + if let Some(child_info) = child_data.0 { // children tries with prefixes let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); 
key_buffer[..keyspace_len].copy_from_slice(keyspace); - for (key, val) in child_data.data.inserted.into_iter() { + for (key, val) in child_data.1.inserted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.put(columns::STATE, &key_buffer[..], &val); } - for key in child_data.data.deleted.into_iter() { + for key in child_data.1.deleted.into_iter() { key_buffer.resize(keyspace_len + key.len(), 0); key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } } else { // top trie without prefixes - for (key, val) in child_data.data.inserted.into_iter() { + for (key, val) in child_data.1.inserted.into_iter() { transaction.put(columns::STATE, &key[..], &val); } - for key in child_data.data.deleted.into_iter() { + for key in child_data.1.deleted.into_iter() { transaction.delete(columns::STATE, &key[..]); } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index adc038a0efaf4..18098af8b882c 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -36,7 +36,7 @@ mod pruning; use std::fmt; use parking_lot::RwLock; use codec::Codec; -use std::collections::{HashMap, hash_map::Entry}; +use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as BEntry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; @@ -122,6 +122,7 @@ pub struct ChangeSet { } /// A set of state node changes for a child trie. +/// TODO remove?? #[derive(Debug, Clone)] pub struct ChildTrieChangeSet { /// Change set of this element. @@ -131,11 +132,28 @@ pub struct ChildTrieChangeSet { pub info: Option, } +/// Change sets of all child trie (top is key None). +pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; + +/// Extends for `ChildTrieChangeSets` is merging. 
+fn extend_change_sets(set: &mut ChildTrieChangeSets, other: impl Iterator, ChangeSet)>) { + for (ci, o_cs) in other { + match set.entry(ci) { + BEntry::Occupied(mut e) => { + let entry = e.get_mut(); + entry.inserted.extend(o_cs.inserted); + entry.deleted.extend(o_cs.deleted); + }, + BEntry::Vacant(e) => { e.insert(o_cs); }, + } + } +} + /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { /// State node changes. - pub data: Vec>, + pub data: ChildTrieChangeSets, /// Metadata changes. pub meta: ChangeSet>, } @@ -143,12 +161,12 @@ pub struct CommitSet { impl CommitSet { /// Number of inserted key value element in the set. pub fn inserted_len(&self) -> usize { - self.data.iter().map(|set| set.data.inserted.len()).sum() + self.data.iter().map(|set| set.1.inserted.len()).sum() } /// Number of deleted key value element in the set. pub fn deleted_len(&self) -> usize { - self.data.iter().map(|set| set.data.deleted.len()).sum() + self.data.iter().map(|set| set.1.deleted.len()).sum() } } @@ -261,7 +279,7 @@ impl StateDbSync { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - mut changeset: Vec>, + mut changeset: ChildTrieChangeSets, ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { @@ -272,7 +290,7 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { for changeset in changeset.iter_mut() { - changeset.data.deleted.clear(); + changeset.1.deleted.clear(); } // write changes immediately Ok(CommitSet { @@ -299,7 +317,7 @@ impl StateDbSync { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { for commit in commit.data.iter_mut() { - commit.data.deleted.clear(); + commit.1.deleted.clear(); } } } @@ -451,7 +469,7 @@ impl StateDb { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: Vec>, + changeset: ChildTrieChangeSets, ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } diff --git a/client/state-db/src/noncanonical.rs 
b/client/state-db/src/noncanonical.rs index 1bb4fd0914210..93f26245e28a5 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -22,7 +22,7 @@ use std::fmt; use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; +use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; use sp_core::storage::OwnedChildInfo; @@ -232,7 +232,7 @@ impl NonCanonicalOverlay { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: Vec>, + changeset: ChildTrieChangeSets, ) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); @@ -275,16 +275,16 @@ impl NonCanonicalOverlay { let mut deleted = Vec::with_capacity(changeset.len()); for changeset in changeset.into_iter() { inserted_block.push(( - changeset.info.clone(), - changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + changeset.0.clone(), + changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), )); inserted.push(( - changeset.info.clone(), - changeset.data.inserted, + changeset.0.clone(), + changeset.1.inserted, )); deleted.push(( - changeset.info, - changeset.data.deleted, + changeset.0, + changeset.1.deleted, )); } @@ -383,10 +383,10 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - commit.data.extend(overlay.inserted.iter() - .map(|(ct, keys)| ChildTrieChangeSet { - info: ct.clone(), - data: ChangeSet { + crate::extend_change_sets(&mut commit.data, overlay.inserted.iter() + .map(|(ct, keys)| ( + ct.clone(), + ChangeSet { inserted: keys.iter().map(|k| ( k.clone(), self.values.get(k) @@ -394,15 +394,15 @@ impl NonCanonicalOverlay { )).collect(), deleted: Vec::new(), }, - })); - commit.data.extend(overlay.deleted.iter().cloned() - .map(|(ct, keys)| ChildTrieChangeSet { - 
info: ct, - data: ChangeSet { + ))); + crate::extend_change_sets(&mut commit.data, overlay.deleted.iter().cloned() + .map(|(ct, keys)| ( + ct, + ChangeSet { inserted: Vec::new(), deleted: keys, }, - })); + ))); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -639,8 +639,8 @@ mod tests { db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - let inserted_len = changeset.iter().map(|set| set.data.inserted.len()).sum(); - let deleted_len = changeset.iter().map(|set| set.data.deleted.len()).sum(); + let inserted_len = changeset.iter().map(|set| set.1.inserted.len()).sum(); + let deleted_len = changeset.iter().map(|set| set.1.deleted.len()).sum(); assert_eq!(finalization.inserted_len(), inserted_len); assert_eq!(finalization.deleted_len(), deleted_len); assert_eq!(finalization.meta.inserted.len(), 1); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index f55d7bf1afee8..fdf5dec0515b7 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -27,7 +27,7 @@ use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; use sp_core::storage::OwnedChildInfo; -use super::{ChildTrieChangeSet, ChangeSet}; +use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -219,14 +219,14 @@ impl RefWindow { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - commit.data.extend(pruned.deleted.iter() - .map(|(ct, keys)| ChildTrieChangeSet { - info: ct.clone(), - data: ChangeSet { + crate::extend_change_sets(&mut commit.data, pruned.deleted.iter() + .map(|(ct, keys)| ( + ct.clone(), + ChangeSet { inserted: Vec::new(), deleted: 
keys.iter().cloned().collect(), }, - })); + ))); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); @@ -246,12 +246,12 @@ impl RefWindow { commit.deleted_len(), ); let inserted = commit.data.iter().map(|changeset| ( - changeset.info.clone(), - changeset.data.inserted.iter().map(|(k, _)| k.clone()).collect(), + changeset.0.clone(), + changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), )).collect(); let deleted = commit.data.iter_mut().map(|changeset| ( - changeset.info.clone(), - ::std::mem::replace(&mut changeset.data.deleted, Vec::new()), + changeset.0.clone(), + ::std::mem::replace(&mut changeset.1.deleted, Vec::new()), )).collect(); let journal_record = JournalRecordV1 { diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index bb2a21219c6c9..c7be13fb15595 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,7 +18,7 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSet}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; use sp_core::storage::OwnedChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] @@ -47,13 +47,13 @@ impl NodeDb for TestDb { impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { for ct in commit.data.iter() { - self.data.entry(ct.info.clone()).or_default() - .extend(ct.data.inserted.iter().cloned()) + self.data.entry(ct.0.clone()).or_default() + .extend(ct.1.inserted.iter().cloned()) } self.meta.extend(commit.meta.inserted.iter().cloned()); for ct in commit.data.iter() { - if let Some(self_data) = self.data.get_mut(&ct.info) { - for k in ct.data.deleted.iter() { + if let Some(self_data) = self.data.get_mut(&ct.0) { + for k in ct.1.deleted.iter() { self_data.remove(k); } } @@ -81,11 +81,10 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } -pub fn 
make_childchangeset(inserted: &[u64], deleted: &[u64]) -> Vec> { - vec![ChildTrieChangeSet { - info: None, - data: make_changeset(inserted, deleted), - }] +pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { + let mut result = ChildTrieChangeSets::new(); + result.insert(None, make_changeset(inserted, deleted)); + result } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { From a398b826fba66574ba99eb6a1441e06c021fe33e Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 10:18:32 +0100 Subject: [PATCH 09/85] moving get_1 to get, state-machine needs rework --- client/db/src/changes_tries_storage.rs | 6 +- client/db/src/lib.rs | 19 +++-- client/src/client.rs | 6 +- client/state-db/src/lib.rs | 20 ++++-- client/state-db/src/noncanonical.rs | 44 +++++++----- .../state-machine/src/changes_trie/mod.rs | 15 +++- .../state-machine/src/changes_trie/storage.rs | 19 ++++- .../state-machine/src/proving_backend.rs | 10 ++- .../state-machine/src/trie_backend_essence.rs | 69 +++++++++++++++---- 9 files changed, 160 insertions(+), 48 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 72163a5694213..ab8c7465badd1 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -486,7 +486,11 @@ where self.build_cache.read().with_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + _prefix: Prefix, + ) -> Result, String> { self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d42ef59285cb9..73ee6737c2955 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -666,9 +666,14 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get( 
+ &self, + trie: Option, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { let key = prefixed_key::>(key, prefix); - self.state_db.get(&key, self) + self.state_db.get(trie, &key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -694,7 +699,12 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get( + &self, + _trie: Option, + _key: &Block::Hash, + _prefix: Prefix, + ) -> Result, String> { Ok(None) } } @@ -1316,7 +1326,7 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.info { // children tries with prefixes @@ -1640,6 +1650,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), + None, // header in top trie &header.state_root(), (&[], None), ).unwrap_or(None).is_some() diff --git a/client/src/client.rs b/client/src/client.rs index 2850ef9b417b2..118487c4e7b72 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -564,7 +564,11 @@ impl Client where self.storage.with_cached_changed_keys(root, functor) } - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { self.storage.get(key, prefix) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index adc038a0efaf4..a54d73d4ab0f3 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,7 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::{OwnedChildInfo, ChildInfo}; const PRUNING_MODE: &[u8] = 
b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -400,10 +400,15 @@ impl StateDbSync { } } - pub fn get(&self, key: &Key, db: &D) -> Result, Error> + pub fn get( + &self, + trie: Option, + key: &Key, + db: &D, + ) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(key) { + if let Some(value) = self.non_canonical.get(trie, key) { return Ok(Some(value)); } db.get(key.as_ref()).map_err(|e| Error::Db(e)) @@ -472,10 +477,15 @@ impl StateDb { } /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get(&self, key: &Key, db: &D) -> Result, Error> + pub fn get( + &self, + trie: Option, + key: &Key, + db: &D, + ) -> Result, Error> where Key: AsRef { - self.db.read().get(key, db) + self.db.read().get(trie, key, db) } /// Revert all non-canonical blocks with the best block number. diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 1bb4fd0914210..67c2ba6e19f6b 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -21,11 +21,11 @@ //! `revert_pending` use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; +use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; use super::{Error, DBValue, ChildTrieChangeSet, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit @@ -34,6 +34,7 @@ const LAST_CANONICAL: &[u8] = b"last_canonical"; type Keys = Vec<(Option, Vec)>; type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; +type ChildKeyVals = BTreeMap, HashMap>; /// See module documentation. 
pub struct NonCanonicalOverlay { @@ -42,7 +43,7 @@ pub struct NonCanonicalOverlay { parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: HashMap, //ref counted + values: ChildKeyVals, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, pinned_insertions: HashMap>, @@ -94,8 +95,12 @@ struct BlockOverlay { deleted: Keys, } -fn insert_values(values: &mut HashMap, inserted: KeyVals) { - for (_ct, inserted) in inserted { +fn insert_values( + values: &mut ChildKeyVals, + inserted: KeyVals, +) { + for (ct, inserted) in inserted { + let values = values.entry(ct).or_default(); for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -104,9 +109,10 @@ fn insert_values(values: &mut HashMap, inserted: } } -fn discard_values(values: &mut HashMap, inserted: Keys) { - for inserted in inserted { - for k in inserted.1 { +fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { + for (ct, inserted) in inserted { + let values = values.entry(ct).or_default(); + for k in inserted { match values.entry(k) { Entry::Occupied(mut e) => { let (ref mut counter, _) = e.get_mut(); @@ -125,7 +131,7 @@ fn discard_values(values: &mut HashMap, inserted fn discard_descendants( levels: &mut VecDeque>>, - mut values: &mut HashMap, + mut values: &mut ChildKeyVals, index: usize, parents: &mut HashMap, pinned: &HashMap, @@ -168,7 +174,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = HashMap::new(); + let mut values = BTreeMap::new(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. 
Last canonicalized #{} ({:?})", block, hash); @@ -196,7 +202,7 @@ impl NonCanonicalOverlay { let overlay = BlockOverlay { hash: record.hash.clone(), journal_key, - inserted: inserted, + inserted, deleted: record.deleted, }; insert_values(&mut values, record.inserted); @@ -389,7 +395,10 @@ impl NonCanonicalOverlay { data: ChangeSet { inserted: keys.iter().map(|k| ( k.clone(), - self.values.get(k) + self.values + .get(ct) + .expect("For each key in overlays there's a value in values") + .get(k) .expect("For each key in overlays there's a value in values").1.clone(), )).collect(), deleted: Vec::new(), @@ -451,9 +460,12 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, key: &Key) -> Option { - if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); + pub fn get(&self, trie: Option, key: &Key) -> Option { + // TODO make storage over data representation of OwnedChildInfo to use borrow + if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { + if let Some((_, value)) = values.get(&key) { + return Some(value.clone()); + } } None } @@ -559,7 +571,7 @@ mod tests { use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(None, &H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d57cf75e19ae0..f3e0ae1159ba5 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; +use sp_core::storage::{OwnedChildInfo, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; 
use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -159,7 +160,11 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Changes trie storage -> trie backend essence adapter. @@ -168,7 +173,13 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + assert!(trie.is_none(), "Change trie is using a single top trie"); self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 9271eb87a8aa2..b875a3cc70c92 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -26,6 +26,7 @@ use crate::{ trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; +use sp_core::storage::ChildInfo; #[cfg(test)] use crate::backend::insert_into_memory_db; @@ -187,8 +188,14 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + fn get( + &self, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // Change trie is a default top trie. 
+ let trie = None; + MemoryDB::::get(&self.data.read().mdb, trie, key, prefix) } } @@ -205,7 +212,13 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + assert!(trie.is_none(), "Change trie is a single top trie"); self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6d5b45596a7ed..80e292310102b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -243,11 +243,17 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO switch proof model too (use a trie) if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(trie, key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0e1943e47209d..06db5c946d6da 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -27,13 +27,18 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, OwnedChildInfo}; use codec::Encode; /// 
Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Patricia trie-based pairs storage essence. @@ -359,10 +364,12 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out) -> Option { + // TODO need new trait with ct as parameter!!! if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - match self.storage.get(&key, EMPTY_PREFIX) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -382,7 +389,8 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { - match self.storage.get(&key, EMPTY_PREFIX) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -432,8 +440,9 @@ impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) - } else { - match self.storage.get(&key, prefix) { + } else { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -453,7 +462,8 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - match self.storage.get(&key, prefix) { + unimplemented!("new trait with ct as parameter"); + match self.storage.get(None, &key, prefix) { Ok(x) => x, 
Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -473,7 +483,12 @@ pub trait TrieBackendStorageRef { /// Type of in-memory overlay. type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String>; } /// Key-value pairs storage that is used by trie backend essence. @@ -485,8 +500,13 @@ impl + Send + Sync> TrieBackendStorage impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Storage::::get(self.deref(), key, prefix) + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + Storage::::get(self.deref(), trie, key, prefix) } } @@ -494,7 +514,14 @@ impl TrieBackendStorageRef for Arc> { impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO should we split prefixed memory db too?? -> likely yes: sharing + // rc does not make sense -> change type of PrefixedMemoryDB. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -502,17 +529,31 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { impl TrieBackendStorageRef for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { + // TODO should we split prefixed memory db too?? -> likely yes: sharing + // rc does not make sense -> change type of PrefixedMemoryDB. + // This could be mergde with prefixed impl through genericmemorydb Ok(hash_db::HashDB::get(self, key, prefix)) } } +// TODO remove : should not be used anymore. 
impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get( + &self, + trie: Option, + key: &H::Out, + prefix: Prefix, + ) -> Result, String> { let prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (prefix.0.as_slice(), prefix.1)) + self.0.get(trie, key, (prefix.0.as_slice(), prefix.1)) } } From 7b26a93634ecf1ea527cd479dd58956c8bdfabfa Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 14:47:11 +0100 Subject: [PATCH 10/85] Resolve a bit of child trie --- client/db/src/lib.rs | 6 +- .../state-machine/src/changes_trie/mod.rs | 3 - .../state-machine/src/changes_trie/storage.rs | 7 +- .../state-machine/src/in_memory_backend.rs | 5 +- .../state-machine/src/proving_backend.rs | 3 +- primitives/state-machine/src/trie_backend.rs | 67 +++++-- .../state-machine/src/trie_backend_essence.rs | 188 ++++++------------ primitives/storage/src/lib.rs | 2 +- primitives/trie/src/lib.rs | 88 ++------ 9 files changed, 143 insertions(+), 226 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 73ee6737c2955..52441a3666eaf 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -92,7 +92,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HasherFor + (Arc>>, Option), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. 
@@ -1601,7 +1601,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new(Arc::new(genesis_storage), root); + let db_state = DbState::::new((Arc::new(genesis_storage), None), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1620,7 +1620,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new(self.storage.clone(), *root); + let db_state = DbState::::new((self.storage.clone(), None), *root); let state = RefTrackingState::new( db_state, self.storage.clone(), diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index f3e0ae1159ba5..77fbd2f17ad14 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,7 +71,6 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; -use sp_core::storage::{OwnedChildInfo, ChildInfo}; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -175,11 +174,9 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - assert!(trie.is_none(), "Change trie is using a single top trie"); self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index b875a3cc70c92..7e4a79548d78a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -26,7 +26,6 @@ use crate::{ 
trie_backend_essence::TrieBackendStorageRef, changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, }; -use sp_core::storage::ChildInfo; #[cfg(test)] use crate::backend::insert_into_memory_db; @@ -193,9 +192,7 @@ impl Storage for InMemoryStorage Result, String> { - // Change trie is a default top trie. - let trie = None; - MemoryDB::::get(&self.data.read().mdb, trie, key, prefix) + MemoryDB::::get(&self.data.read().mdb, key, prefix) } } @@ -214,11 +211,9 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - assert!(trie.is_none(), "Change trie is a single top trie"); self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 0a29468bbc4ef..4dd50a74828a1 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,7 +24,7 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; @@ -268,8 +268,7 @@ impl Backend for InMemory where H::Out: Codec { .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); - let root = child_trie_root::, _, _, _>( - &storage_key, + let root = Layout::::trie_root( existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 80e292310102b..3f925e252ccc6 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ 
b/primitives/state-machine/src/proving_backend.rs @@ -245,7 +245,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { @@ -253,7 +252,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(trie, key, prefix)?; + let backend_value = self.backend.get(key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 9d17043de7e18..2daee660a8246 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -15,17 +15,16 @@ // along with Substrate. If not, see . //! Trie-based state machine backend. - use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef, ChildTrieBackendStorage}, }; /// Patricia trie-based backend. 
Transaction type is overlays of changes to commit @@ -86,7 +85,13 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + essence.storage(key) + } else { + Ok(None) + } } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -99,7 +104,13 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + essence.next_storage_key(key) + } else { + Ok(None) + } } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -116,7 +127,11 @@ impl, H: Hasher> Backend for TrieBackend where child_info: ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + essence.for_keys(f) + } } fn for_child_keys_with_prefix( @@ -126,7 +141,11 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + essence.for_keys_with_prefix(prefix, f) + } } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -206,7 +225,7 @@ impl, H: Hasher> Backend for TrieBackend where let default_root = default_child_trie_root::>(storage_key); let mut write_overlay = S::Overlay::default(); - let mut root = 
match self.storage(storage_key) { + let mut root: H::Out = match self.storage(storage_key) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -216,15 +235,16 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let keyspaced_backend = (self.essence.backend_storage(), child_info.keyspace()); + // TODO switch to &mut self like in overlay pr + let mut buf = Vec::new(); + let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); // Do not write prefix in overlay. let mut eph = Ephemeral::new( - &keyspaced_backend, + &child_essence, &mut write_overlay, ); - match child_delta_trie_root::, _, _, _, _, _>( - storage_key, + match delta_trie_root::, _, _, _, _>( &mut eph, root, delta @@ -246,6 +266,29 @@ impl, H: Hasher> Backend for TrieBackend where } } +impl, H: Hasher> TrieBackend where + H::Out: Ord + Codec, +{ + fn child_essence<'a>( + &'a self, + storage_key: &[u8], + child_info: ChildInfo<'a>, + buffer: &'a mut Vec, + ) -> Result, H>>, >::Error> { + let root: Option = self.storage(storage_key)? 
+ .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); + Ok(if let Some(root) = root { + Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( + self.essence.backend_storage(), + Some(child_info), + buffer, + ), root)) + } else { + None + }) + } +} + #[cfg(test)] pub mod tests { use std::collections::HashSet; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 06db5c946d6da..07bba94106d3f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,8 +23,8 @@ use std::marker::PhantomData; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, keyspace_as_prefix_alloc}; + read_trie_value, + for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; @@ -74,51 +74,9 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. - pub fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.storage(storage_key)? 
{ - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. - fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option, - key: &[u8], - ) -> Result, String> { let eph = BackendStorageDBRef::new(&self.storage); - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } - let trie = TrieDB::::new(dyn_eph, root) + let trie = TrieDB::::new(&eph, &self.root) .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -156,72 +114,22 @@ impl, H: Hasher> TrieBackendEssence where H::O read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) } - /// Get the value of child storage at given key. - pub fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); - - let eph = BackendStorageDBRef::new(&self.storage); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( + /// Retrieve all entries keys of storage and call `f` for each of those keys. 
+ pub fn for_keys( &self, - storage_key: &[u8], - child_info: ChildInfo, f: F, ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let eph = BackendStorageDBRef::new(&self.storage); - if let Err(e) = for_keys_in_child_trie::, _, BackendStorageDBRef>( - storage_key, - child_info.keyspace(), + if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( &eph, - &root, + &self.root, f, ) { debug!(target: "trie", "Error while iterating child storage: {}", e); } } - /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) @@ -368,8 +276,7 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, EMPTY_PREFIX) { + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -389,8 +296,7 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, EMPTY_PREFIX) { + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -441,8 +347,7 @@ impl<'a, S, H, O> hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, prefix) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -462,8 +367,7 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - unimplemented!("new trait with ct as parameter"); - match self.storage.get(None, &key, prefix) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -485,7 +389,6 @@ pub trait TrieBackendStorageRef { /// Get the value stored at key. 
fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -497,63 +400,88 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for Arc> { +impl TrieBackendStorageRef for (Arc>, Option) { type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.deref(), trie, key, prefix) + let child_info = self.1.as_ref(); + Storage::::get(self.0.deref(), child_info.map(|c| c.as_ref()), key, prefix) } } -// This implementation is used by test storage trie clients. -impl TrieBackendStorageRef for PrefixedMemoryDB { + +/// This is an essence for the child trie backend. +pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { + db: &'a B, + info: Option>, + buffer: &'a mut Vec, + _ph: PhantomData, +} + +impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { + /// Instantiate a `ChildTrieBackendStorage`. + pub fn new(db: &'a B, info: Option>, buffer: &'a mut Vec) -> Self { + ChildTrieBackendStorage { + db, + info, + buffer, + _ph: PhantomData, + } + } +} + +impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for ChildTrieBackendStorage<'a, H, B> { type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO should we split prefixed memory db too?? -> likely yes: sharing - // rc does not make sense -> change type of PrefixedMemoryDB. 
- Ok(hash_db::HashDB::get(self, key, prefix)) + if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { + // TODO switch to &mut self like in overlay pr and use commented code + /*self.buffer.resize(keyspace.len() + prefix.0.len(), 0); + self.buffer[..keyspace.len()].copy_from_slice(keyspace); + self.buffer[keyspace.len()..].copy_from_slice(prefix.0); + self.db.get(key, (self.buffer.as_slice(), prefix.1))*/ + + let prefix = keyspace_as_prefix_alloc(keyspace, prefix); + self.db.get(key, (prefix.0.as_slice(), prefix.1)) + } else { + self.db.get(key, prefix) + } } } -impl TrieBackendStorageRef for MemoryDB { - type Overlay = MemoryDB; + +// This implementation is used by test storage trie clients. +impl TrieBackendStorageRef for PrefixedMemoryDB { + type Overlay = PrefixedMemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { // TODO should we split prefixed memory db too?? -> likely yes: sharing // rc does not make sense -> change type of PrefixedMemoryDB. - // This could be mergde with prefixed impl through genericmemorydb Ok(hash_db::HashDB::get(self, key, prefix)) } } -// TODO remove : should not be used anymore. 
-impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for (&'a B, &'a [u8]) { - type Overlay = PrefixedMemoryDB; +impl TrieBackendStorageRef for MemoryDB { + type Overlay = MemoryDB; fn get( &self, - trie: Option, key: &H::Out, prefix: Prefix, ) -> Result, String> { - let prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(trie, key, (prefix.0.as_slice(), prefix.1)) + Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -562,6 +490,8 @@ mod test { use sp_core::{Blake2Hasher, H256}; use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; + use crate::trie_backend::TrieBackend; + use crate::backend::Backend; #[test] fn next_storage_key_and_next_child_storage_key_work() { @@ -592,7 +522,7 @@ mod test { trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); }; - let essence_1 = TrieBackendEssence::new(mdb, root_1); + let essence_1 = TrieBackend::new(mdb, root_1); assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); @@ -601,7 +531,7 @@ assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); + let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 0407444e0055b..34e7f0ead6d18 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -113,7 +113,7 @@ pub mod well_known_keys { /// /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. + /// `trie_root` can panic if invalid value is provided to it. 
pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); if has_right_prefix { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fe8d7e66a6331..788c3627b2af4 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -217,72 +217,45 @@ pub fn default_child_trie_root( L::trie_root::<_, Vec, Vec>(core::iter::empty()) } -/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - _storage_key: &[u8], - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, -{ - L::trie_root(input) -} - -/// Determine a child trie root given a hash DB and delta values. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_delta_trie_root( +/// Call `f` for all keys in a child trie. +pub fn for_keys_in_child_trie( _storage_key: &[u8], - db: &mut DB, - root_data: RD, - delta: I, -) -> Result<::Out, Box>> + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + mut f: F +) -> Result<(), Box>> where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
- root.as_mut().copy_from_slice(root_data.as_ref()); + root.as_mut().copy_from_slice(root_slice); - { - let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + let db = KeySpacedDB::new(&*db, keyspace); + let trie = TrieDB::::new(&db, &root)?; + let iter = trie.iter()?; - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } + for x in iter { + let (key, _) = x?; + f(&key); } - Ok(root) + Ok(()) } /// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( - _storage_key: &[u8], - keyspace: &[u8], +pub fn for_keys_in_trie( db: &DB, - root_slice: &[u8], + root: &TrieHash, mut f: F ) -> Result<(), Box>> where DB: hash_db::HashDBRef + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; + let trie = TrieDB::::new(&*db, &root)?; let iter = trie.iter()?; for x in iter { @@ -293,6 +266,7 @@ pub fn for_keys_in_child_trie( Ok(()) } + /// Record all keys for a given root. pub fn record_all_keys( db: &DB, @@ -316,26 +290,6 @@ pub fn record_all_keys( Ok(()) } -/// Read a value from the child trie. -pub fn read_child_trie_value( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) -} - /// Read a value from the child trie with given query. 
pub fn read_child_trie_value_with, DB>( _storage_key: &[u8], From f39ce3f3d894b6dce2adcc2a024a93612285971d Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Thu, 30 Jan 2020 17:46:19 +0100 Subject: [PATCH 11/85] small refact --- client/db/src/lib.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index bf6bac25beec5..2729825c63c92 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1117,11 +1117,11 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = BTreeMap::new(); + let mut changesets = BTreeMap::<_, sc_state_db::ChangeSet>>::new(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { - let mut data: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); + let data = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; @@ -1135,14 +1135,6 @@ impl Backend { data.deleted.push(key); } } - match changesets.entry(info) { - Entry::Vacant(e) => { e.insert(data); }, - Entry::Occupied(mut e) => { - let e = e.get_mut(); - e.inserted.extend(data.inserted); - e.deleted.extend(data.deleted); - }, - } } self.state_usage.tally_writes(ops, bytes); From 9f0c600c7f4c4bafebc482ee9f3c0d2a6c29fc48 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 09:35:11 +0100 Subject: [PATCH 12/85] indent --- client/db/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2729825c63c92..e011c0373efb8 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap, btree_map::Entry}; +use std::collections::{HashMap, BTreeMap}; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use 
sc_client_api::backend::NewBlockState; From ecb43d04f055de789c824eab865312c9dd883fee Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 09:35:11 +0100 Subject: [PATCH 13/85] Use const of null hash check on BackendStorageRef. --- client/src/cht.rs | 13 +++--- client/src/light/backend.rs | 2 +- client/src/light/call_executor.rs | 2 +- client/src/light/fetcher.rs | 3 +- client/transaction-pool/src/api.rs | 2 +- primitives/api/src/lib.rs | 2 +- primitives/core/src/lib.rs | 44 ++++++++++++++++++- primitives/state-machine/src/backend.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 4 +- .../src/changes_trie/changes_iterator.rs | 2 +- .../state-machine/src/changes_trie/mod.rs | 3 +- .../state-machine/src/changes_trie/prune.rs | 4 +- .../state-machine/src/changes_trie/storage.rs | 3 +- primitives/state-machine/src/ext.rs | 2 +- .../state-machine/src/in_memory_backend.rs | 4 +- primitives/state-machine/src/lib.rs | 4 +- .../state-machine/src/overlayed_changes.rs | 2 +- .../state-machine/src/proving_backend.rs | 5 ++- primitives/state-machine/src/testing.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 13 +++++- primitives/trie/src/lib.rs | 41 ++++++++++------- 22 files changed, 114 insertions(+), 49 deletions(-) diff --git a/client/src/cht.rs b/client/src/cht.rs index 29f19a77504b9..f470ee4fbe6fa 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -23,11 +23,10 @@ //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. 
-use hash_db; use codec::Encode; use sp_trie; -use sp_core::{H256, convert_hash}; +use sp_core::{H256, convert_hash, self}; use sp_runtime::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, @@ -86,7 +85,7 @@ pub fn compute_root( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, I: IntoIterator>>, { @@ -105,7 +104,7 @@ pub fn build_proof( ) -> ClientResult where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, BlocksI: IntoIterator, HashesI: IntoIterator>>, @@ -132,7 +131,7 @@ pub fn check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -161,7 +160,7 @@ pub fn check_proof_on_proving_backend( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord + codec::Codec, { do_check_proof::( @@ -185,7 +184,7 @@ fn do_check_proof( ) -> ClientResult<()> where Header: HeaderT, - Hasher: hash_db::Hasher, + Hasher: sp_core::Hasher, Hasher::Out: Ord, F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 34259ac895539..d7b992403b288 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -46,7 +46,7 @@ use sc_client_api::{ UsageInfo, }; use crate::light::blockchain::Blockchain; -use hash_db::Hasher; +use sp_core::Hasher; const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 01a93c78219bc..20b4faf4a303c 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -31,7 +31,7 @@ use sp_state_machine::{ 
execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, merge_storage_proofs, }; -use hash_db::Hasher; +use sp_core::Hasher; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index d66108b7f0adb..38bf4aaf24eb3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use hash_db::{HashDB, EMPTY_PREFIX}; +use sp_core::Hasher; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_runtime::traits::{ diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index bfc13c01fdf53..a2bf7fb6021ba 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -27,7 +27,7 @@ use sc_client_api::{ light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, BlockBody, }; -use sp_core::Hasher; +use sp_core::InnerHasher; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, transaction_validity::TransactionValidity, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index bde00d48172e8..97f24de2d4a5b 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -43,7 +43,7 @@ pub use sp_state_machine::{ pub use sp_core::NativeOrEncoded; #[doc(hidden)] #[cfg(feature = "std")] -pub use hash_db::Hasher; +pub use sp_state_machine::Hasher; #[doc(hidden)] #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 5bb9a3927f965..95efbce865d51 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -79,7 +79,8 @@ pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub 
use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; +pub use hash_db::Hasher as InnerHasher; +pub use hash_db::{Prefix, EMPTY_PREFIX}; // Switch back to Blake after PoC-3 is out // pub use self::hasher::blake::BlakeHasher; pub use self::hasher::blake2::Blake2Hasher; @@ -349,3 +350,44 @@ macro_rules! impl_maybe_marker { )+ } } + +/// Technical trait to avoid calculating empty root. +/// This assumes (same wrong assumption as for hashdb trait), +/// an empty node is `[0u8]`. +pub trait Hasher: InnerHasher { + /// Associated constant value. + const EMPTY_ROOT: Option<&'static [u8]>; + + + /// Test to call for all new implementation. + #[cfg(test)] + fn test_associated_empty_root() -> bool { + if let Some(root) = Self::EMPTY_ROOT.as_ref() { + let empty = Self::hash(&[0u8]); + if *root != empty.as_ref() { + return false; + } + } + + true + } +} + +impl Hasher for Blake2Hasher { + const EMPTY_ROOT: Option<&'static [u8]> = Some(&[ + 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, + 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, + 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, + 19, 20, + ]); +} + +#[cfg(test)] +mod test { + use super::{Blake2Hasher, Hasher}; + + #[test] + fn empty_root_const() { + assert!(Blake2Hasher::test_associated_empty_root()); + } +} diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index cd8a69f3f2d2a..cdb226935cc42 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -17,7 +17,7 @@ //! State machine backends. These manage the code and storage of contracts. 
use log::warn; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use codec::Encode; use sp_core::storage::{ChildInfo, OwnedChildInfo}; @@ -369,7 +369,7 @@ pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Op H: Hasher, I: IntoIterator, { - let mut root = ::Out::default(); + let mut root = ::Out::default(); { let mut trie = TrieDBMut::::new(mdb, &mut root); for (key, value) in input { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c731d4104b260..16e6a2da4583f 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -19,7 +19,7 @@ use std::collections::{BTreeMap, BTreeSet}; use std::collections::btree_map::Entry; use codec::{Decode, Encode}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use num_traits::One; use crate::{ StorageKey, @@ -291,7 +291,7 @@ fn prepare_digest_input<'a, H, Number>( trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.insert(trie_key.storage_key, trie_root); } diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 9f2d44967d716..9e185d0444c86 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -20,7 +20,7 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; -use hash_db::Hasher; +use sp_core::Hasher; use num_traits::Zero; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; diff --git 
a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 77fbd2f17ad14..45970e7a31dc7 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -67,7 +67,8 @@ pub use self::prune::prune; use std::collections::{HashMap, HashSet}; use std::convert::TryInto; -use hash_db::{Hasher, Prefix}; +use hash_db::Prefix; +use sp_core::Hasher; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index f6be3223ae9f8..94e8fe4bdaed2 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -16,7 +16,7 @@ //! Changes trie pruning-related functions. -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::Recorder; use log::warn; use num_traits::One; @@ -68,7 +68,7 @@ pub fn prune( trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); + let mut trie_root = ::Out::default(); trie_root.as_mut().copy_from_slice(&value[..]); children_roots.push(trie_root); } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7e4a79548d78a..ee2599d09548a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,7 +17,8 @@ //! Changes trie storage utilities. 
use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use hash_db::{Prefix, EMPTY_PREFIX}; +use sp_core::Hasher; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 9d70382bf4ccc..39dbe2e901592 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -22,8 +22,8 @@ use crate::{ changes_trie::State as ChangesTrieState, }; -use hash_db::Hasher; use sp_core::{ + Hasher, storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 4dd50a74828a1..ab96a63c63686 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,7 +22,7 @@ use crate::{ backend::{Backend, insert_into_memory_db}, }; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; -use hash_db::Hasher; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; @@ -232,7 +232,7 @@ impl Backend for InMemory where H::Out: Codec { fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) where I: IntoIterator, Option>)>, - ::Out: Ord, + ::Out: Ord, { let existing_pairs = self.inner.get(&None) .into_iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 173de031c5db7..66da5b8920450 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -20,7 +20,7 @@ use std::{fmt, result, collections::HashMap, panic::UnwindSafe, marker::PhantomData}; use log::{warn, trace}; -use hash_db::Hasher; +pub use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode, Codec}; use sp_core::{ storage::ChildInfo, 
NativeOrEncoded, NeverNativeValue, @@ -84,7 +84,7 @@ pub type DefaultHandler = fn(CallResult, CallResult) -> CallRe /// Type of changes trie transaction. pub type ChangesTrieTransaction = ( MemoryDB, - ChangesTrieCacheAction<::Out, N>, + ChangesTrieCacheAction<::Out, N>, ); /// Strategy for executing a call into the runtime. diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index a15e8c613d3d0..d983680ff0797 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -31,7 +31,7 @@ use codec::{Decode, Encode}; use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; use std::{mem, ops}; -use hash_db::Hasher; +use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 3f925e252ccc6..65e5d25027c9d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use parking_lot::RwLock; use codec::{Decode, Encode, Codec}; use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; +use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; +use sp_core::{Hasher, InnerHasher}; use sp_trie::{ MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys @@ -190,7 +191,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Global proof recorder, act as a layer over a hash db for recording queried /// data. -pub type ProofRecorder = Arc::Out, Option>>>; +pub type ProofRecorder = Arc::Out, Option>>>; /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. 
diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 39a34509b720b..56393747e9fdc 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -18,7 +18,7 @@ use std::any::{Any, TypeId}; use codec::Decode; -use hash_db::Hasher; +use sp_core::Hasher; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, StorageKey, StorageValue, diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2daee660a8246..6d445bc7c7562 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -16,7 +16,7 @@ //! Trie-based state machine backend. use log::{warn, debug}; -use hash_db::Hasher; +use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, OwnedChildInfo}; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 54ca824f956e9..6df54341f74fa 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -21,9 +21,10 @@ use std::ops::Deref; use std::sync::Arc; use std::marker::PhantomData; use log::{debug, warn}; -use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; +use sp_core::Hasher; +use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - read_trie_value, + read_trie_value, check_if_empty_root, for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; @@ -296,6 +297,10 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, H: 'a + Hasher, { fn get(&self, key: &H::Out) -> Option { + if check_if_empty_root::(key.as_ref()) { + 
return Some(vec![0u8]); + } + match self.storage.get(&key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { @@ -367,6 +372,10 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> H: 'a + Hasher, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + if check_if_empty_root::(key.as_ref()) { + return Some(vec![0u8]); + } + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index e265652d407e9..379d9d7a655a0 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -26,7 +26,7 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use hash_db::{Hasher, Prefix}; +use sp_core::{Hasher, InnerHasher, Prefix}; use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. @@ -49,14 +49,14 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); -impl TrieLayout for Layout { +impl TrieLayout for Layout { const USE_EXTENSION: bool = false; type Hash = H; type Codec = NodeCodec; } -impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where +impl TrieConfiguration for Layout { + fn trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -80,8 +80,8 @@ impl TrieConfiguration for Layout { /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for key only. 
@@ -105,7 +105,7 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. -pub type TrieHash = <::Hash as Hasher>::Out; +pub type TrieHash = <::Hash as InnerHasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. @@ -213,10 +213,20 @@ pub fn read_trie_value_with< /// Determine the default child trie root. pub fn default_child_trie_root( _storage_key: &[u8], -) -> ::Out { +) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } +pub fn check_if_empty_root ( + root: &[u8], +) -> bool { + if let Some(empty_root) = H::EMPTY_ROOT.as_ref() { + *empty_root == root + } else { + H::hash(&[0u8]).as_ref() == root + } +} + /// Call `f` for all keys in a child trie. pub fn for_keys_in_trie( db: &DB, @@ -304,7 +314,7 @@ pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option KeySpacedDB<'a, DB, H> where - H: Hasher, + H: InnerHasher, { /// instantiate new keyspaced db pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { @@ -314,7 +324,7 @@ impl<'a, DB, H> KeySpacedDB<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, + H: InnerHasher, { /// instantiate new keyspaced db pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { @@ -324,7 +334,7 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where DB: hash_db::HashDBRef, - H: Hasher, + H: InnerHasher, T: From<&'static [u8]>, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { @@ -341,7 +351,7 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, - H: Hasher, + H: InnerHasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + 
Send + Sync, { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { @@ -373,7 +383,7 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where #[cfg(feature="test-helpers")] impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where DB: hash_db::HashDB, - H: Hasher, + H: InnerHasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } @@ -397,7 +407,8 @@ mod tests { use super::*; use codec::{Encode, Compact}; use sp_core::Blake2Hasher; - use hash_db::{HashDB, Hasher}; + use hash_db::HashDB; + use sp_core::{Hasher, InnerHasher}; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; @@ -581,7 +592,7 @@ mod tests { #[test] fn random_should_work() { - let mut seed = ::Out::zero(); + let mut seed = ::Out::zero(); for test_i in 0..10000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); From ae29df5338c0111a9e235284cf0d8dbe5dc94e86 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 31 Jan 2020 15:35:59 +0100 Subject: [PATCH 14/85] Associated null node hash set to a non optional const. --- primitives/core/src/lib.rs | 28 ++++++++-------------------- primitives/trie/src/lib.rs | 7 ++----- 2 files changed, 10 insertions(+), 25 deletions(-) diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 95efbce865d51..113ff634ff8ca 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -355,39 +355,27 @@ macro_rules! impl_maybe_marker { /// This assumes (same wrong asumption as for hashdb trait), /// an empty node is `[0u8]`. pub trait Hasher: InnerHasher { - /// Associated constant value. - const EMPTY_ROOT: Option<&'static [u8]>; - - - /// Test to call for all new implementation. 
- #[cfg(test)] - fn test_associated_empty_root() -> bool { - if let Some(root) = Self::EMPTY_ROOT.as_ref() { - let empty = Self::hash(&[0u8]); - if *root != empty.as_ref() { - return false; - } - } - - true - } + /// Value for an empty root node, this + /// is the hash of `[0u8]` value. + const EMPTY_ROOT: &'static [u8]; } impl Hasher for Blake2Hasher { - const EMPTY_ROOT: Option<&'static [u8]> = Some(&[ + const EMPTY_ROOT: &'static [u8] = &[ 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, 135, 134, 216, 192, 130, 242, 157, 207, 76, 17, 19, 20, - ]); + ]; } #[cfg(test)] mod test { - use super::{Blake2Hasher, Hasher}; + use super::{Blake2Hasher, Hasher, InnerHasher}; #[test] fn empty_root_const() { - assert!(Blake2Hasher::test_associated_empty_root()); + let empty = Blake2Hasher::hash(&[0u8]); + assert_eq!(Blake2Hasher::EMPTY_ROOT, empty.as_ref()); } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 379d9d7a655a0..bb8c7f880aa92 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -217,14 +217,11 @@ pub fn default_child_trie_root( L::trie_root::<_, Vec, Vec>(core::iter::empty()) } +/// Test if this is an empty root node. pub fn check_if_empty_root ( root: &[u8], ) -> bool { - if let Some(empty_root) = H::EMPTY_ROOT.as_ref() { - *empty_root == root - } else { - H::hash(&[0u8]).as_ref() == root - } + H::EMPTY_ROOT == root } /// Call `f` for all keys in a child trie. From 6a06c0a81f914a4ead81f9a55209e36ca716f9b6 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Sat, 1 Feb 2020 00:12:21 +0100 Subject: [PATCH 15/85] Make ChildInfo borrow of OwnedChildInfo. 
--- client/chain-spec/src/chain_spec.rs | 10 +- client/db/src/lib.rs | 19 +- client/db/src/storage_cache.rs | 20 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 7 +- client/network/src/protocol/light_dispatch.rs | 4 +- client/rpc/src/state/state_full.rs | 18 +- client/rpc/src/state/tests.rs | 13 +- client/src/client.rs | 8 +- client/src/light/backend.rs | 10 +- client/src/light/fetcher.rs | 12 +- client/state-db/src/lib.rs | 4 +- client/state-db/src/noncanonical.rs | 2 +- frame/contracts/src/account_db.rs | 31 ++- frame/contracts/src/exec.rs | 31 ++- frame/contracts/src/lib.rs | 17 +- frame/contracts/src/rent.rs | 4 +- frame/contracts/src/tests.rs | 12 +- frame/support/src/storage/child.rs | 30 +-- primitives/externalities/src/lib.rs | 22 +- primitives/io/src/lib.rs | 50 ++--- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 28 +-- primitives/state-machine/src/basic.rs | 37 ++-- .../state-machine/src/changes_trie/build.rs | 19 +- primitives/state-machine/src/ext.rs | 44 ++-- .../state-machine/src/in_memory_backend.rs | 20 +- primitives/state-machine/src/lib.rs | 24 ++- .../state-machine/src/overlayed_changes.rs | 19 +- .../state-machine/src/proving_backend.rs | 32 +-- primitives/state-machine/src/trie_backend.rs | 21 +- .../state-machine/src/trie_backend_essence.rs | 13 +- primitives/storage/Cargo.toml | 2 +- primitives/storage/src/lib.rs | 202 +++++++++++------- primitives/trie/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 2 +- 37 files changed, 446 insertions(+), 351 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 81cbce5ea731c..8688e8ec9d1cd 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -22,7 +22,7 @@ use std::fs::File; use std::path::PathBuf; use std::rc::Rc; use serde::{Serialize, Deserialize}; -use sp_core::storage::{StorageKey, StorageData, ChildInfo, 
Storage, StorageChild}; +use sp_core::storage::{StorageKey, StorageData, OwnedChildInfo, Storage, StorageChild}; use sp_runtime::BuildStorage; use serde_json as json; use crate::RuntimeGenesis; @@ -77,10 +77,7 @@ impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); + let child_info = OwnedChildInfo::new_default(child_content.child_info.as_slice()); ( sk.0, StorageChild { @@ -287,8 +284,7 @@ impl ChainSpec { .collect(); let children = storage.children.into_iter() .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); + let (info, ci_type) = child.child_info.info(); ( StorageKey(sk), ChildRawStorage { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e011c0373efb8..b60157d5429ba 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -148,7 +148,7 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.child_storage(storage_key, child_info, key) @@ -161,7 +161,7 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(storage_key, child_info, key) @@ -174,7 +174,7 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(storage_key, child_info, key) @@ -191,7 +191,7 @@ impl StateBackend> for RefTrackingState 
{ fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(storage_key, child_info, f) @@ -200,7 +200,7 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -217,7 +217,7 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -237,7 +237,7 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(storage_key, child_info, prefix) @@ -668,7 +668,7 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { @@ -701,7 +701,7 @@ impl DbGenesisStorage { impl sp_state_machine::Storage> for DbGenesisStorage { fn get( &self, - _trie: Option, + _trie: Option<&ChildInfo>, _key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { @@ -1329,7 +1329,6 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm for child_data in commit.data.into_iter() { if let Some(child_info) = child_data.0 { // children tries with prefixes - let child_info = child_info.as_ref(); let keyspace = child_info.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index e300ec8b29312..71fae6771c39c 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -540,7 +540,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> 
Result>, Self::Error> { let key = (storage_key.to_vec(), key.to_vec()); @@ -577,7 +577,7 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(storage_key, child_info, key) @@ -586,7 +586,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(storage_key, child_info, f) @@ -599,7 +599,7 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(storage_key, child_info, key) @@ -616,7 +616,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -633,7 +633,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -653,7 +653,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(storage_key, child_info, prefix) @@ -677,8 +677,7 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_KEY_1); + const CHILD_KEY_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; #[test] fn smoke() { @@ -969,6 +968,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_KEY_1).unwrap(); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); 
let h0 = H256::random(); @@ -996,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], CHILD_INFO_1.to_owned())], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1.to_owned())], Some(h0), Some(0), true, diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index b991a0e65208c..bad7d71419285 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -57,7 +57,7 @@ pub trait Client: Send + Sync { &self, block: &Block::Hash, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -139,7 +139,7 @@ impl Client for SubstrateClient where &self, block: &Block::Hash, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2df8f6597c508..45f2ee3497380 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, OwnedChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1555,11 +1555,12 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { + let child_info = OwnedChildInfo::new_default(&request.child_info[..]); match 
self.context_data.chain.read_child_proof( &request.block, &request.storage_key, - child_info, + &*child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index bfa8daa181ca1..b50688eea67a0 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -681,7 +681,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::ChildInfo; + use sp_core::storage::OwnedChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -1035,7 +1035,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let child_info = ChildInfo::new_default(b"unique_id_1"); + let child_info = OwnedChildInfo::new_default(b"unique_id_1"); let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index cd77e8b080846..867bf5ff3314d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,8 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo, ChildType}, + Bytes, }; use sp_version::RuntimeVersion; use sp_state_machine::ExecutionStrategy; @@ -290,7 +291,7 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState StateBackend for FullState Option { + if child_type != 
ChildType::CryptoUniqueId as u32 { + None + } else { + Some(OwnedChildInfo::new_default(&child_definition[..])) + } +} + + /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index a0ab11e977204..f18e31e9d30e3 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); +const CHILD_INFO: &'static [u8] = b"\x01\x00\x00\x00unique_id"; #[test] fn should_return_storage() { @@ -38,17 +38,17 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(STORAGE_KEY.to_vec(), child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); + let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); assert_eq!( @@ -77,11 +77,12 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let (child_info, child_type) = child_info1.info(); let 
child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .add_child_storage("test", "key", child_info1, vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/client.rs b/client/src/client.rs index 118487c4e7b72..7acef6a4a910c 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -261,7 +261,7 @@ impl Client where &self, id: &BlockId, child_storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? @@ -277,7 +277,7 @@ impl Client where &self, id: &BlockId, storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? @@ -291,7 +291,7 @@ impl Client where &self, id: &BlockId, storage_key: &StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? 
@@ -333,7 +333,7 @@ impl Client where &self, id: &BlockId, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> sp_blockchain::Result where I: IntoIterator, diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index d7b992403b288..12186a5b61ac2 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -387,7 +387,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { @@ -408,7 +408,7 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { @@ -437,7 +437,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, action: A, ) { match *self { @@ -450,7 +450,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], action: A, ) { @@ -475,7 +475,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 38bf4aaf24eb3..ed6c04816ceca 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,13 +399,14 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> 
(TestChecker, Header, StorageProof, Vec) { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( b":child_storage:default:child1".to_vec(), - CHILD_INFO_1, + child_info1, b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -419,14 +420,14 @@ pub mod tests { let child_value = remote_client.child_storage( &remote_block_id, &StorageKey(b":child_storage:default:child1".to_vec()), - CHILD_INFO_1, + child_info1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, b":child_storage:default:child1", - CHILD_INFO_1, + child_info1, &[b"key1"], ).unwrap(); @@ -504,7 +505,7 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); + let child_infos = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap().info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 80ba18dcd130e..1cfc7fa8398a7 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -420,7 +420,7 @@ impl StateDbSync { pub fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Key, db: &D, ) -> Result, Error> @@ -497,7 +497,7 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. pub fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &Key, db: &D, ) -> Result, Error> diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 24e7a9f6369dd..6c5988446e881 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -460,7 +460,7 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, trie: Option, key: &Key) -> Option { + pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { // TODO make storage over data representation of OwnedChildInfo to use borrow if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { if let Some((_, value)) = values.get(&key) { diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 3615673f2d9dc..bf326dc44e70b 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -27,7 +27,7 @@ use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance, UpdateBalanceOutcome}; -use frame_support::{storage::child, StorageMap}; +use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; use frame_system; // Note: we don't provide Option because we can't create @@ -108,7 +108,13 @@ pub trait AccountDb { /// /// Trie id is None iff account doesn't have an associated trie id in >. 
/// Because DirectAccountDb bypass the lookup for this association. - fn get_storage(&self, account: &T::AccountId, trie_id: Option<&TrieId>, location: &StorageKey) -> Option>; + fn get_storage( + &self, + account: &T::AccountId, + trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, + location: &StorageKey + ) -> Option>; /// If account has an alive contract then return the code hash associated. fn get_code_hash(&self, account: &T::AccountId) -> Option>; /// If account has an alive contract then return the rent allowance associated. @@ -126,9 +132,14 @@ impl AccountDb for DirectAccountDb { &self, _account: &T::AccountId, trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| if let Some(child_info) = child_info { + child::get_raw(id, child_info, &blake2_256(location)) + } else { + child::get_raw(id, &*crate::trie_unique_id(&id[..]), &blake2_256(location)) + }) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -173,13 +184,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -216,19 +227,20 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } + let child_info = &*new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( &new_info.trie_id[..], - new_info.child_trie_unique_id(), + child_info, &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.trie_id[..], child_info, &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.trie_id[..], child_info, &blake2_256(&k)); } } @@ -334,13 +346,14 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { &self, account: &T::AccountId, trie_id: Option<&TrieId>, + child_info: Option<&ChildInfo>, location: &StorageKey ) -> Option> { self.local .borrow() .get(account) .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) + .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, child_info, location)) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index cfbefa2a72c93..87dbcacde5f43 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -23,7 +23,7 @@ use crate::rent; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, + storage::unhashed, 
dispatch::DispatchError, storage::child::OwnedChildInfo, traits::{WithdrawReason, Currency, Time, Randomness}, }; @@ -277,6 +277,7 @@ pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, + pub self_child_info: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -301,6 +302,7 @@ where ExecutionContext { parent: None, self_trie_id: None, + self_child_info: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -313,12 +315,13 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option, child_info: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { parent: Some(self), self_trie_id: trie_id, + self_child_info: child_info, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -371,8 +374,9 @@ where let caller = self.self_account.clone(); let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); + let dest_child_info = dest_trie_id.as_ref().map(|id| crate::trie_unique_id(id)); - self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { if value > BalanceOf::::zero() { try_or_exec_error!( transfer( @@ -457,8 +461,9 @@ where // TrieId has not been generated yet and storage is empty since contract is new. 
let dest_trie_id = None; + let dest_child_info = None; - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + let output = self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { try_or_exec_error!( nested.overlay.instantiate_contract(&dest, code_hash.clone()), input_data @@ -524,12 +529,17 @@ where } } - fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) - -> ExecResult + fn with_nested_context( + &mut self, + dest: T::AccountId, + trie_id: Option, + child_info: Option, + func: F, + ) -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id); + let mut nested = self.nested(dest, trie_id, child_info); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -695,7 +705,12 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) + self.ctx.overlay.get_storage( + &self.ctx.self_account, + self.ctx.self_trie_id.as_ref(), + self.ctx.self_child_info.as_deref(), + key, + ) } fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9ac43cbb50784..9811a52246c45 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,15 +225,15 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> child::ChildInfo { + pub fn child_trie_unique_id(&self) -> child::OwnedChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::OwnedChildInfo { let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) + child::OwnedChildInfo::new_default(&trie_id[start ..]) } pub type TombstoneContractInfo = @@ -716,10 +716,12 @@ impl Module { .get_alive() .ok_or(GetStorageError::IsTombstone)?; + let child_trie = contract_info.child_trie_unique_id(); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, Some(&contract_info.trie_id), + Some(&*child_trie), &key, ); Ok(maybe_value) @@ -826,16 +828,17 @@ impl Module { origin_contract.last_write }; + let child_trie = origin_contract.child_trie_unique_id(); let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), ).map(|value| { child::kill( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), ); @@ -857,7 +860,7 @@ impl Module { for (key, value) in key_values_taken { child::put_raw( &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), + &*child_trie, &blake2_256(key), &value, ); @@ -957,7 +960,7 @@ decl_storage! 
{ impl OnFreeBalanceZero for Module { fn on_free_balance_zero(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 46f915e64264f..3967fe03cf21b 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -183,7 +183,7 @@ fn enact_verdict( >::remove(account); child::kill_storage( &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &*alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -205,7 +205,7 @@ fn enact_verdict( child::kill_storage( &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), + &*alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 9a2ef36bb86f0..61e490a4210d4 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -320,6 +320,10 @@ fn account_removal_removes_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); + let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); + let child_info1 = Some(&*child_info1); + let child_info2 = Some(&*child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -365,15 +369,15 @@ fn account_removal_removes_storage() { // Verify that all entries from account 1 is removed, while // entries from account 2 is in place. 
{ - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1).is_none()); - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info1, key1).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info1, key2).is_none()); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key2), Some(b"4".to_vec()) ); } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index f549ffc25fd94..d1dd459b9635e 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -27,12 +27,12 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::ChildInfo; +pub use sp_core::storage::{ChildInfo, OwnedChildInfo}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let (data, child_type) = child_info.info(); @@ -54,7 +54,7 @@ pub fn get( /// explicit entry. pub fn get_or_default( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { get(storage_key, child_info, key).unwrap_or_else(Default::default) @@ -64,7 +64,7 @@ pub fn get_or_default( /// explicit entry. pub fn get_or( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -75,7 +75,7 @@ pub fn get_or( /// explicit entry.
pub fn get_or_else T>( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -85,7 +85,7 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &T, ) { @@ -104,7 +104,7 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let r = get(storage_key, child_info, key); @@ -118,7 +118,7 @@ pub fn take( /// the default for its type. pub fn take_or_default( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { take(storage_key, child_info, key).unwrap_or_else(Default::default) @@ -128,7 +128,7 @@ pub fn take_or_default( /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -139,7 +139,7 @@ pub fn take_or( /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -149,7 +149,7 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let (data, child_type) = child_info.info(); @@ -162,7 +162,7 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { let (data, child_type) = child_info.info(); sp_io::storage::child_storage_kill( @@ -175,7 +175,7 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. 
pub fn kill( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { let (data, child_type) = child_info.info(); @@ -190,7 +190,7 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let (data, child_type) = child_info.info(); @@ -205,7 +205,7 @@ pub fn get_raw( /// Put a raw byte slice into storage. pub fn put_raw( storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &[u8], ) { diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 350b65d190840..b6006d61bd242 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -48,7 +48,7 @@ pub trait Externalities: ExtensionStore { fn child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -61,7 +61,7 @@ pub trait Externalities: ExtensionStore { fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -78,7 +78,7 @@ pub trait Externalities: ExtensionStore { fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -88,7 +88,7 @@ pub trait Externalities: ExtensionStore { fn child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -101,7 +101,7 @@ pub trait Externalities: ExtensionStore { fn set_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Vec, ) { @@ -117,7 +117,7 @@ pub trait Externalities: ExtensionStore { fn clear_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { 
self.place_child_storage(storage_key, child_info, key.to_vec(), None) @@ -132,7 +132,7 @@ pub trait Externalities: ExtensionStore { fn exists_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { self.child_storage(storage_key, child_info, key).is_some() @@ -145,12 +145,12 @@ pub trait Externalities: ExtensionStore { fn next_child_storage_key( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: &ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -159,7 +159,7 @@ pub trait Externalities: ExtensionStore { fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ); @@ -170,7 +170,7 @@ pub trait Externalities: ExtensionStore { fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Option>, ); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index dce67133d39a3..454b732fe779e 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -38,7 +38,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + storage::{ChildStorageKey, OwnedChildInfo, ChildType}, }; use sp_core::{ @@ -82,6 +82,14 @@ fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { } } +#[cfg(feature = "std")] +fn resolve_child_info(child_type: u32, child_definition: &[u8]) -> OwnedChildInfo { + if child_type != ChildType::CryptoUniqueId as u32 { + panic!("Invalid child definition"); + } + 
OwnedChildInfo::new_default(&child_definition[..]) +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -109,9 +117,8 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) + let child_info = resolve_child_info(child_type, child_definition); + self.child_storage(storage_key, &*child_info, key).map(|s| s.to_vec()) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -146,9 +153,8 @@ pub trait Storage { value_offset: u32, ) -> Option { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.child_storage(storage_key, &*child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -175,9 +181,8 @@ pub trait Storage { value: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + let child_info = resolve_child_info(child_type, child_definition); + self.set_child_storage(storage_key, &*child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. 
@@ -196,9 +201,8 @@ pub trait Storage { key: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); + let child_info = resolve_child_info(child_type, child_definition); + self.clear_child_storage(storage_key, &*child_info, key); } /// Clear an entire child storage. @@ -211,9 +215,8 @@ pub trait Storage { child_type: u32, ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); + let child_info = resolve_child_info(child_type, child_definition); + self.kill_child_storage(storage_key, &*child_info); } /// Check whether the given `key` exists in storage. @@ -232,9 +235,8 @@ pub trait Storage { key: &[u8], ) -> bool { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.exists_child_storage(storage_key, &*child_info, key) } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -253,9 +255,8 @@ pub trait Storage { prefix: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); + let child_info = resolve_child_info(child_type, child_definition); + self.clear_child_prefix(storage_key, &*child_info, prefix); } /// "Commit" all existing operations and compute the resulting storage root. 
@@ -307,9 +308,8 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) + let child_info = resolve_child_info(child_type, child_definition); + self.next_child_storage_key(storage_key, &*child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 46930c35e8e8d..69bbb0adddf85 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { + if !map.child_info.try_update(&*other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index cdb226935cc42..f99ad53009261 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -55,7 +55,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -63,7 +63,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage_hash( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) @@ -78,7 +78,7 @@ pub trait Backend: std::fmt::Debug { fn exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { Ok(self.child_storage(storage_key, child_info, key)?.is_some()) @@ -91,7 
+91,7 @@ pub trait Backend: std::fmt::Debug { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -99,7 +99,7 @@ pub trait Backend: std::fmt::Debug { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ); @@ -119,7 +119,7 @@ pub trait Backend: std::fmt::Debug { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ); @@ -138,7 +138,7 @@ pub trait Backend: std::fmt::Debug { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -159,7 +159,7 @@ pub trait Backend: std::fmt::Debug { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); @@ -193,7 +193,7 @@ pub trait Backend: std::fmt::Debug { // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + self.child_storage_root(&storage_key[..], &*child_info, child_delta); txs.consolidate(child_txs); if empty { if return_child_roots { @@ -237,7 +237,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).child_storage(storage_key, child_info, key) @@ -246,7 +246,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { (*self).for_keys_in_child_storage(storage_key, child_info, f) @@ -259,7 +259,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, storage_key: &[u8], - 
child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).next_child_storage_key(storage_key, child_info, key) @@ -272,7 +272,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -290,7 +290,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d905657737a8a..5a17683354e4d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -130,7 +130,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option { self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() @@ -139,7 +139,7 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) @@ -148,7 +148,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage_hash(storage_key, child_info, key) @@ -157,7 +157,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { Externalities::child_storage(self, storage_key, child_info, key) @@ -171,7 +171,7 @@ impl Externalities for BasicExternalities { fn 
next_child_storage_key( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); @@ -194,7 +194,7 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -213,7 +213,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, ) { self.inner.children.remove(storage_key.as_ref()); } @@ -241,7 +241,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, prefix: &[u8], ) { if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { @@ -289,7 +289,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 + .child_storage_root(storage_key.as_ref(), &*child.child_info, delta).0 } else { default_child_trie_root::>(storage_key.as_ref()) }.encode() @@ -315,7 +315,7 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; #[test] fn commit_should_work() { @@ -340,6 +340,7 @@ mod tests { #[test] fn children_works() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { @@ -347,23 +348,23 @@ mod tests { children: map![ child_storage.clone() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: 
CHILD_INFO_1.to_owned(), + child_info: child_info1.to_owned(), } ] }); let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child(), child_info1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child(), child_info1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child(), child_info1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child(), child_info1, b"dog"); + assert_eq!(ext.child_storage(child(), child_info1, b"dog"), None); - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child(), child_info1); + assert_eq!(ext.child_storage(child(), child_info1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 16e6a2da4583f..639a29962ea99 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); + let child_info = changes.child_info(sk).to_owned(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -157,8 +157,8 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND are not in storage at the beginning of operation if let Some(sk) = 
storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) + if let Some(child_info) = child_info.as_deref() { + if !backend.exists_child_storage(sk, child_info, k) .map_err(|e| format!("{}", e))? { return Ok(map); } @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; fn prepare_for_build(zero: u64) -> ( InMemoryBackend, @@ -360,6 +360,9 @@ mod test { OverlayedChanges, Configuration, ) { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -436,13 +439,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), + ].into_iter().collect(), child_info2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -465,7 +468,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/ext.rs 
b/primitives/state-machine/src/ext.rs index 39dbe2e901592..41bfcdd906d1f 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -206,7 +206,7 @@ where fn child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -231,7 +231,7 @@ where fn child_storage_hash( &self, storage_key: ChildStorageKey, - _child_info: ChildInfo, + _child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -255,7 +255,7 @@ where fn original_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -276,7 +276,7 @@ where fn original_child_storage_hash( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -312,7 +312,7 @@ where fn exists_child_storage( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -351,7 +351,7 @@ where fn next_child_storage_key( &self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend @@ -396,7 +396,7 @@ where fn place_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -415,7 +415,7 @@ where fn kill_child_storage( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, @@ -451,7 +451,7 @@ where fn clear_child_prefix( &mut self, storage_key: ChildStorageKey, - child_info: ChildInfo, + child_info: 
&ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -507,7 +507,7 @@ where } else { let storage_key = storage_key.as_ref(); - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + if let Some(child_info) = self.overlay.child_info(storage_key).to_owned() { let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() @@ -518,7 +518,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + self.backend.child_storage_root(storage_key, child_info, delta) }; let root = root.encode(); @@ -714,14 +714,14 @@ mod tests { fn next_child_storage_key_works() { const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -731,7 +731,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info1.to_owned(), } ], }.into(); @@ -740,22 +740,22 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - 
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child(), child_info1, &[40]), Some(vec![50])); } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ab96a63c63686..f083e085e1b56 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -165,9 +165,9 @@ impl From, StorageCollectio impl InMemory { /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { + pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) + item.0.as_ref().map(|v|(&v.0[..], &*v.1)) ) } } @@ -187,7 +187,7 @@ impl Backend for 
InMemory where H::Out: Codec { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -211,7 +211,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, mut f: F, ) { self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -221,7 +221,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -253,7 +253,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -293,7 +293,7 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); @@ -320,7 +320,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) @@ -366,7 +366,7 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let child_info = OwnedChildInfo::new_default(b"unique_id_1"); let mut storage = storage.update( vec![( Some((b"1".to_vec(), child_info.clone())), @@ -374,7 +374,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + 
assert_eq!(trie_backend.child_storage(b"1", &*child_info, b"2").unwrap(), Some(b"3".to_vec())); assert!(trie_backend.storage(b"1").unwrap().is_some()); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 66da5b8920450..5b62c5ad3e05c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -551,7 +551,7 @@ where pub fn prove_child_read( mut backend: B, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -591,7 +591,7 @@ where pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -604,7 +604,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) + .child_storage(storage_key, child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -680,7 +680,7 @@ where H::Out: Ord + Codec, { // Not a prefixed memory db, using empty unique id and include root resolution. 
- proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + proving_backend.child_storage(storage_key, ChildInfo::top_trie(), key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -932,6 +932,8 @@ mod tests { #[test] fn set_child_storage_works() { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -946,26 +948,26 @@ mod tests { ext.set_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, + child_info1, b"abc" ), None @@ -974,6 +976,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1001,7 +1005,7 @@ mod tests { let remote_proof = prove_child_read( remote_backend, b":child_storage:default:sub1", - CHILD_INFO_1, + child_info1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( diff --git 
a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index d983680ff0797..7de9885dce550 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -251,7 +251,7 @@ impl OverlayedChanges { pub(crate) fn set_child_storage( &mut self, storage_key: StorageKey, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, val: Option, ) { @@ -279,7 +279,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_storage( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) @@ -353,7 +353,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); @@ -537,8 +537,9 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), + self.child_info(storage_key) + .expect("child info initialized in either committed or prospective") + .to_owned(), ) ); @@ -586,12 +587,12 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
- pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { - return Some(&ci); + return Some(&*ci); } if let Some((_, ci)) = self.committed.children.get(storage_key) { - return Some(&ci); + return Some(&*ci); } None } @@ -843,7 +844,7 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 65e5d25027c9d..e38ca5d573357 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -145,7 +145,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> pub fn child_storage( &mut self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result>, String> { let root = self.storage(storage_key)? 
@@ -284,7 +284,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.child_storage(storage_key, child_info, key) @@ -293,7 +293,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.0.for_keys_in_child_storage(storage_key, child_info, f) @@ -306,7 +306,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.next_child_storage_key(storage_key, child_info, key) @@ -323,7 +323,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -341,7 +341,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.0.child_keys(storage_key, child_info, prefix) @@ -358,7 +358,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -411,8 +411,8 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -482,15 +482,17 @@ mod tests { #[test] 
fn proof_recorded_and_checked_with_child() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = subtrie1.into_owned(); let own2 = subtrie2.into_owned(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (Some((own1.clone(), child_info1.to_owned())), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (Some((own2.clone(), child_info2.to_owned())), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); @@ -505,11 +507,11 @@ mod tests { vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own1[..], child_info1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own2[..], child_info2, &[i]).unwrap().unwrap(), vec![i] )); @@ -537,7 +539,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(&own1[..], child_info1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -545,7 +547,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&own1[..], child_info1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs 
b/primitives/state-machine/src/trie_backend.rs index 6d445bc7c7562..17a0d6fda8c15 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -82,7 +82,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { // TODO switch to &mut self like in overlay pr @@ -101,7 +101,7 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { // TODO switch to &mut self like in overlay pr @@ -124,7 +124,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { // TODO switch to &mut self like in overlay pr @@ -137,7 +137,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -215,7 +215,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -272,7 +272,7 @@ impl, H: Hasher> TrieBackend where fn child_essence<'a>( &'a self, storage_key: &[u8], - child_info: ChildInfo<'a>, + child_info: &'a ChildInfo, buffer: &'a mut Vec, ) -> Result, H>>, >::Error> { let root: Option = self.storage(storage_key)? 
@@ -299,14 +299,14 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -340,9 +340,10 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { + let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(CHILD_KEY_1, child_info1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 6df54341f74fa..cd6cb9f45c6b2 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync { /// Get a trie node. 
fn get( &self, - trie: Option, + trie: Option<&ChildInfo>, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -141,7 +141,7 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option, + child_info: Option<&ChildInfo>, ) { let eph = BackendStorageDBRef::new(&self.storage); @@ -417,8 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - let child_info = self.1.as_ref(); - Storage::::get(self.0.deref(), child_info.map(|c| c.as_ref()), key, prefix) + Storage::::get(self.0.deref(), self.1.as_deref(), key, prefix) } } @@ -426,14 +425,14 @@ impl TrieBackendStorageRef for (Arc>, Option> { db: &'a B, - info: Option>, + info: Option<&'a ChildInfo>, buffer: &'a mut Vec, _ph: PhantomData, } impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { /// Instantiate a `ChildTrieBackendStorage`. - pub fn new(db: &'a B, info: Option>, buffer: &'a mut Vec) -> Self { + pub fn new(db: &'a B, info: Option<&'a ChildInfo>, buffer: &'a mut Vec) -> Self { ChildTrieBackendStorage { db, info, @@ -504,7 +503,7 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); // Contains values let mut root_1 = H256::default(); // Contains child trie diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 557b206ecb29c..c0af25fc9ba9c 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -11,7 +11,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +codec = { package = "parity-scale-codec", 
version = "1.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 34e7f0ead6d18..1cba659ad3d95 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,13 +18,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use codec::{Decode, Encode}; +use codec::{Decode, Encode, Output}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, + borrow::ToOwned, convert::TryInto, ops::Deref}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -177,131 +177,171 @@ impl<'a> ChildStorageKey<'a> { } } -#[derive(Clone, Copy)] +#[repr(transparent)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] /// Information related to a child state. -pub enum ChildInfo<'a> { - Default(ChildTrie<'a>), +pub struct ChildInfo([u8]); + +impl Encode for ChildInfo { + fn encode_to(&self, output: &mut T) { + self.0.encode_to(output) + } } /// Owned version of `ChildInfo`. /// To be use in persistence layers. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] -pub enum OwnedChildInfo { - Default(OwnedChildTrie), -} +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[repr(transparent)] +pub struct OwnedChildInfo(Vec); -impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. - pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, - }) +impl ToOwned for ChildInfo { + type Owned = OwnedChildInfo; + + fn to_owned(&self) -> Self::Owned { + OwnedChildInfo(self.0.to_owned()) } +} - /// Instantiates a owned version of this child info. 
- pub fn to_owned(&self) -> OwnedChildInfo { - match self { - ChildInfo::Default(ChildTrie { data }) - => OwnedChildInfo::Default(OwnedChildTrie { - data: data.to_vec(), - }), +impl Borrow for OwnedChildInfo { + #[inline] + fn borrow(&self) -> &ChildInfo { + let data: &[u8] = self.0.borrow(); + unsafe { + sp_std::mem::transmute(data) } } +} + +impl Deref for OwnedChildInfo { + type Target = ChildInfo; + + #[inline] + fn deref(&self) -> &ChildInfo { + self.borrow() + } +} +impl ChildInfo { /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), + pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { + match ChildType::read_type(data) { + Some(x) if x == ChildType::CryptoUniqueId => Some({ + unsafe { + sp_std::mem::transmute(data) + } + }), _ => None, } } + /// Instantiates information for a child trie. + /// No check is done on consistency. + pub fn new_unchecked(data: &[u8]) -> &Self { + unsafe { + sp_std::mem::transmute(data) + } + } + + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn top_trie() -> &'static Self { + Self::new_unchecked(b"\x01\x00\x00\x00") + } + /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => (data, ChildType::CryptoUniqueId as u32), - } + let child_type = ChildType::read_type_unchecked(&self.0); + (&self.0, child_type as u32) } /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. 
pub fn keyspace(&self) -> &[u8] { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => &data[..], + match ChildType::read_type_unchecked(&self.0) { + ChildType::CryptoUniqueId => &self.0[4..], } } + + fn child_type(&self) -> ChildType { + ChildType::read_type_unchecked(&self.0[..]) + } } -/// Type of child. +/// Type of child, it is encoded in the four first byte of the +/// encoded child info (LE u32). /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] +#[derive(Clone, Copy, PartialEq)] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. + /// All bytes following the type in encoded form are this unique + /// id. + /// If the trie got a unique id of length 0 it is considered + /// as a top child trie. CryptoUniqueId = 1, } -impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, - }) - } +const LOWER_CHILD_TYPE: u32 = 1; +const HIGHER_CHILD_TYPE: u32 = 1; - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), +impl ChildType { + /// Try to read type from child definition. + pub fn read_type(slice: &[u8]) -> Option { + if slice.len() < 4 { + return None; } + slice[..4].try_into().ok() + .map(|b| u32::from_le_bytes(b)) + .filter(|b| *b >= LOWER_CHILD_TYPE && *b <= HIGHER_CHILD_TYPE) + .map(|b| unsafe { + sp_std::mem::transmute(b) + }) } - /// Get `ChildInfo` reference to this owned child info. 
- pub fn as_ref(&self) -> ChildInfo { - match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), - } + fn read_type_unchecked(slice: &[u8]) -> Self { + let child_type = u32::from_le_bytes(slice[..4].try_into() + .expect("This function is only called on initialized child info.")); + unsafe { sp_std::mem::transmute(child_type) } } } -/// A child trie of default type. -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Clone, Copy)] -pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], -} - -/// Owned version of default child trie `ChildTrie`. -#[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] -pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. - data: Vec, -} +impl OwnedChildInfo { + /// Create a new child trie information for default + /// child type. + pub fn new_default(unique_id: &[u8]) -> Self { + let mut vec = vec![0; unique_id.len() + 4]; + vec[..4].copy_from_slice(&(ChildType::CryptoUniqueId as u32).to_le_bytes()[..]); + vec[4..].copy_from_slice(unique_id); + OwnedChildInfo(vec) + } -impl OwnedChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - fn try_update(&mut self, other: ChildInfo) -> bool { - match other { - ChildInfo::Default(other) => self.data[..] 
== other.data[..], + pub fn try_update(&self, other: &ChildInfo) -> bool { + match self.child_type() { + ChildType::CryptoUniqueId => { + match other.child_type() { + ChildType::CryptoUniqueId => self.deref() == other, + } + }, } } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_top_trie() { + let top_trie = ChildInfo::top_trie(); + assert!(top_trie.child_type() == ChildType::CryptoUniqueId); + assert_eq!(top_trie.encode(), top_trie.to_owned().encode()); + // 16 compact enc 4 and le 1 u32 + assert!(top_trie.encode() == vec![16, 1, 0, 0, 0]); + assert_eq!(top_trie.keyspace(), &[]); + } +} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index bb8c7f880aa92..08d7b2d590866 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -405,7 +405,7 @@ mod tests { use codec::{Encode, Compact}; use sp_core::Blake2Hasher; use hash_db::HashDB; - use sp_core::{Hasher, InnerHasher}; + use sp_core::InnerHasher; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e95c5ad162760..61a6730bf8d1f 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -131,7 +131,7 @@ impl TestClientBuilder, child_key: impl AsRef<[u8]>, - child_info: ChildInfo, + child_info: &ChildInfo, value: impl AsRef<[u8]>, ) -> Self { let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 21cf94dfa673a..84fc61eb0b6b1 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -195,7 +195,7 @@ pub trait TestClientBuilderExt: Sized { fn add_extra_child_storage>, K: Into>, V: Into>>( mut self, storage_key: SK, - child_info: ChildInfo, + child_info: &ChildInfo, key: K, value: V, ) -> Self { From 
25aaa3a1c602f6408c5b15751595f614b657757d Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 3 Feb 2020 09:15:05 +0100 Subject: [PATCH 16/85] Removing unsafe cast, using ref_cast asumption for borrow case. --- Cargo.lock | 21 ++++++++++++++++++ primitives/storage/Cargo.toml | 1 + primitives/storage/src/lib.rs | 41 ++++++++++++++++------------------- 3 files changed, 41 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e1f02f5aa101..056d4ad0c2772 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4902,6 +4902,24 @@ dependencies = [ "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ref-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "1.3.3" @@ -6782,6 +6800,7 @@ version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", @@ -8774,6 +8793,8 @@ dependencies = [ "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +"checksum ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +"checksum ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" "checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c0af25fc9ba9c..ebb3062a37313 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -12,6 +12,7 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 1cba659ad3d95..8371ae30680bd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -22,6 +22,7 @@ use codec::{Decode, Encode, Output}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; +use ref_cast::RefCast; use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, borrow::ToOwned, 
convert::TryInto, ops::Deref}; @@ -178,7 +179,7 @@ impl<'a> ChildStorageKey<'a> { } #[repr(transparent)] -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RefCast)] /// Information related to a child state. pub struct ChildInfo([u8]); @@ -206,9 +207,7 @@ impl Borrow for OwnedChildInfo { #[inline] fn borrow(&self) -> &ChildInfo { let data: &[u8] = self.0.borrow(); - unsafe { - sp_std::mem::transmute(data) - } + ChildInfo::ref_cast(data) } } @@ -225,11 +224,9 @@ impl ChildInfo { /// Create child info from a linear byte packed value and a given type. pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { match ChildType::read_type(data) { - Some(x) if x == ChildType::CryptoUniqueId => Some({ - unsafe { - sp_std::mem::transmute(data) - } - }), + Some(x) if x == ChildType::CryptoUniqueId => Some( + ChildInfo::ref_cast(data) + ), _ => None, } } @@ -237,9 +234,7 @@ impl ChildInfo { /// Instantiates information for a child trie. /// No check is done on consistency. pub fn new_unchecked(data: &[u8]) -> &Self { - unsafe { - sp_std::mem::transmute(data) - } + ChildInfo::ref_cast(data) } /// Top trie defined as the unique crypto id trie with @@ -284,10 +279,14 @@ pub enum ChildType { CryptoUniqueId = 1, } -const LOWER_CHILD_TYPE: u32 = 1; -const HIGHER_CHILD_TYPE: u32 = 1; - impl ChildType { + fn new(repr: u32) -> Option { + Some(match repr { + r if r == ChildType::CryptoUniqueId as u32 => ChildType::CryptoUniqueId, + _ => return None, + }) + } + /// Try to read type from child definition. 
pub fn read_type(slice: &[u8]) -> Option { if slice.len() < 4 { @@ -295,16 +294,14 @@ impl ChildType { } slice[..4].try_into().ok() .map(|b| u32::from_le_bytes(b)) - .filter(|b| *b >= LOWER_CHILD_TYPE && *b <= HIGHER_CHILD_TYPE) - .map(|b| unsafe { - sp_std::mem::transmute(b) - }) + .and_then(|b| ChildType::new(b)) } fn read_type_unchecked(slice: &[u8]) -> Self { - let child_type = u32::from_le_bytes(slice[..4].try_into() - .expect("This function is only called on initialized child info.")); - unsafe { sp_std::mem::transmute(child_type) } + slice[..4].try_into().ok() + .map(|b| u32::from_le_bytes(b)) + .and_then(|b| ChildType::new(b)) + .expect("This function is only called on initialized child info.") } } From 0d45d8559a46d297311574c1304245e371ea7ee4 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 12:40:41 +0100 Subject: [PATCH 17/85] Borrow approach on OwnedChildInfo and ChildInfo did not make sense allocation whise unless we break all api for a close to nothing perf change: switching to simple single child info struct. 
--- Cargo.lock | 21 --- client/api/src/notifications.rs | 2 +- client/chain-spec/src/chain_spec.rs | 4 +- client/db/src/lib.rs | 7 +- client/db/src/storage_cache.rs | 6 +- client/network/src/protocol.rs | 6 +- client/network/src/protocol/light_dispatch.rs | 4 +- client/rpc/src/state/state_full.rs | 17 +- client/rpc/src/state/tests.rs | 10 +- client/src/light/backend.rs | 4 +- client/src/light/fetcher.rs | 15 +- client/state-db/src/lib.rs | 11 +- client/state-db/src/noncanonical.rs | 10 +- client/state-db/src/pruning.rs | 16 +- client/state-db/src/test.rs | 4 +- frame/contracts/src/account_db.rs | 8 +- frame/contracts/src/exec.rs | 41 ++--- frame/contracts/src/lib.rs | 19 +- frame/contracts/src/rent.rs | 4 +- frame/contracts/src/tests.rs | 4 +- frame/support/src/storage/child.rs | 2 +- primitives/io/src/lib.rs | 50 +++--- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 8 +- primitives/state-machine/src/basic.rs | 22 +-- .../state-machine/src/changes_trie/build.rs | 8 +- primitives/state-machine/src/ext.rs | 22 +-- .../state-machine/src/in_memory_backend.rs | 26 +-- primitives/state-machine/src/lib.rs | 18 +- .../state-machine/src/overlayed_changes.rs | 22 +-- .../state-machine/src/proving_backend.rs | 20 +-- primitives/state-machine/src/trie_backend.rs | 14 +- .../state-machine/src/trie_backend_essence.rs | 18 +- primitives/storage/Cargo.toml | 1 - primitives/storage/src/lib.rs | 164 ++++++------------ 35 files changed, 262 insertions(+), 348 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 056d4ad0c2772..7e1f02f5aa101 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4902,24 +4902,6 @@ dependencies = [ "rust-argon2 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ref-cast" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "ref-cast-impl" 
-version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "regex" version = "1.3.3" @@ -6800,7 +6782,6 @@ version = "2.0.0" dependencies = [ "impl-serde 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", "sp-debug-derive 2.0.0", "sp-std 2.0.0", @@ -8793,8 +8774,6 @@ dependencies = [ "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" "checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" "checksum redox_users 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" -"checksum ref-cast 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" -"checksum ref-cast-impl 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" "checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" "checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" "checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = 
"e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index 13bf06396d163..72a9f357fce33 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -323,7 +323,7 @@ mod tests { let child_filters = Some([ (StorageKey(vec![4]), None), (StorageKey(vec![5]), None), - ].into_iter().cloned().collect()); + ].iter().cloned().collect()); StorageChangeSet { changes: Arc::new(changes.0), child_changes: Arc::new(changes.1), diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 8688e8ec9d1cd..173941f6624c6 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -22,7 +22,7 @@ use std::fs::File; use std::path::PathBuf; use std::rc::Rc; use serde::{Serialize, Deserialize}; -use sp_core::storage::{StorageKey, StorageData, OwnedChildInfo, Storage, StorageChild}; +use sp_core::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}; use sp_runtime::BuildStorage; use serde_json as json; use crate::RuntimeGenesis; @@ -77,7 +77,7 @@ impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = OwnedChildInfo::new_default(child_content.child_info.as_slice()); + let child_info = ChildInfo::new_default(child_content.child_info.as_slice()); ( sk.0, StorageChild { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index b60157d5429ba..e1c35c0d676c7 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -79,7 +79,6 @@ use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; pub use sc_state_db::PruningMode; -use sp_core::storage::OwnedChildInfo; #[cfg(feature = "test-helpers")] use 
sc_client::in_mem::Backend as InMemoryBackend; @@ -92,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, Option), HasherFor + (Arc>>, Option), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -514,7 +513,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: BTreeMap, PrefixedMemoryDB>>, + db_updates: BTreeMap, PrefixedMemoryDB>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -571,7 +570,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: BTreeMap, PrefixedMemoryDB>>, + update: BTreeMap, PrefixedMemoryDB>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 71fae6771c39c..9a5c15e9910e6 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -677,7 +677,7 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; #[test] fn smoke() { @@ -968,7 +968,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_KEY_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_KEY_1); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); @@ -996,7 +996,7 @@ mod tests { &[], &[], vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1.to_owned())], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))], child_info1)], Some(h0), Some(0), true, diff --git a/client/network/src/protocol.rs 
b/client/network/src/protocol.rs index 45f2ee3497380..68352b3f404fb 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, OwnedChildInfo, ChildType}; +use sp_core::storage::{StorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1556,11 +1556,11 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { - let child_info = OwnedChildInfo::new_default(&request.child_info[..]); + let child_info = ChildInfo::new_default(&request.child_info[..]); match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, - &*child_info, + &child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index b50688eea67a0..bfa8daa181ca1 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -681,7 +681,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::OwnedChildInfo; + use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -1035,7 +1035,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let 
child_info = OwnedChildInfo::new_default(b"unique_id_1"); + let child_info = ChildInfo::new_default(b"unique_id_1"); let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 867bf5ff3314d..430c0230f0b6d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo, ChildType}, + storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, Bytes, }; use sp_version::RuntimeVersion; @@ -291,7 +291,7 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState StateBackend for FullState Option { - if child_type != ChildType::CryptoUniqueId as u32 { - None - } else { - Some(OwnedChildInfo::new_default(&child_definition[..])) - } -} - - /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. 
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index f18e31e9d30e3..dd26a8a42fac2 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: &'static [u8] = b"\x01\x00\x00\x00unique_id"; +const CHILD_INFO: &'static [u8] = b"unique_id"; #[test] fn should_return_storage() { @@ -38,11 +38,11 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(STORAGE_KEY.to_vec(), &child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); @@ -77,12 +77,12 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO); let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", child_info1, vec![42_u8]) + .add_child_storage("test", "key", &child_info1, vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 
12186a5b61ac2..f1bea18adc643 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -312,7 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + let mut storage: HashMap, ChildInfo)>, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index ed6c04816ceca..8bcbb80c775a3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,14 +399,14 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( b":child_storage:default:child1".to_vec(), - child_info1, + &child_info1, b"key1".to_vec(), b"value1".to_vec(), 
).build(); @@ -420,14 +420,14 @@ pub mod tests { let child_value = remote_client.child_storage( &remote_block_id, &StorageKey(b":child_storage:default:child1".to_vec()), - child_info1, + &child_info1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, b":child_storage:default:child1", - child_info1, + &child_info1, &[b"key1"], ).unwrap(); @@ -505,8 +505,9 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - ; - let child_infos = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap().info(); + + let child_info = ChildInfo::new_default(CHILD_INFO_1); + let child_infos = child_info.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1cfc7fa8398a7..046e40d0506af 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,7 @@ use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as B use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::{OwnedChildInfo, ChildInfo}; +use sp_core::storage::ChildInfo; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -129,14 +129,17 @@ pub struct ChildTrieChangeSet { pub data: ChangeSet, /// Child trie descripton. /// If not set, this is the top trie. - pub info: Option, + pub info: Option, } /// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; +pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; /// Extends for `ChildTrieChangeSets` is merging. -fn extend_change_sets(set: &mut ChildTrieChangeSets, other: impl Iterator, ChangeSet)>) { +fn extend_change_sets( + set: &mut ChildTrieChangeSets, + other: impl Iterator, ChangeSet)>, +) { for (ci, o_cs) in other { match set.entry(ci) { BEntry::Occupied(mut e) => { diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6c5988446e881..0b3bb36f253be 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -25,16 +25,16 @@ use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type 
Keys = Vec<(Option, Vec)>; -type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; -type ChildKeyVals = BTreeMap, HashMap>; +type Keys = Vec<(Option, Vec)>; +type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; +type ChildKeyVals = BTreeMap, HashMap>; /// See module documentation. pub struct NonCanonicalOverlay { @@ -461,7 +461,7 @@ impl NonCanonicalOverlay { /// Get a value from the node overlay. This searches in every existing changeset. pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { - // TODO make storage over data representation of OwnedChildInfo to use borrow + // TODO use top_trie instead of none if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { if let Some((_, value)) = values.get(&key) { return Some(value.clone()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index fdf5dec0515b7..77dd2e099ad8a 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,21 +26,21 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; -type Keys = Vec<(Option, Vec)>; +type Keys = Vec<(Option, Vec)>; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap, HashMap>, + death_index: HashMap, HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -52,7 +52,7 @@ pub struct RefWindow { } impl RefWindow { - fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { + fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { if let Some(child_index) = self.death_index.get_mut(ct) { child_index.remove(key) } else { @@ -65,11 +65,11 @@ impl RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap, HashSet>, + deleted: HashMap, HashSet>, } impl DeathRow { - fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { + fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { if let Some(child_index) = self.deleted.get_mut(ct) { child_index.remove(key) } else { @@ -153,7 +153,7 @@ impl RefWindow { Ok(pruning) } - fn import, Vec)>>( + fn import, Vec)>>( &mut self, hash: &BlockHash, journal_key: Vec, @@ -178,7 +178,7 @@ impl RefWindow { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::, HashSet>::new(); + let mut deleted_death_row = HashMap::, HashSet>::new(); for (ct, deleted) in deleted.into_iter() { let entry = deleted_death_row.entry(ct).or_default(); entry.extend(deleted); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index c7be13fb15595..76f7b09b83d84 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -19,11 +19,11 @@ use std::collections::HashMap; use sp_core::H256; use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, HashMap>, + pub data: HashMap, HashMap>, pub meta: HashMap, DBValue>, } diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 
bf326dc44e70b..081f1edc501e8 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -138,7 +138,7 @@ impl AccountDb for DirectAccountDb { trie_id.and_then(|id| if let Some(child_info) = child_info { child::get_raw(id, child_info, &blake2_256(location)) } else { - child::get_raw(id, &*crate::trie_unique_id(&id[..]), &blake2_256(location)) + child::get_raw(id, &crate::trie_unique_id(&id[..]), &blake2_256(location)) }) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { @@ -184,13 +184,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -227,7 +227,7 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } - let child_info = &*new_info.child_trie_unique_id(); + let child_info = &new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( &new_info.trie_id[..], diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 87dbcacde5f43..bc91ebcec56d0 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -23,9 +23,10 @@ use crate::rent; use sp_std::prelude::*; use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, storage::child::OwnedChildInfo, + storage::unhashed, dispatch::DispatchError, traits::{WithdrawReason, Currency, Time, Randomness}, }; +use 
sp_core::storage::ChildInfo; pub type AccountIdOf = ::AccountId; pub type CallOf = ::Call; @@ -276,8 +277,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, - pub self_trie_id: Option, - pub self_child_info: Option, + pub self_trie_info: Option<(TrieId, ChildInfo)>, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ -301,8 +301,7 @@ where pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { parent: None, - self_trie_id: None, - self_child_info: None, + self_trie_info: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), depth: 0, @@ -315,13 +314,12 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option, child_info: Option) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option<(TrieId, ChildInfo)>) -> ExecutionContext<'b, T, V, L> { ExecutionContext { parent: Some(self), - self_trie_id: trie_id, - self_child_info: child_info, + self_trie_info: trie_info, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), depth: self.depth + 1, @@ -374,9 +372,8 @@ where let caller = self.self_account.clone(); let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); - let dest_child_info = dest_trie_id.as_ref().map(|id| crate::trie_unique_id(id)); - self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { + self.with_nested_context(dest.clone(), dest_trie_id, |nested| { if value > BalanceOf::::zero() { try_or_exec_error!( transfer( @@ -461,9 +458,8 @@ where // TrieId has not been generated yet and storage is empty since contract is new. 
let dest_trie_id = None; - let dest_child_info = None; - let output = self.with_nested_context(dest.clone(), dest_trie_id, dest_child_info, |nested| { + let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { try_or_exec_error!( nested.overlay.instantiate_contract(&dest, code_hash.clone()), input_data @@ -529,17 +525,15 @@ where } } - fn with_nested_context( - &mut self, - dest: T::AccountId, - trie_id: Option, - child_info: Option, - func: F, - ) -> ExecResult + fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) + -> ExecResult where F: FnOnce(&mut ExecutionContext) -> ExecResult { let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id, child_info); + let mut nested = self.nested(dest, trie_id.map(|trie_id| { + let child_info = crate::trie_unique_id(&trie_id); + (trie_id, child_info) + })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) }; @@ -705,10 +699,13 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { + let (trie_id, child_info) = self.ctx.self_trie_info.as_ref() + .map(|info| (Some(&info.0), Some(&info.1))) + .unwrap_or((None, None)); self.ctx.overlay.get_storage( &self.ctx.self_account, - self.ctx.self_trie_id.as_ref(), - self.ctx.self_child_info.as_deref(), + trie_id, + child_info, key, ) } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 9811a52246c45..88bb9dda3221c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,15 +225,15 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> child::OwnedChildInfo { + pub fn child_trie_unique_id(&self) -> child::ChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::OwnedChildInfo { +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::OwnedChildInfo::new_default(&trie_id[start ..]) + child::ChildInfo::new_default(&trie_id[start ..]) } pub type TombstoneContractInfo = @@ -716,12 +716,12 @@ impl Module { .get_alive() .ok_or(GetStorageError::IsTombstone)?; - let child_trie = contract_info.child_trie_unique_id(); + let child_info = Some(trie_unique_id(&contract_info.trie_id)); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, Some(&contract_info.trie_id), - Some(&*child_trie), + child_info.as_ref(), &key, ); Ok(maybe_value) @@ -828,17 +828,16 @@ impl Module { origin_contract.last_write }; - let child_trie = origin_contract.child_trie_unique_id(); let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -860,7 +859,7 @@ impl Module { for (key, value) in key_values_taken { child::put_raw( &origin_contract.trie_id, - &*child_trie, + &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, ); @@ -960,7 +959,7 @@ decl_storage! 
{ impl OnFreeBalanceZero for Module { fn on_free_balance_zero(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, &*info.child_trie_unique_id()); + child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 3967fe03cf21b..a538e1eddb11d 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -183,7 +183,7 @@ fn enact_verdict( >::remove(account); child::kill_storage( &alive_contract_info.trie_id, - &*alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -205,7 +205,7 @@ fn enact_verdict( child::kill_storage( &alive_contract_info.trie_id, - &*alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 61e490a4210d4..8267bd1e6b263 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -322,8 +322,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some(&*child_info1); - let child_info2 = Some(&*child_info2); + let child_info1 = Some(&child_info1); + let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index d1dd459b9635e..601f33f79d853 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -27,7 +27,7 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::{ChildInfo, OwnedChildInfo}; +pub use sp_core::storage::ChildInfo; /// Return 
the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 454b732fe779e..5b923f9d74bc2 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -38,7 +38,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, OwnedChildInfo, ChildType}, + storage::{ChildStorageKey, ChildInfo}, }; use sp_core::{ @@ -82,14 +82,6 @@ fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { } } -#[cfg(feature = "std")] -fn resolve_child_info(child_type: u32, child_definition: &[u8]) -> OwnedChildInfo { - if child_type != ChildType::CryptoUniqueId as u32 { - panic!("Invalid child definition"); - } - OwnedChildInfo::new_default(&child_definition[..]) -} - /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -117,8 +109,9 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.child_storage(storage_key, &*child_info, key).map(|s| s.to_vec()) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, &child_info, key).map(|s| s.to_vec()) } /// Get `key` from storage, placing the value into `value_out` and return the number of @@ -153,8 +146,9 @@ pub trait Storage { value_offset: u32, ) -> Option { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.child_storage(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, &child_info, key) .map(|value| { let 
value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -181,8 +175,9 @@ pub trait Storage { value: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.set_child_storage(storage_key, &*child_info, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(storage_key, &child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. @@ -201,8 +196,9 @@ pub trait Storage { key: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.clear_child_storage(storage_key, &*child_info, key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(storage_key, &child_info, key); } /// Clear an entire child storage. @@ -215,8 +211,9 @@ pub trait Storage { child_type: u32, ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.kill_child_storage(storage_key, &*child_info); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(storage_key, &child_info); } /// Check whether the given `key` exists in storage. 
@@ -235,8 +232,9 @@ pub trait Storage { key: &[u8], ) -> bool { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.exists_child_storage(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(storage_key, &child_info, key) } /// Clear the storage of each key-value pair where the key starts with the given `prefix`. @@ -255,8 +253,9 @@ pub trait Storage { prefix: &[u8], ) { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.clear_child_prefix(storage_key, &*child_info, prefix); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(storage_key, &child_info, prefix); } /// "Commit" all existing operations and compute the resulting storage root. 
@@ -308,8 +307,9 @@ pub trait Storage { key: &[u8], ) -> Option> { let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = resolve_child_info(child_type, child_definition); - self.next_child_storage_key(storage_key, &*child_info, key) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(storage_key, &child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 69bbb0adddf85..2f46ae6e1d4c8 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(&*other_map.child_info) { + if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index f99ad53009261..d8c805508975b 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,7 +20,7 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ @@ -184,7 +184,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -193,7 +193,7 @@ pub trait Backend: std::fmt::Debug { // child first for (storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], 
&*child_info, child_delta); + self.child_storage_root(&storage_key[..], &child_info, child_delta); txs.consolidate(child_txs); if empty { if return_child_roots { @@ -326,7 +326,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option<(StorageKey, ChildInfo)>, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5a17683354e4d..644c629984f69 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -289,7 +289,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), &*child.child_info, delta).0 + .child_storage_root(storage_key.as_ref(), &child.child_info, delta).0 } else { default_child_trie_root::>(storage_key.as_ref()) }.encode() @@ -315,7 +315,7 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; #[test] fn commit_should_work() { @@ -340,7 +340,7 @@ mod tests { #[test] fn children_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { @@ -348,23 +348,23 @@ mod tests { children: map![ child_storage.clone() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: child_info1.to_owned(), + child_info: child_info1.clone(), } ] }); let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - assert_eq!(ext.child_storage(child(), child_info1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), 
Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), child_info1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), child_info1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child(), &child_info1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), child_info1, b"dog"); - assert_eq!(ext.child_storage(child(), child_info1, b"dog"), None); + ext.clear_child_storage(child(), &child_info1, b"dog"); + assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), None); - ext.kill_child_storage(child(), child_info1); - assert_eq!(ext.child_storage(child(), child_info1, b"doe"), None); + ext.kill_child_storage(child(), &child_info1); + assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 639a29962ea99..4bfe7d8f8ef23 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; fn prepare_for_build(zero: u64) -> ( InMemoryBackend, @@ -361,8 +361,8 @@ mod test { Configuration, ) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); - let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info2 = ChildInfo::new_default(CHILD_INFO_2); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), diff --git 
a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 41bfcdd906d1f..b1ea92c79ad90 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -714,14 +714,14 @@ mod tests { fn next_child_storage_key_works() { const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -731,7 +731,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - child_info: child_info1.to_owned(), + child_info: child_info1.clone(), } ], }.into(); @@ -740,22 +740,22 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[20]), Some(vec![30])); + 
assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), child_info1, vec![50], Some(vec![50])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), child_info1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[40]), Some(vec![50])); } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f083e085e1b56..753f8ccbbf9ae 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; +use sp_core::storage::{ChildInfo, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. 
trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,7 +121,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> + let mut inner: HashMap, BTreeMap> = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); inner.insert(None, inners.top); InMemory { @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, + inner: Vec<(Option<(StorageKey, ChildInfo)>, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -167,7 +167,7 @@ impl InMemory { /// child storage key iterator pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], &*v.1)) + item.0.as_ref().map(|v|(&v.0[..], &v.1)) ) } } @@ -175,7 +175,7 @@ impl InMemory { impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option<(StorageKey, ChildInfo)>, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -366,7 +366,7 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1"); + let child_info = 
ChildInfo::new_default(b"unique_id_1"); let mut storage = storage.update( vec![( Some((b"1".to_vec(), child_info.clone())), @@ -374,7 +374,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", &*child_info, b"2").unwrap(), + assert_eq!(trie_backend.child_storage(b"1", &child_info, b"2").unwrap(), Some(b"3".to_vec())); assert!(trie_backend.storage(b"1").unwrap().is_some()); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5b62c5ad3e05c..3aa57e9679f30 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -680,7 +680,7 @@ where H::Out: Ord + Codec, { // Not a prefixed memory db, using empty unique id and include root resolution. - proving_backend.child_storage(storage_key, ChildInfo::top_trie(), key) + proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -933,7 +933,7 @@ mod tests { #[test] fn set_child_storage_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -948,26 +948,26 @@ mod tests { ext.set_child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( 
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, ); assert_eq!( ext.child_storage( ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - child_info1, + &child_info1, b"abc" ), None @@ -977,7 +977,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1005,7 +1005,7 @@ mod tests { let remote_proof = prove_child_read( remote_backend, b":child_storage:default:sub1", - child_info1, + &child_info1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 7de9885dce550..4afc8a328ba8a 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -28,7 +28,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; use sp_core::Hasher; @@ -43,7 +43,7 @@ pub type StorageValue = Vec; pub type StorageCollection = Vec<(StorageKey, Option)>; /// In memory arrays of storage values for multiple child tries. -pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, OwnedChildInfo)>; +pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection, ChildInfo)>; /// The overlayed changes to state to be queried on top of the backend. /// @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. 
pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + pub children: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -433,7 +433,7 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. pub fn into_committed(self) -> ( impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, + impl Iterator)>, ChildInfo))>, ){ assert!(self.prospective.is_empty()); ( @@ -844,14 +844,14 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); + let child_info = ChildInfo::new_default(b"uniqueid"); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child.clone(), &child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child.clone(), &child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child.clone(), &child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); + overlay.set_child_storage(child.clone(), &child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child.clone(), &child_info, vec![30], None); // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); @@ -873,7 +873,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + 
overlay.set_child_storage(child.clone(), &child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e38ca5d573357..7256f6815c535 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -411,8 +411,8 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: &'static [u8] = b"\x01\x00\x00\x00unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"\x01\x00\x00\x00unique_id_2"; + const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -482,17 +482,17 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_INFO_1).unwrap(); - let child_info2 = ChildInfo::resolve_child_info(CHILD_INFO_2).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info2 = ChildInfo::new_default(CHILD_INFO_2); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = subtrie1.into_owned(); let own2 = subtrie2.into_owned(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), child_info1.to_owned())), + (Some((own1.clone(), child_info1.clone())), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), child_info2.to_owned())), + (Some((own2.clone(), child_info2.clone())), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); @@ -507,11 +507,11 @@ mod tests { 
vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], child_info1, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own1[..], &child_info1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], child_info2, &[i]).unwrap().unwrap(), + in_memory.child_storage(&own2[..], &child_info2, &[i]).unwrap().unwrap(), vec![i] )); @@ -539,7 +539,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], child_info1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -547,7 +547,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], child_info1, &[64]).unwrap().unwrap(), + proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 17a0d6fda8c15..8b29da56a6def 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,7 +19,7 @@ use log::{warn, debug}; use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use std::collections::BTreeMap; use codec::{Codec, Decode}; use crate::{ @@ -72,7 +72,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = BTreeMap, S::Overlay>; + type Transaction = BTreeMap, S::Overlay>; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -190,7 +190,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| 
debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) + fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -299,10 +299,10 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - const CHILD_UUID_1: &[u8] = b"\x01\x00\x00\x00unique_id_1"; + const CHILD_UUID_1: &[u8] = b"unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { @@ -340,10 +340,10 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { - let child_info1 = ChildInfo::resolve_child_info(CHILD_UUID_1).unwrap(); + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, child_info1, b"value3").unwrap(), + test_trie.child_storage(CHILD_KEY_1, &child_info1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index cd6cb9f45c6b2..0419556c18e37 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -28,7 +28,7 @@ use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use codec::Encode; /// Patricia trie-based storage trait. 
@@ -409,7 +409,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for (Arc>, Option) { +impl TrieBackendStorageRef for (Arc>, Option) { type Overlay = PrefixedMemoryDB; fn get( @@ -417,7 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - Storage::::get(self.0.deref(), self.1.as_deref(), key, prefix) + Storage::::get(self.0.deref(), self.1.as_ref(), key, prefix) } } @@ -503,7 +503,7 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::resolve_child_info(b"\x01\x00\x00\x00uniqueid").unwrap(); + let child_info = ChildInfo::new_default(b"uniqueid"); // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -542,19 +542,19 @@ mod test { let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"3"), Ok(Some(b"4".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"4"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"5"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + essence_2.next_child_storage_key(b"MyChild", &child_info, b"6"), Ok(None) ); } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 
ebb3062a37313..c0af25fc9ba9c 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -12,7 +12,6 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 8371ae30680bd..250a1fa325dfd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,14 +18,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, Output}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use ref_cast::RefCast; -use sp_std::{vec, vec::Vec, borrow::Cow, borrow::Borrow, - borrow::ToOwned, convert::TryInto, ops::Deref}; +use sp_std::{vec::Vec, borrow::Cow}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -56,7 +54,7 @@ pub struct StorageChild { pub data: StorageMap, /// Associated child info for a child /// trie. - pub child_info: OwnedChildInfo, + pub child_info: ChildInfo, } #[cfg(feature = "std")] @@ -178,89 +176,70 @@ impl<'a> ChildStorageKey<'a> { } } -#[repr(transparent)] -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, RefCast)] -/// Information related to a child state. -pub struct ChildInfo([u8]); -impl Encode for ChildInfo { - fn encode_to(&self, output: &mut T) { - self.0.encode_to(output) - } -} - -/// Owned version of `ChildInfo`. -/// To be use in persistence layers. +/// Information related to a child state. 
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] -#[repr(transparent)] -pub struct OwnedChildInfo(Vec); - -impl ToOwned for ChildInfo { - type Owned = OwnedChildInfo; - - fn to_owned(&self) -> Self::Owned { - OwnedChildInfo(self.0.to_owned()) - } +pub enum ChildInfo { + Default(ChildTrie), } -impl Borrow for OwnedChildInfo { - #[inline] - fn borrow(&self) -> &ChildInfo { - let data: &[u8] = self.0.borrow(); - ChildInfo::ref_cast(data) +impl ChildInfo { + /// Create a new child trie information for default + /// child type. + pub fn new_default(unique_id: &[u8]) -> Self { + ChildInfo::Default(ChildTrie { + data: unique_id.to_vec(), + }) } -} - -impl Deref for OwnedChildInfo { - type Target = ChildInfo; - #[inline] - fn deref(&self) -> &ChildInfo { - self.borrow() + /// Try to update with another instance, return false if both instance + /// are not compatible. + pub fn try_update(&mut self, other: &ChildInfo) -> bool { + match self { + ChildInfo::Default(child_trie) => child_trie.try_update(other), + } } -} -impl ChildInfo { /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(data: &[u8]) -> Option<&Self> { - match ChildType::read_type(data) { - Some(x) if x == ChildType::CryptoUniqueId => Some( - ChildInfo::ref_cast(data) - ), - _ => None, + pub fn resolve_child_info(child_type: u32, data: &[u8]) -> Option { + match ChildType::new(child_type) { + Some(ChildType::CryptoUniqueId) => Some(ChildInfo::new_default(data)), + None => None, } } - /// Instantiates information for a child trie. - /// No check is done on consistency. - pub fn new_unchecked(data: &[u8]) -> &Self { - ChildInfo::ref_cast(data) - } - /// Top trie defined as the unique crypto id trie with /// 0 length unique id. 
- pub fn top_trie() -> &'static Self { - Self::new_unchecked(b"\x01\x00\x00\x00") + pub fn top_trie() -> Self { + Self::new_default(&[]) } /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { - let child_type = ChildType::read_type_unchecked(&self.0); - (&self.0, child_type as u32) + match self { + ChildInfo::Default(ChildTrie { + data, + }) => (data, ChildType::CryptoUniqueId as u32), + } } /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { - match ChildType::read_type_unchecked(&self.0) { - ChildType::CryptoUniqueId => &self.0[4..], + match self { + ChildInfo::Default(ChildTrie { + data, + }) => &data[..], } } - fn child_type(&self) -> ChildType { - ChildType::read_type_unchecked(&self.0[..]) + /// Return type for child trie. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfo::Default(..) => ChildType::CryptoUniqueId, + } } } @@ -286,59 +265,26 @@ impl ChildType { _ => return None, }) } - - /// Try to read type from child definition. - pub fn read_type(slice: &[u8]) -> Option { - if slice.len() < 4 { - return None; - } - slice[..4].try_into().ok() - .map(|b| u32::from_le_bytes(b)) - .and_then(|b| ChildType::new(b)) - } - - fn read_type_unchecked(slice: &[u8]) -> Self { - slice[..4].try_into().ok() - .map(|b| u32::from_le_bytes(b)) - .and_then(|b| ChildType::new(b)) - .expect("This function is only called on initialized child info.") - } +} +/// A child trie of default type. +/// Default is the same implementation as the top trie. +/// It share its trie node storage with any kind of key, +/// and its unique id needs to be collision free (eg strong +/// crypto hash). 
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +pub struct ChildTrie { + /// Data containing unique id. + /// Unique id must but unique and free of any possible key collision + /// (depending on its storage behavior). + data: Vec, } -impl OwnedChildInfo { - /// Create a new child trie information for default - /// child type. - pub fn new_default(unique_id: &[u8]) -> Self { - let mut vec = vec![0; unique_id.len() + 4]; - vec[..4].copy_from_slice(&(ChildType::CryptoUniqueId as u32).to_le_bytes()[..]); - vec[4..].copy_from_slice(unique_id); - OwnedChildInfo(vec) - } - +impl ChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - pub fn try_update(&self, other: &ChildInfo) -> bool { - match self.child_type() { - ChildType::CryptoUniqueId => { - match other.child_type() { - ChildType::CryptoUniqueId => self.deref() == other, - } - }, + fn try_update(&mut self, other: &ChildInfo) -> bool { + match other { + ChildInfo::Default(other) => self.data[..] == other.data[..], } } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_top_trie() { - let top_trie = ChildInfo::top_trie(); - assert!(top_trie.child_type() == ChildType::CryptoUniqueId); - assert_eq!(top_trie.encode(), top_trie.to_owned().encode()); - // 16 compact enc 4 and le 1 u32 - assert!(top_trie.encode() == vec![16, 1, 0, 0, 0]); - assert_eq!(top_trie.keyspace(), &[]); - } -} From 274a92357ca60f099b736627b9de8554aa08e967 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 16:42:12 +0100 Subject: [PATCH 18/85] Factoring map of children code, before switching key. 
--- client/db/src/lib.rs | 10 +-- client/state-db/src/lib.rs | 37 ++++----- client/state-db/src/noncanonical.rs | 20 ++--- client/state-db/src/pruning.rs | 8 +- client/state-db/src/test.rs | 2 +- primitives/state-machine/src/backend.rs | 12 +-- primitives/state-machine/src/trie_backend.rs | 11 ++- primitives/storage/src/lib.rs | 81 +++++++++++++++++++- 8 files changed, 121 insertions(+), 60 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index e1c35c0d676c7..c904d0b0cf296 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -41,7 +41,7 @@ mod stats; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::{HashMap, BTreeMap}; +use std::collections::HashMap; use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; use sc_client_api::backend::NewBlockState; @@ -56,7 +56,7 @@ use kvdb::{KeyValueDB, DBTransaction}; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -513,7 +513,7 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { old_state: CachingState, Block>, - db_updates: BTreeMap, PrefixedMemoryDB>>, + db_updates: ChildrenMap>>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB>, @@ -570,7 +570,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_db_storage( &mut self, - update: BTreeMap, PrefixedMemoryDB>>, + update: ChildrenMap>>, ) -> ClientResult<()> { self.db_updates = update; Ok(()) @@ -1116,7 +1116,7 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = BTreeMap::<_, 
sc_state_db::ChangeSet>>::new(); + let mut changesets = ChildrenMap::>>::default(); let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 046e40d0506af..dfcfe2b596be1 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -36,11 +36,11 @@ mod pruning; use std::fmt; use parking_lot::RwLock; use codec::Codec; -use std::collections::{BTreeMap, HashMap, hash_map::Entry, btree_map::Entry as BEntry}; +use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -114,17 +114,23 @@ impl fmt::Debug for Error { /// A set of state node changes. #[derive(Default, Debug, Clone)] -pub struct ChangeSet { +pub struct ChangeSet { /// Inserted nodes. pub inserted: Vec<(H, DBValue)>, /// Deleted nodes. pub deleted: Vec, } +impl ChangeSet { + fn merge(&mut self, other: ChangeSet) { + self.inserted.extend(other.inserted.into_iter()); + self.deleted.extend(other.deleted.into_iter()); + } +} /// A set of state node changes for a child trie. /// TODO remove?? #[derive(Debug, Clone)] -pub struct ChildTrieChangeSet { +pub struct ChildTrieChangeSet { /// Change set of this element. pub data: ChangeSet, /// Child trie descripton. @@ -133,35 +139,18 @@ pub struct ChildTrieChangeSet { } /// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = BTreeMap, ChangeSet>; - -/// Extends for `ChildTrieChangeSets` is merging. 
-fn extend_change_sets( - set: &mut ChildTrieChangeSets, - other: impl Iterator, ChangeSet)>, -) { - for (ci, o_cs) in other { - match set.entry(ci) { - BEntry::Occupied(mut e) => { - let entry = e.get_mut(); - entry.inserted.extend(o_cs.inserted); - entry.deleted.extend(o_cs.deleted); - }, - BEntry::Vacant(e) => { e.insert(o_cs); }, - } - } -} +pub type ChildTrieChangeSets = ChildrenMap>; /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] -pub struct CommitSet { +pub struct CommitSet { /// State node changes. pub data: ChildTrieChangeSets, /// Metadata changes. pub meta: ChangeSet>, } -impl CommitSet { +impl CommitSet { /// Number of inserted key value element in the set. pub fn inserted_len(&self) -> usize { self.data.iter().map(|set| set.1.inserted.len()).sum() diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 0b3bb36f253be..b4258f97aeb63 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -21,20 +21,20 @@ //! `revert_pending` use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry, BTreeMap}; +use std::collections::{HashMap, VecDeque, hash_map::Entry}; use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; // version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type Keys = Vec<(Option, Vec)>; -type KeyVals = Vec<(Option, Vec<(Key, DBValue)>)>; -type ChildKeyVals = BTreeMap, HashMap>; +type Keys = ChildrenVec>; +type KeyVals = ChildrenVec>; +type ChildKeyVals = ChildrenMap>; /// See module documentation. 
pub struct NonCanonicalOverlay { @@ -174,7 +174,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = BTreeMap::new(); + let mut values = ChildrenMap::default(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); @@ -389,7 +389,7 @@ impl NonCanonicalOverlay { // get the one we need to canonicalize let overlay = &level[index]; - crate::extend_change_sets(&mut commit.data, overlay.inserted.iter() + commit.data.extend_with(overlay.inserted.iter() .map(|(ct, keys)| ( ct.clone(), ChangeSet { @@ -403,15 +403,15 @@ impl NonCanonicalOverlay { )).collect(), deleted: Vec::new(), }, - ))); - crate::extend_change_sets(&mut commit.data, overlay.deleted.iter().cloned() + )), ChangeSet::merge); + commit.data.extend_with(overlay.deleted.iter().cloned() .map(|(ct, keys)| ( ct, ChangeSet { inserted: Vec::new(), deleted: keys, }, - ))); + )), ChangeSet::merge); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 77dd2e099ad8a..44fe7f6fc54ac 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,14 +26,14 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenVec}; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; -type Keys = Vec<(Option, Vec)>; +type Keys = ChildrenVec>; /// See module documentation. 
pub struct RefWindow { @@ -219,14 +219,14 @@ impl RefWindow { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - crate::extend_change_sets(&mut commit.data, pruned.deleted.iter() + commit.data.extend_with(pruned.deleted.iter() .map(|(ct, keys)| ( ct.clone(), ChangeSet { inserted: Vec::new(), deleted: keys.iter().cloned().collect(), }, - ))); + )), ChangeSet::merge); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 76f7b09b83d84..cc8639043a3d5 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -82,7 +82,7 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { - let mut result = ChildTrieChangeSets::new(); + let mut result = ChildTrieChangeSets::default(); result.insert(None, make_changeset(inserted, deleted)); result } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index d8c805508975b..f932e7cfbb716 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,9 +20,8 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; -use std::collections::{BTreeMap, btree_map::Entry}; use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, @@ -334,14 +333,9 @@ impl Consolidate for Vec<( } } -impl Consolidate for BTreeMap { +impl Consolidate for ChildrenMap { fn consolidate(&mut self, other: Self) { - for (k, v) in other.into_iter() { - match self.entry(k) { - Entry::Occupied(mut e) => e.get_mut().consolidate(v), 
- Entry::Vacant(e) => { e.insert(v); }, - } - } + self.extend_with(other.into_iter(), Consolidate::consolidate) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 8b29da56a6def..e63f01e360167 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,8 +19,7 @@ use log::{warn, debug}; use sp_core::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; -use std::collections::BTreeMap; +use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -72,7 +71,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord + Codec, { type Error = String; - type Transaction = BTreeMap, S::Overlay>; + type Transaction = ChildrenMap; type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { @@ -190,7 +189,7 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root(&self, delta: I) -> (H::Out, BTreeMap, S::Overlay>) + fn storage_root(&self, delta: I) -> (H::Out, ChildrenMap) where I: IntoIterator)> { let mut write_overlay = S::Overlay::default(); @@ -207,7 +206,7 @@ impl, H: Hasher> Backend for TrieBackend where Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } - let mut tx = BTreeMap::new(); + let mut tx = ChildrenMap::default(); tx.insert(None, write_overlay); (root, tx) } @@ -256,7 +255,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; - let mut tx = BTreeMap::new(); + let mut tx = ChildrenMap::default(); tx.insert(Some(child_info.to_owned()), write_overlay); (root, is_default, tx) } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 
250a1fa325dfd..9180ff720e6f4 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -21,6 +21,8 @@ use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; +#[cfg(feature = "std")] +use sp_std::collections::btree_map::BTreeMap; use sp_debug_derive::RuntimeDebug; use sp_std::{vec::Vec, borrow::Cow}; @@ -44,7 +46,7 @@ pub struct StorageData( /// Map of data to use in a storage, it is a collection of /// byte key and values. #[cfg(feature = "std")] -pub type StorageMap = std::collections::BTreeMap, Vec>; +pub type StorageMap = BTreeMap, Vec>; #[cfg(feature = "std")] #[derive(Debug, PartialEq, Eq, Clone)] @@ -288,3 +290,80 @@ impl ChildTrie { } } } + +#[cfg(feature = "std")] +#[derive(Clone, PartialEq, Eq, Debug)] +/// Type for storing a map of child trie related information. +/// A few utilities methods are defined. +pub struct ChildrenMap(pub BTreeMap, T>); + +/// Type alias for storage of children related content. +pub type ChildrenVec = Vec<(Option, T)>; + +/// Type alias for storage of children related content. +pub type ChildrenSlice<'a, T> = &'a [(Option, T)]; + +#[cfg(feature = "std")] +impl sp_std::ops::Deref for ChildrenMap { + type Target = BTreeMap, T>; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::ops::DerefMut for ChildrenMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +#[cfg(feature = "std")] +impl sp_std::default::Default for ChildrenMap { + fn default() -> Self { + ChildrenMap(BTreeMap::new()) + } +} + +#[cfg(feature = "std")] +impl ChildrenMap { + /// Extend for `ChildrenMap` is usually about merging entries, + /// this method extends two maps, by applying a merge function + /// on each of its entries. 
+ pub fn extend_with( + &mut self, + other: impl Iterator, T)>, + merge: impl Fn(&mut T, T), + ) { + use sp_std::collections::btree_map::Entry; + for (child_info, child_content) in other { + match self.0.entry(child_info) { + Entry::Occupied(mut entry) => { + merge(entry.get_mut(), child_content) + }, + Entry::Vacant(entry) => { + entry.insert(child_content); + }, + } + } + } + + /// Extends two maps, by enxtending entries with the same key. + pub fn extend_replace( + &mut self, + other: impl Iterator, T)>, + ) { + self.0.extend(other) + } +} + +#[cfg(feature = "std")] +impl IntoIterator for ChildrenMap { + type Item = (Option, T); + type IntoIter = sp_std::collections::btree_map::IntoIter, T>; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} From 2e47a1d1ea627a2fe94d4c3a4210156c9bd22d15 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Feb 2020 18:37:17 +0100 Subject: [PATCH 19/85] Switching children key from optional to simple ChildInfo. --- client/db/src/lib.rs | 35 ++++++++++------ client/state-db/src/lib.rs | 4 +- client/state-db/src/noncanonical.rs | 25 ++++++----- client/state-db/src/pruning.rs | 42 +++++++++---------- client/state-db/src/test.rs | 12 +++--- primitives/state-machine/src/ext.rs | 34 ++++++++++++++- .../state-machine/src/proving_backend.rs | 8 ++-- primitives/state-machine/src/trie_backend.rs | 4 +- .../state-machine/src/trie_backend_essence.rs | 6 +-- primitives/storage/src/lib.rs | 24 +++++++---- 10 files changed, 123 insertions(+), 71 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c904d0b0cf296..a447a6b87801e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -91,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. 
pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, Option), HasherFor + (Arc>>, ChildInfo), HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -667,7 +667,7 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { @@ -700,7 +700,7 @@ impl DbGenesisStorage { impl sp_state_machine::Storage> for DbGenesisStorage { fn get( &self, - _trie: Option<&ChildInfo>, + _trie: &ChildInfo, _key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { @@ -1326,9 +1326,9 @@ impl Backend { fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if let Some(child_info) = child_data.0 { + if !child_data.0.is_top_trie() { // children tries with prefixes - let keyspace = child_info.keyspace(); + let keyspace = child_data.0.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); key_buffer[..keyspace_len].copy_from_slice(keyspace); @@ -1598,7 +1598,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new((Arc::new(genesis_storage), None), root); + let db_state = DbState::::new((Arc::new(genesis_storage), ChildInfo::top_trie()), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1617,7 +1617,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new((self.storage.clone(), None), *root); + let db_state = DbState::::new((self.storage.clone(), ChildInfo::top_trie()), *root); let state = 
RefTrackingState::new( db_state, self.storage.clone(), @@ -1647,7 +1647,7 @@ impl sc_client_api::backend::Backend for Backend { Ok(Some(header)) => { sp_state_machine::Storage::get( self.storage.as_ref(), - None, // header in top trie + &ChildInfo::top_trie(), &header.state_root(), (&[], None), ).unwrap_or(None).is_some() @@ -1916,7 +1916,9 @@ pub(crate) mod tests { children: Default::default(), }).unwrap(); - key = op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); + key = op.db_updates.entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .insert(EMPTY_PREFIX, b"hello"); op.set_block_data( header, Some(vec![]), @@ -1952,8 +1954,14 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.entry(None).or_insert_with(Default::default).insert(EMPTY_PREFIX, b"hello"); - op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .insert(EMPTY_PREFIX, b"hello"); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), @@ -1989,7 +1997,10 @@ pub(crate) mod tests { ).0.into(); let hash = header.hash(); - op.db_updates.entry(None).or_insert_with(Default::default).remove(&key, EMPTY_PREFIX); + op.db_updates + .entry(ChildInfo::top_trie()) + .or_insert_with(Default::default) + .remove(&key, EMPTY_PREFIX); op.set_block_data( header, Some(vec![]), diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index dfcfe2b596be1..cfe2bb5c76aee 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -412,7 +412,7 @@ impl StateDbSync { pub fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> @@ -489,7 +489,7 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. 
pub fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index b4258f97aeb63..6d79dfeffd4bb 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -73,8 +73,8 @@ impl From> for J JournalRecordV1 { hash: old.hash, parent_hash: old.parent_hash, - inserted: vec![(None, old.inserted)], - deleted: vec![(None, old.deleted)], + inserted: vec![(ChildInfo::top_trie(), old.inserted)], + deleted: vec![(ChildInfo::top_trie(), old.deleted)], } } } @@ -99,8 +99,8 @@ fn insert_values( values: &mut ChildKeyVals, inserted: KeyVals, ) { - for (ct, inserted) in inserted { - let values = values.entry(ct).or_default(); + for (child_info, inserted) in inserted { + let values = values.entry(child_info).or_default(); for (k, v) in inserted { debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); @@ -110,8 +110,8 @@ fn insert_values( } fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { - for (ct, inserted) in inserted { - let values = values.entry(ct).or_default(); + for (child_info, inserted) in inserted { + let values = values.entry(child_info).or_default(); for k in inserted { match values.entry(k) { Entry::Occupied(mut e) => { @@ -198,7 +198,9 @@ impl NonCanonicalOverlay { } }, }; - let inserted = record.inserted.iter().map(|(ct, rec)| (ct.clone(), rec.iter().map(|(k, _)| k.clone()).collect())).collect(); + let inserted = record.inserted.iter().map(|(child_info, rec)| + (child_info.clone(), rec.iter().map(|(k, _)| k.clone()).collect()) + ).collect(); let overlay = BlockOverlay { hash: record.hash.clone(), journal_key, @@ -460,9 +462,8 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. 
- pub fn get(&self, trie: Option<&ChildInfo>, key: &Key) -> Option { - // TODO use top_trie instead of none - if let Some(values) = self.values.get(&trie.map(|t| t.to_owned())) { + pub fn get(&self, child_info: &ChildInfo, key: &Key) -> Option { + if let Some(values) = self.values.get(child_info) { if let Some((_, value)) = values.get(&key) { return Some(value.clone()); } @@ -566,12 +567,14 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; + use sp_core::storage::ChildInfo; use super::{NonCanonicalOverlay, to_journal_key_v1}; use crate::CommitSet; use crate::test::{make_db, make_childchangeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(None, &H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&ChildInfo::top_trie(), &H256::from_low_u64_be(key)) + == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 44fe7f6fc54ac..1fd736913188b 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -40,7 +40,7 @@ pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap, HashMap>, + death_index: HashMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -52,8 +52,8 @@ pub struct RefWindow { } impl RefWindow { - fn remove_death_index(&mut self, ct: &Option, key: &Key) -> Option { - if let Some(child_index) = self.death_index.get_mut(ct) { + fn remove_death_index(&mut self, child_info: &ChildInfo, key: &Key) -> Option { + if let Some(child_index) = self.death_index.get_mut(child_info) { child_index.remove(key) } else { None @@ -65,12 +65,12 @@ impl RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap, HashSet>, + deleted: HashMap>, } impl DeathRow { - fn remove_deleted(&mut self, ct: &Option, key: &Key) -> bool { - if let Some(child_index) = self.deleted.get_mut(ct) { + fn remove_deleted(&mut self, child_info: &ChildInfo, key: &Key) -> bool { + if let Some(child_index) = self.deleted.get_mut(child_info) { child_index.remove(key) } else { false @@ -104,8 +104,8 @@ impl From> for J fn from(old: JournalRecordCompat) -> Self { JournalRecordV1 { hash: old.hash, - inserted: vec![(None, old.inserted)], - deleted: vec![(None, old.deleted)], + inserted: vec![(ChildInfo::top_trie(), old.inserted)], + deleted: vec![(ChildInfo::top_trie(), old.deleted)], } } } @@ -153,7 +153,7 @@ impl RefWindow { Ok(pruning) } - fn import, Vec)>>( + fn import)>>( &mut self, hash: &BlockHash, journal_key: Vec, @@ -161,26 +161,26 @@ impl RefWindow { deleted: Keys, ) { // remove all re-inserted keys from death rows - for (ct, inserted) in inserted { + for (child_info, inserted) in inserted { for k in inserted { - if let Some(block) = self.remove_death_index(&ct, &k) { + if let Some(block) = self.remove_death_index(&child_info, &k) { self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&ct, &k); + .remove_deleted(&child_info, &k); } } } // add new keys let imported_block = self.pending_number + 
self.death_rows.len() as u64; - for (ct, deleted) in deleted.iter() { - let entry = self.death_index.entry(ct.clone()).or_default(); + for (child_info, deleted) in deleted.iter() { + let entry = self.death_index.entry(child_info.clone()).or_default(); for k in deleted.iter() { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::, HashSet>::new(); - for (ct, deleted) in deleted.into_iter() { - let entry = deleted_death_row.entry(ct).or_default(); + let mut deleted_death_row = HashMap::>::new(); + for (child_info, deleted) in deleted.into_iter() { + let entry = deleted_death_row.entry(child_info).or_default(); entry.extend(deleted); } @@ -220,8 +220,8 @@ impl RefWindow { let index = self.pending_number + self.pending_prunings as u64; commit.data.extend_with(pruned.deleted.iter() - .map(|(ct, keys)| ( - ct.clone(), + .map(|(child_info, keys)| ( + child_info.clone(), ChangeSet { inserted: Vec::new(), deleted: keys.iter().cloned().collect(), @@ -272,8 +272,8 @@ impl RefWindow { for _ in 0 .. 
self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for (ct, deleted) in pruned.deleted.iter() { - if let Some(child_index) = self.death_index.get_mut(ct) { + for (child_info, deleted) in pruned.deleted.iter() { + if let Some(child_index) = self.death_index.get_mut(child_info) { for key in deleted.iter() { child_index.remove(key); } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index cc8639043a3d5..6cfa2256b2c1c 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -19,11 +19,11 @@ use std::collections::HashMap; use sp_core::H256; use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildrenMap}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, HashMap>, + pub data: ChildrenMap>, pub meta: HashMap, DBValue>, } @@ -40,7 +40,7 @@ impl NodeDb for TestDb { type Key = H256; fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(&None).and_then(|data| data.get(key).cloned())) + Ok(self.data.get(&ChildInfo::top_trie()).and_then(|data| data.get(key).cloned())) } } @@ -83,7 +83,7 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { let mut result = ChildTrieChangeSets::default(); - result.insert(None, make_changeset(inserted, deleted)); + result.insert(ChildInfo::top_trie(), make_changeset(inserted, deleted)); result } @@ -95,8 +95,8 @@ pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { } pub fn make_db(inserted: &[u64]) -> TestDb { - let mut data = HashMap::new(); - data.insert(None, inserted.iter() + let mut data = ChildrenMap::default(); + data.insert(ChildInfo::top_trie(), 
inserted.iter() .map(|v| { (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) }) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index b1ea92c79ad90..a1dffcbae9989 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -209,6 +209,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) @@ -231,15 +234,19 @@ where fn child_storage_hash( &self, storage_key: ChildStorageKey, - _child_info: &ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { + if child_info.is_top_trie() { + return self.storage_hash(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay .child_storage(storage_key.as_ref(), key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| - self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL) + self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", @@ -258,6 +265,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.original_storage(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend .child_storage(storage_key.as_ref(), child_info, key) @@ -279,6 +289,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option> { + if child_info.is_top_trie() { + return self.original_storage_hash(key); + } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend .child_storage_hash(storage_key.as_ref(), child_info, key) @@ -315,6 +328,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> bool { + if child_info.is_top_trie() { + return self.exists_storage(key); + } let _guard = 
sp_panic_handler::AbortGuard::force_abort(); let result = match self.overlay.child_storage(storage_key.as_ref(), key) { @@ -354,6 +370,9 @@ where child_info: &ChildInfo, key: &[u8], ) -> Option { + if child_info.is_top_trie() { + return self.next_storage_key(key); + } let next_backend_key = self.backend .next_child_storage_key(storage_key.as_ref(), child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -400,6 +419,9 @@ where key: StorageKey, value: Option, ) { + if child_info.is_top_trie() { + return self.place_storage(key, value); + } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -417,6 +439,10 @@ where storage_key: ChildStorageKey, child_info: &ChildInfo, ) { + if child_info.is_top_trie() { + trace!(target: "state-trace", "Ignoring kill_child_storage on top trie"); + return; + } trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, HexDisplay::from(&storage_key.as_ref()), @@ -454,6 +480,10 @@ where child_info: &ChildInfo, prefix: &[u8], ) { + if child_info.is_top_trie() { + return self.clear_prefix(prefix); + } + trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, HexDisplay::from(&storage_key.as_ref()), diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 7256f6815c535..ed574650cf78b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -351,8 +351,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // We may rather want to return a btreemap - (root, tx.remove(&None)) + // TODO should we prove over a collection of child trie instead? 
+ (root, tx.remove(&ChildInfo::top_trie())) } fn child_storage_root( @@ -366,7 +366,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> H::Out: Ord { let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); - (root, is_empty, tx.remove(&Some(child_info.to_owned()))) + (root, is_empty, tx.remove(child_info)) } } @@ -454,7 +454,7 @@ mod tests { let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); let (proving_root, proving_mdb) = proving_backend.storage_root(::std::iter::empty()); assert_eq!(trie_root, proving_root); - let mut trie_mdb = trie_mdb.remove(&None).unwrap(); + let mut trie_mdb = trie_mdb.remove(&ChildInfo::top_trie()).unwrap(); assert_eq!(trie_mdb.drain(), proving_mdb.unwrap().drain()); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e63f01e360167..af00fa438ed7e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -207,7 +207,7 @@ impl, H: Hasher> Backend for TrieBackend where } } let mut tx = ChildrenMap::default(); - tx.insert(None, write_overlay); + tx.insert(ChildInfo::top_trie(), write_overlay); (root, tx) } @@ -256,7 +256,7 @@ impl, H: Hasher> Backend for TrieBackend where let is_default = root == default_root; let mut tx = ChildrenMap::default(); - tx.insert(Some(child_info.to_owned()), write_overlay); + tx.insert(child_info.clone(), write_overlay); (root, is_default, tx) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0419556c18e37..0faa93f3a7f1d 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -36,7 +36,7 @@ pub trait Storage: Send + Sync { /// Get a trie node. 
fn get( &self, - trie: Option<&ChildInfo>, + trie: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -409,7 +409,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. -impl TrieBackendStorageRef for (Arc>, Option) { +impl TrieBackendStorageRef for (Arc>, ChildInfo) { type Overlay = PrefixedMemoryDB; fn get( @@ -417,7 +417,7 @@ impl TrieBackendStorageRef for (Arc>, Option Result, String> { - Storage::::get(self.0.deref(), self.1.as_ref(), key, prefix) + Storage::::get(self.0.deref(), &self.1, key, prefix) } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 9180ff720e6f4..e4d4b5604ae2b 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -216,6 +216,14 @@ impl ChildInfo { Self::new_default(&[]) } + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. + pub fn is_top_trie(&self) -> bool { + match self { + ChildInfo::Default(ChildTrie { data }) => data.len() == 0 + } + } + /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { @@ -295,17 +303,17 @@ impl ChildTrie { #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. /// A few utilities methods are defined. -pub struct ChildrenMap(pub BTreeMap, T>); +pub struct ChildrenMap(pub BTreeMap); /// Type alias for storage of children related content. -pub type ChildrenVec = Vec<(Option, T)>; +pub type ChildrenVec = Vec<(ChildInfo, T)>; /// Type alias for storage of children related content. 
-pub type ChildrenSlice<'a, T> = &'a [(Option, T)]; +pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; #[cfg(feature = "std")] impl sp_std::ops::Deref for ChildrenMap { - type Target = BTreeMap, T>; + type Target = BTreeMap; fn deref(&self) -> &Self::Target { &self.0 @@ -333,7 +341,7 @@ impl ChildrenMap { /// on each of its entries. pub fn extend_with( &mut self, - other: impl Iterator, T)>, + other: impl Iterator, merge: impl Fn(&mut T, T), ) { use sp_std::collections::btree_map::Entry; @@ -352,7 +360,7 @@ impl ChildrenMap { /// Extends two maps, by enxtending entries with the same key. pub fn extend_replace( &mut self, - other: impl Iterator, T)>, + other: impl Iterator, ) { self.0.extend(other) } @@ -360,8 +368,8 @@ impl ChildrenMap { #[cfg(feature = "std")] impl IntoIterator for ChildrenMap { - type Item = (Option, T); - type IntoIter = sp_std::collections::btree_map::IntoIter, T>; + type Item = (ChildInfo, T); + type IntoIter = sp_std::collections::btree_map::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() From b07d7cac096d8df2443c687f068a5192dd631896 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 10:02:57 +0100 Subject: [PATCH 20/85] fix merge test --- primitives/state-machine/src/ext.rs | 33 ++++++++++++++--------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index df631e623c0b5..06ba6bd26bca9 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -633,10 +633,7 @@ mod tests { type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -748,9 +745,7 @@ mod tests { #[test] fn next_child_storage_key_works() { - const 
CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut cache = StorageTransactionCache::default(); @@ -797,11 +792,15 @@ mod tests { #[test] fn child_storage_works() { + use sp_core::InnerHasher; + + let child_info1 = ChildInfo::new_default(CHILD_UUID_1); + let mut cache = StorageTransactionCache::default(); let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); + overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ @@ -811,31 +810,31 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info1.clone(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(child(), &child_info1, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + ext.child_storage_hash(child(), &child_info1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(child(), &child_info1, &[20]), None); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[20]), 
Some(vec![20])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), + ext.child_storage_hash(child(), &child_info1, &[20]), None, ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(child(), &child_info1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(child(), &child_info1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + ext.child_storage_hash(child(), &child_info1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); From e5d7b04a0651d2f5cb0bd603b037430e1765f4ca Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 10:52:08 +0100 Subject: [PATCH 21/85] clean todos --- client/state-db/src/lib.rs | 10 ---------- primitives/state-machine/src/proving_backend.rs | 4 ++-- primitives/state-machine/src/trie_backend.rs | 5 ----- primitives/state-machine/src/trie_backend_essence.rs | 10 ---------- 4 files changed, 2 insertions(+), 27 deletions(-) diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index cfe2bb5c76aee..8bd303d9b85a2 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -127,16 +127,6 @@ impl ChangeSet { self.deleted.extend(other.deleted.into_iter()); } } -/// A set of state node changes for a child trie. -/// TODO remove?? -#[derive(Debug, Clone)] -pub struct ChildTrieChangeSet { - /// Change set of this element. - pub data: ChangeSet, - /// Child trie descripton. - /// If not set, this is the top trie. - pub info: Option, -} /// Change sets of all child trie (top is key None). 
pub type ChildTrieChangeSets = ChildrenMap>; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ed574650cf78b..ae6dd9b2dbf68 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -239,6 +239,8 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> } } +// proof run on a flatten storage of tries and currently only need implement a single +// trie backend storage api. impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef for ProofRecorderBackend<'a, S, H> { @@ -249,7 +251,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO switch proof model too (use a trie) if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } @@ -351,7 +352,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> where I: IntoIterator, Option>)> { let (root, mut tx) = self.0.storage_root(delta); - // TODO should we prove over a collection of child trie instead? (root, tx.remove(&ChildInfo::top_trie())) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index af00fa438ed7e..771364aa964c7 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -84,7 +84,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? 
{ essence.storage(key) @@ -103,7 +102,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { essence.next_storage_key(key) @@ -126,7 +124,6 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { essence.for_keys(f) @@ -140,7 +137,6 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { essence.for_keys_with_prefix(prefix, f) @@ -234,7 +230,6 @@ impl, H: Hasher> Backend for TrieBackend where }; { - // TODO switch to &mut self like in overlay pr let mut buf = Vec::new(); let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); // Do not write prefix in overlay. diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0faa93f3a7f1d..32b2ba0bbca51 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -273,7 +273,6 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O O: hash_db::HashDB + Default + Consolidate, { fn get(&self, key: &H::Out) -> Option { - // TODO need new trait with ct as parameter!!! 
if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { @@ -426,7 +425,6 @@ impl TrieBackendStorageRef for (Arc>, ChildInfo) { pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { db: &'a B, info: Option<&'a ChildInfo>, - buffer: &'a mut Vec, _ph: PhantomData, } @@ -451,12 +449,6 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch prefix: Prefix, ) -> Result, String> { if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { - // TODO switch to &mut self like in overlay pr and use commented code - /*self.buffer.resize(keyspace.len() + prefix.0.len(), 0); - self.buffer[..keyspace.len()].copy_from_slice(keyspace); - self.buffer[keyspace.len()..].copy_from_slice(prefix.0); - self.db.get(key, (self.buffer.as_slice(), prefix.1))*/ - let prefix = keyspace_as_prefix_alloc(keyspace, prefix); self.db.get(key, (prefix.0.as_slice(), prefix.1)) } else { @@ -475,8 +467,6 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { key: &H::Out, prefix: Prefix, ) -> Result, String> { - // TODO should we split prefixed memory db too?? -> likely yes: sharing - // rc does not make sense -> change type of PrefixedMemoryDB. 
Ok(hash_db::HashDB::get(self, key, prefix)) } } From 3a7166934168b1bcd4565f536d1df78331ac65c0 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 11:01:00 +0100 Subject: [PATCH 22/85] fix --- primitives/state-machine/src/trie_backend.rs | 17 +++++------------ .../state-machine/src/trie_backend_essence.rs | 3 +-- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 771364aa964c7..1847fb89bb33e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -84,8 +84,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - let mut buf = Vec::new(); - if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? { essence.storage(key) } else { Ok(None) @@ -102,8 +101,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - let mut buf = Vec::new(); - if let Some(essence) = self.child_essence(storage_key, child_info, &mut buf)? { + if let Some(essence) = self.child_essence(storage_key, child_info)? 
{ essence.next_storage_key(key) } else { Ok(None) @@ -124,8 +122,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - let mut buf = Vec::new(); - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys(f) } } @@ -137,8 +134,7 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - let mut buf = Vec::new(); - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info, &mut buf) { + if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { essence.for_keys_with_prefix(prefix, f) } } @@ -230,8 +226,7 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let mut buf = Vec::new(); - let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info), &mut buf); + let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info)); // Do not write prefix in overlay. let mut eph = Ephemeral::new( &child_essence, @@ -267,7 +262,6 @@ impl, H: Hasher> TrieBackend where &'a self, storage_key: &[u8], child_info: &'a ChildInfo, - buffer: &'a mut Vec, ) -> Result, H>>, >::Error> { let root: Option = self.storage(storage_key)? 
.and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); @@ -275,7 +269,6 @@ impl, H: Hasher> TrieBackend where Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( self.essence.backend_storage(), Some(child_info), - buffer, ), root)) } else { None diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 32b2ba0bbca51..b4f24502d9c3c 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -430,11 +430,10 @@ pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { /// Instantiate a `ChildTrieBackendStorage`. - pub fn new(db: &'a B, info: Option<&'a ChildInfo>, buffer: &'a mut Vec) -> Self { + pub fn new(db: &'a B, info: Option<&'a ChildInfo>) -> Self { ChildTrieBackendStorage { db, info, - buffer, _ph: PhantomData, } } From c8464710f905455aaeb6791c1ec309e31c956d4b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:29:48 +0100 Subject: [PATCH 23/85] End up removing all keypacedDB from code. 
--- client/db/src/changes_tries_storage.rs | 11 +- client/db/src/lib.rs | 13 +- client/src/client.rs | 3 +- client/state-db/src/lib.rs | 12 +- client/state-db/src/test.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 13 +- .../src/changes_trie/changes_iterator.rs | 11 +- .../state-machine/src/changes_trie/mod.rs | 7 +- .../state-machine/src/changes_trie/prune.rs | 5 +- .../state-machine/src/changes_trie/storage.rs | 7 +- .../state-machine/src/proving_backend.rs | 39 +++--- primitives/state-machine/src/trie_backend.rs | 32 +++-- .../state-machine/src/trie_backend_essence.rs | 80 ++++++----- primitives/trie/src/lib.rs | 125 +----------------- 14 files changed, 138 insertions(+), 224 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index ab8c7465badd1..f5c1d34688e23 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -488,9 +488,11 @@ where fn get( &self, + child_info: &sp_core::storage::ChildInfo, key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { + debug_assert!(child_info.is_top_trie()); self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } @@ -594,8 +596,9 @@ mod tests { assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); let storage = backend.changes_tries_storage.storage(); + let top_trie = sp_core::storage::ChildInfo::top_trie(); for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); + assert_eq!(storage.get(&top_trie, &key, EMPTY_PREFIX), Ok(Some(val))); } }; @@ -704,7 +707,11 @@ mod tests { .log(DigestItem::as_changes_trie_root) .cloned(); match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), + Some(trie_root) => backend.changes_tries_storage.get( + &sp_core::storage::ChildInfo::top_trie(), + &trie_root, + EMPTY_PREFIX, + ).unwrap().is_none(), None 
=> true, } }; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index a447a6b87801e..2f73ea3c7d2a9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -681,8 +681,17 @@ impl sc_state_db::NodeDb for StorageDb { type Error = io::Error; type Key = [u8]; - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - self.db.get(columns::STATE, key).map(|r| r.map(|v| v.to_vec())) + fn get(&self, child_info: &ChildInfo, key: &[u8]) -> Result>, Self::Error> { + if child_info.is_top_trie() { + self.db.get(columns::STATE, key) + } else { + let keyspace = child_info.keyspace(); + // TODO try to switch api to &mut and use a key buffer from StorageDB + let mut key_buffer = vec![0; keyspace.len() + key.len()]; + key_buffer[..keyspace.len()].copy_from_slice(keyspace); + key_buffer[keyspace.len()..].copy_from_slice(&key[..]); + self.db.get(columns::STATE, &key_buffer[..]) + }.map(|r| r.map(|v| v.to_vec())) } } diff --git a/client/src/client.rs b/client/src/client.rs index 7acef6a4a910c..888bd88428863 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -566,10 +566,11 @@ impl Client where fn get( &self, + child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - self.storage.get(key, prefix) + self.storage.get(child_info, key, prefix) } } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 8bd303d9b85a2..992e8fa81f250 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -68,7 +68,7 @@ pub trait NodeDb { type Error: fmt::Debug; /// Get state trie node. - fn get(&self, key: &Self::Key) -> Result, Self::Error>; + fn get(&self, child_info: &ChildInfo, key: &Self::Key) -> Result, Self::Error>; } /// Error type. 
@@ -402,16 +402,16 @@ impl StateDbSync { pub fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(trie, key) { + if let Some(value) = self.non_canonical.get(child_info, key) { return Ok(Some(value)); } - db.get(key.as_ref()).map_err(|e| Error::Db(e)) + db.get(child_info, key.as_ref()).map_err(|e| Error::Db(e)) } pub fn apply_pending(&mut self) { @@ -479,13 +479,13 @@ impl StateDb { /// Get a value from non-canonical/pruning overlay or the backing DB. pub fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Key, db: &D, ) -> Result, Error> where Key: AsRef { - self.db.read().get(trie, key, db) + self.db.read().get(child_info, key, db) } /// Revert all non-canonical blocks with the best block number. diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index 6cfa2256b2c1c..b9f2941bcc5e0 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -39,8 +39,8 @@ impl NodeDb for TestDb { type Error = (); type Key = H256; - fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(&ChildInfo::top_trie()).and_then(|data| data.get(key).cloned())) + fn get(&self, child_info: &ChildInfo, key: &H256) -> Result, ()> { + Ok(self.data.get(child_info).and_then(|data| data.get(key).cloned())) } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 4bfe7d8f8ef23..cefc4d88470a2 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -281,6 +281,9 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } + // change trie content are all stored as top_trie (default child trie with empty keyspace) + let child_info = sp_core::storage::ChildInfo::top_trie(); + let child_info = &child_info; let mut children_roots = BTreeMap::::new(); { let trie_storage = 
TrieBackendEssence::<_, H>::new( @@ -288,7 +291,7 @@ fn prepare_digest_input<'a, H, Number>( trie_root, ); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| + trie_storage.for_key_values_with_prefix(child_info, &child_prefix, |key, value| if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { let mut trie_root = ::Out::default(); @@ -297,12 +300,12 @@ fn prepare_digest_input<'a, H, Number>( } }); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); @@ -319,12 +322,12 @@ fn prepare_digest_input<'a, H, Number>( crate::changes_trie::TrieBackendStorageAdapter(storage), trie_root, ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &extrinsic_prefix, |key| if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); - trie_storage.for_keys_with_prefix(&digest_prefix, |key| + trie_storage.for_keys_with_prefix(child_info, &digest_prefix, |key| if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { insert_to_map(&mut map, trie_key.key); }); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 9e185d0444c86..84be4a3f55541 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -67,6 +67,7 @@ pub fn key_changes<'a, 
H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: sp_core::storage::ChildInfo::top_trie(), }) } @@ -177,6 +178,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, + child_info: sp_core::storage::ChildInfo::top_trie(), }.collect() } @@ -314,6 +316,10 @@ pub struct DrilldownIterator<'a, H, Number> H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, + /// This is always top trie info, but it cannot be + /// statically instantiated at the time (vec of null + /// size could be in theory). + child_info: sp_core::storage::ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> @@ -322,8 +328,11 @@ impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, N type Item = Result<(Number, u32), String>; fn next(&mut self) -> Option { + let child_info = &self.child_info; self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root) + .storage(child_info, key) + ) } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 45970e7a31dc7..b6aba93108407 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -69,6 +69,7 @@ use std::collections::{HashMap, HashSet}; use std::convert::TryInto; use hash_db::Prefix; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; @@ -160,8 +161,11 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. + /// Note that child info is use only for case where we use this trait + /// as an adapter to storage. 
fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -175,10 +179,11 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.0.get(key, prefix) + self.0.get(child_info, key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 94e8fe4bdaed2..87bd5dad60e09 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -65,7 +65,8 @@ pub fn prune( ); let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { + let child_info = sp_core::storage::ChildInfo::top_trie(); + trie_storage.for_key_values_with_prefix(&child_info, &child_prefix, |key, value| { if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { if let Ok(value) = >::decode(&mut &value[..]) { let mut trie_root = ::Out::default(); @@ -100,7 +101,7 @@ fn prune_trie( backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), proof_recorder: &mut proof_recorder, }; - trie.record_all_keys(); + trie.record_all_top_trie_keys(); } // all nodes of this changes trie should be pruned diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index ee2599d09548a..53bb62675d9bb 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -19,6 +19,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Prefix, EMPTY_PREFIX}; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -190,10 +191,11 @@ impl Storage for InMemoryStorage Result, String> { - 
MemoryDB::::get(&self.data.read().mdb, key, prefix) + MemoryDB::::get(&self.data.read().mdb, child_info, key, prefix) } } @@ -212,9 +214,10 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.storage.get(key, prefix) + self.storage.get(child_info, key, prefix) } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ae6dd9b2dbf68..e4eca1181089e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,13 +23,14 @@ use log::debug; use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys + MemoryDB, default_child_trie_root, read_trie_value_with, + record_all_keys, }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; -use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage, TrieBackendStorageRef}; +use crate::trie_backend_essence::{BackendStorageDBRef, TrieBackendEssence, + TrieBackendStorage, TrieBackendStorageRef}; use crate::{Error, ExecutionError, Backend}; use std::collections::{HashMap, HashSet}; use crate::DBValue; @@ -125,15 +126,15 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> { /// Produce proof for a key query. 
pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + read_trie_value_with::, _, BackendStorageDBRef>( &eph, self.backend.root(), key, @@ -146,36 +147,33 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> &mut self, storage_key: &[u8], child_info: &ChildInfo, - key: &[u8] + key: &[u8], ) -> Result>, String> { let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) .unwrap_or(default_child_trie_root::>(storage_key)); - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + child_info, ); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value_with::, _, _>( - storage_key, - child_info.keyspace(), + read_trie_value_with::, _, _>( &eph, - &root.as_ref(), + &root, key, &mut *self.proof_recorder ).map_err(map_e) } /// Produce proof for the whole backend. 
- pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( + pub fn record_all_top_trie_keys(&mut self) { + let child_info = ChildInfo::top_trie(); + let eph = BackendStorageDBRef::new( self.backend.backend_storage(), - &mut read_overlay, + &child_info, ); let mut iter = move || -> Result<(), Box>> { @@ -248,13 +246,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorageRef fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { if let Some(v) = self.proof_recorder.read().get(key) { return Ok(v.clone()); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(child_info, key, prefix)?; self.proof_recorder.write().insert(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 1847fb89bb33e..dfe0e43f76dc9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -30,6 +30,9 @@ use crate::{ /// for this trie and child tries. pub struct TrieBackend, H: Hasher> { essence: TrieBackendEssence, + // storing child_info of top trie even if it is in + // theory a bit useless (no heap alloc on empty vec). 
+ top_trie: ChildInfo, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -37,6 +40,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), + top_trie: ChildInfo::top_trie(), } } @@ -75,7 +79,7 @@ impl, H: Hasher> Backend for TrieBackend where type TrieBackendStorage = S; fn storage(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage(key) + self.essence.storage(&self.top_trie, key) } fn child_storage( @@ -85,14 +89,14 @@ impl, H: Hasher> Backend for TrieBackend where key: &[u8], ) -> Result, Self::Error> { if let Some(essence) = self.child_essence(storage_key, child_info)? { - essence.storage(key) + essence.storage(child_info, key) } else { Ok(None) } } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.next_storage_key(key) + self.essence.next_storage_key(&self.top_trie, key) } fn next_child_storage_key( @@ -102,18 +106,18 @@ impl, H: Hasher> Backend for TrieBackend where key: &[u8], ) -> Result, Self::Error> { if let Some(essence) = self.child_essence(storage_key, child_info)? 
{ - essence.next_storage_key(key) + essence.next_storage_key(child_info, key) } else { Ok(None) } } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(prefix, f) + self.essence.for_keys_with_prefix(&self.top_trie, prefix, f) } fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_key_values_with_prefix(prefix, f) + self.essence.for_key_values_with_prefix(&self.top_trie, prefix, f) } fn for_keys_in_child_storage( @@ -123,7 +127,7 @@ impl, H: Hasher> Backend for TrieBackend where f: F, ) { if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { - essence.for_keys(f) + essence.for_keys(child_info, f) } } @@ -135,12 +139,12 @@ impl, H: Hasher> Backend for TrieBackend where f: F, ) { if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { - essence.for_keys_with_prefix(prefix, f) + essence.for_keys_with_prefix(child_info, prefix, f) } } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - let eph = BackendStorageDBRef::new(self.essence.backend_storage()); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -163,7 +167,7 @@ impl, H: Hasher> Backend for TrieBackend where } fn keys(&self, prefix: &[u8]) -> Vec { - let eph = BackendStorageDBRef::new(self.essence.backend_storage()); + let eph = BackendStorageDBRef::new(self.essence.backend_storage(), &self.top_trie); let collect_all = || -> Result<_, Box>> { let trie = TrieDB::::new(&eph, self.essence.root())?; @@ -190,6 +194,7 @@ impl, H: Hasher> Backend for TrieBackend where { let mut eph = Ephemeral::new( self.essence.backend_storage(), + &self.top_trie, &mut write_overlay, ); @@ -199,7 +204,7 @@ impl, H: Hasher> Backend for TrieBackend where } } let mut tx = ChildrenMap::default(); - tx.insert(ChildInfo::top_trie(), write_overlay); + tx.insert(self.top_trie.clone(), write_overlay); (root, tx) 
} @@ -230,6 +235,7 @@ impl, H: Hasher> Backend for TrieBackend where // Do not write prefix in overlay. let mut eph = Ephemeral::new( &child_essence, + child_info, &mut write_overlay, ); @@ -281,7 +287,7 @@ pub mod tests { use std::collections::HashSet; use sp_core::{Blake2Hasher, H256}; use codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; @@ -289,11 +295,9 @@ pub mod tests { const CHILD_UUID_1: &[u8] = b"unique_id_1"; fn test_db() -> (PrefixedMemoryDB, H256) { - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index b4f24502d9c3c..291c613174255 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,7 @@ use sp_core::Hasher; use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, read_trie_value, check_if_empty_root, - for_keys_in_trie, KeySpacedDB, keyspace_as_prefix_alloc}; + for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -74,8 +74,8 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. 
- pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage); + pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); let trie = TrieDB::::new(&eph, &self.root) .map_err(|e| format!("TrieDB creation error: {}", e))?; @@ -107,8 +107,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { - let eph = BackendStorageDBRef::new(&self.storage); + pub fn storage(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { + let eph = BackendStorageDBRef::new(&self.storage, child_info); let map_e = |e| format!("Trie lookup error: {}", e); @@ -118,9 +118,10 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Retrieve all entries keys of storage and call `f` for each of those keys. pub fn for_keys( &self, + child_info: &ChildInfo, f: F, ) { - let eph = BackendStorageDBRef::new(&self.storage); + let eph = BackendStorageDBRef::new(&self.storage, child_info); if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( &eph, @@ -132,8 +133,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) } fn keys_values_with_prefix_inner( @@ -141,9 +142,9 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option<&ChildInfo>, + child_info: &ChildInfo, ) { - let eph = BackendStorageDBRef::new(&self.storage); + let eph = BackendStorageDBRef::new(&self.storage, child_info); let mut iter = move |db| -> Result<(), Box>> { let trie = TrieDB::::new(db, root)?; @@ -164,20 +165,14 @@ impl, H: Hasher> TrieBackendEssence where H::O Ok(()) }; - let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(&eph, child_info.keyspace()); - iter(&db) - } else { - iter(&eph) - }; - if let Err(e) = result { + if let Err(e) = iter(&eph) { debug!(target: "trie", "Error while iterating by prefix: {}", e); } } /// Execute given closure for all key and values starting with prefix. 
- pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, f, child_info) } } @@ -187,6 +182,7 @@ pub(crate) struct Ephemeral<'a, S, H, O> where O: hash_db::HashDB + Default + Consolidate, { storage: &'a S, + child_info: &'a ChildInfo, overlay: &'a mut O, _ph: PhantomData, } @@ -196,6 +192,7 @@ pub(crate) struct BackendStorageDBRef<'a, S, H> where H: 'a + Hasher, { storage: &'a S, + child_info: &'a ChildInfo, _ph: PhantomData, } @@ -224,9 +221,10 @@ impl<'a, S, H, O> Ephemeral<'a, S, H, O> where H: 'a + Hasher, O: hash_db::HashDB + Default + Consolidate, { - pub fn new(storage: &'a S, overlay: &'a mut O) -> Self { + pub fn new(storage: &'a S, child_info: &'a ChildInfo, overlay: &'a mut O) -> Self { Ephemeral { storage, + child_info, overlay, _ph: PhantomData, } @@ -237,9 +235,10 @@ impl<'a, S, H> BackendStorageDBRef<'a, S, H> where S: 'a + TrieBackendStorageRef, H: 'a + Hasher, { - pub fn new(storage: &'a S) -> Self { + pub fn new(storage: &'a S, child_info: &'a ChildInfo) -> Self { BackendStorageDBRef { storage, + child_info, _ph: PhantomData, } } @@ -276,7 +275,7 @@ impl<'a, S, H, O> hash_db::PlainDBRef for Ephemeral<'a, S, H, O if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { Some(val) } else { - match self.storage.get(&key, EMPTY_PREFIX) { + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -300,7 +299,7 @@ impl<'a, S, H> hash_db::PlainDBRef for BackendStorageDBRef<'a, return Some(vec![0u8]); } - match self.storage.get(&key, EMPTY_PREFIX) { + match self.storage.get(self.child_info, &key, EMPTY_PREFIX) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -351,7 +350,7 @@ impl<'a, S, H, O> 
hash_db::HashDBRef for Ephemeral<'a, S, H, O> wher if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(&key, prefix) { + match self.storage.get(self.child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -375,7 +374,7 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> return Some(vec![0u8]); } - match self.storage.get(&key, prefix) { + match self.storage.get(self.child_info, &key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -397,6 +396,7 @@ pub trait TrieBackendStorageRef { /// Get the value stored at key. fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -408,22 +408,24 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} // This implementation is used by normal storage trie clients. +// TODO remove stored ChildInfo impl TrieBackendStorageRef for (Arc>, ChildInfo) { type Overlay = PrefixedMemoryDB; fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.0.deref(), &self.1, key, prefix) + Storage::::get(self.0.deref(), child_info, key, prefix) } } - /// This is an essence for the child trie backend. pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { db: &'a B, + // TODO is it usefull? 
-> seems like not -> TODO remove this struct info: Option<&'a ChildInfo>, _ph: PhantomData, } @@ -444,15 +446,11 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch fn get( &self, + child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { - if let Some(keyspace) = self.info.as_ref().map(|ci| ci.keyspace()) { - let prefix = keyspace_as_prefix_alloc(keyspace, prefix); - self.db.get(key, (prefix.0.as_slice(), prefix.1)) - } else { - self.db.get(key, prefix) - } + self.db.get(child_info, key, prefix) } } @@ -463,9 +461,11 @@ impl TrieBackendStorageRef for PrefixedMemoryDB { fn get( &self, + _child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -475,9 +475,11 @@ impl TrieBackendStorageRef for MemoryDB { fn get( &self, + _child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String> { + // No need to use keyspace for in memory db, ignoring child_info parameter. Ok(hash_db::HashDB::get(self, key, prefix)) } } @@ -485,7 +487,7 @@ impl TrieBackendStorageRef for MemoryDB { #[cfg(test)] mod test { use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; use crate::trie_backend::TrieBackend; use crate::backend::Backend; @@ -505,17 +507,9 @@ mod test { trie.insert(b"4", &[1]).expect("insert failed"); trie.insert(b"6", &[1]).expect("insert failed"); } - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - // reuse of root_1 implicitly assert child trie root is same - // as top trie (contents must remain the same). 
- let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); + // using top trie as child trie (both with same content) trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); }; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 08d7b2d590866..1410a9ff1b7ef 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -26,7 +26,7 @@ mod trie_stream; use sp_std::boxed::Box; use sp_std::marker::PhantomData; use sp_std::vec::Vec; -use sp_core::{Hasher, InnerHasher, Prefix}; +use sp_core::{Hasher, InnerHasher}; use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. @@ -47,7 +47,7 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; #[derive(Default)] /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData); +pub struct Layout(PhantomData); impl TrieLayout for Layout { const USE_EXTENSION: bool = false; @@ -269,127 +269,6 @@ pub fn record_all_keys( Ok(()) } -/// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) -} - -/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. 
-pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); - -#[cfg(feature="test-helpers")] -/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -/// -/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); - -/// Utility function used to merge some byte data (keyspace) and `prefix` data -/// before calling key value database primitives. -pub fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) -} - -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: InnerHasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: InnerHasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: InnerHasher, - T: From<&'static [u8]>, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: InnerHasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { 
- let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -#[cfg(feature="test-helpers")] -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: InnerHasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } -} - /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; From 0b557b676bd7448d175a57005311bb447148a0ac Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:55:04 +0100 Subject: [PATCH 24/85] Clean unused struct and useless child info. 
--- client/db/src/lib.rs | 6 ++-- primitives/state-machine/src/trie_backend.rs | 22 ++++++------- .../state-machine/src/trie_backend_essence.rs | 32 +++---------------- 3 files changed, 17 insertions(+), 43 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2f73ea3c7d2a9..06e6db6c43af1 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -91,7 +91,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - (Arc>>, ChildInfo), HasherFor + Arc>>, HasherFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. @@ -1607,7 +1607,7 @@ impl sc_client_api::backend::Backend for Backend { BlockId::Hash(h) if h == Default::default() => { let genesis_storage = DbGenesisStorage::::new(); let root = genesis_storage.0.clone(); - let db_state = DbState::::new((Arc::new(genesis_storage), ChildInfo::top_trie()), root); + let db_state = DbState::::new(Arc::new(genesis_storage), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); return Ok(CachingState::new(state, self.shared_cache.clone(), None)); }, @@ -1626,7 +1626,7 @@ impl sc_client_api::backend::Backend for Backend { } if let Ok(()) = self.storage.state_db.pin(&hash) { let root = hdr.state_root(); - let db_state = DbState::::new((self.storage.clone(), ChildInfo::top_trie()), *root); + let db_state = DbState::::new(self.storage.clone(), *root); let state = RefTrackingState::new( db_state, self.storage.clone(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dfe0e43f76dc9..6f9bd8b810c6a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -23,7 +23,7 @@ use sp_core::storage::{ChildInfo, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, - 
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef, ChildTrieBackendStorage}, + trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral, BackendStorageDBRef}, }; /// Patricia trie-based backend. Transaction type is overlays of changes to commit @@ -88,7 +88,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key, child_info)? { + if let Some(essence) = self.child_essence(storage_key)? { essence.storage(child_info, key) } else { Ok(None) @@ -105,7 +105,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key, child_info)? { + if let Some(essence) = self.child_essence(storage_key)? { essence.next_storage_key(child_info, key) } else { Ok(None) @@ -126,7 +126,7 @@ impl, H: Hasher> Backend for TrieBackend where child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { + if let Ok(Some(essence)) = self.child_essence(storage_key) { essence.for_keys(child_info, f) } } @@ -138,7 +138,7 @@ impl, H: Hasher> Backend for TrieBackend where prefix: &[u8], f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key, child_info) { + if let Ok(Some(essence)) = self.child_essence(storage_key) { essence.for_keys_with_prefix(child_info, prefix, f) } } @@ -231,10 +231,10 @@ impl, H: Hasher> Backend for TrieBackend where }; { - let child_essence = ChildTrieBackendStorage::new(self.essence.backend_storage(), Some(child_info)); + let storage = self.essence.backend_storage(); // Do not write prefix in overlay. 
let mut eph = Ephemeral::new( - &child_essence, + storage, child_info, &mut write_overlay, ); @@ -267,15 +267,11 @@ impl, H: Hasher> TrieBackend where fn child_essence<'a>( &'a self, storage_key: &[u8], - child_info: &'a ChildInfo, - ) -> Result, H>>, >::Error> { + ) -> Result>, >::Error> { let root: Option = self.storage(storage_key)? .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); Ok(if let Some(root) = root { - Some(TrieBackendEssence::new(ChildTrieBackendStorage::new( - self.essence.backend_storage(), - Some(child_info), - ), root)) + Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) } else { None }) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 291c613174255..2224084938a84 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -407,9 +407,7 @@ pub trait TrieBackendStorage: TrieBackendStorageRef + Send + Sync impl + Send + Sync> TrieBackendStorage for B {} -// This implementation is used by normal storage trie clients. -// TODO remove stored ChildInfo -impl TrieBackendStorageRef for (Arc>, ChildInfo) { +impl TrieBackendStorageRef for Arc> { type Overlay = PrefixedMemoryDB; fn get( @@ -418,31 +416,12 @@ impl TrieBackendStorageRef for (Arc>, ChildInfo) { key: &H::Out, prefix: Prefix, ) -> Result, String> { - Storage::::get(self.0.deref(), child_info, key, prefix) + Storage::::get(self.deref(), child_info, key, prefix) } } -/// This is an essence for the child trie backend. -pub struct ChildTrieBackendStorage<'a, H: Hasher, B: TrieBackendStorageRef> { - db: &'a B, - // TODO is it usefull? -> seems like not -> TODO remove this struct - info: Option<&'a ChildInfo>, - _ph: PhantomData, -} - -impl<'a, H: Hasher, B: TrieBackendStorageRef> ChildTrieBackendStorage<'a, H, B> { - /// Instantiate a `ChildTrieBackendStorage`. 
- pub fn new(db: &'a B, info: Option<&'a ChildInfo>) -> Self { - ChildTrieBackendStorage { - db, - info, - _ph: PhantomData, - } - } -} - -impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for ChildTrieBackendStorage<'a, H, B> { - type Overlay = PrefixedMemoryDB; +impl> TrieBackendStorageRef for &S { + type Overlay = >::Overlay; fn get( &self, @@ -450,11 +429,10 @@ impl<'a, H: Hasher, B: TrieBackendStorageRef> TrieBackendStorageRef for Ch key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.db.get(child_info, key, prefix) + >::get(self, child_info, key, prefix) } } - // This implementation is used by test storage trie clients. impl TrieBackendStorageRef for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; From 8483da40f31607ac8717841271d806ce09936b1f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 17:59:17 +0100 Subject: [PATCH 25/85] remove todo --- client/db/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 06e6db6c43af1..8e5b401bc3db5 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -686,7 +686,6 @@ impl sc_state_db::NodeDb for StorageDb { self.db.get(columns::STATE, key) } else { let keyspace = child_info.keyspace(); - // TODO try to switch api to &mut and use a key buffer from StorageDB let mut key_buffer = vec![0; keyspace.len() + key.len()]; key_buffer[..keyspace.len()].copy_from_slice(keyspace); key_buffer[keyspace.len()..].copy_from_slice(&key[..]); From 64ffcead72174a4daa55bf9425b7440c47d7f63b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 4 Feb 2020 19:24:45 +0100 Subject: [PATCH 26/85] actual touch to keyspace prefixing (fail on wrong code asserted). 
--- client/db/src/lib.rs | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 8e5b401bc3db5..a400b06bdb1ce 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1811,6 +1811,10 @@ pub(crate) mod tests { #[test] fn set_state_data() { let db = Backend::::new_test(2, 0); + + let child_info = sp_core::storage::ChildInfo::new_default(b"unique_id"); + let storage_key = b":child_storage:default:key1"; + let hash = { let mut op = db.begin_operation().unwrap(); db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); @@ -1827,16 +1831,28 @@ pub(crate) mod tests { (vec![1, 2, 3], vec![9, 9, 9]), ]; - header.state_root = op.old_state.storage_root(storage + let child_storage = vec![ + (vec![2, 3, 5], Some(vec![4, 4, 6])), + (vec![2, 2, 3], Some(vec![7, 9, 9])), + ]; + + header.state_root = op.old_state.full_storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + vec![(storage_key.to_vec(), child_storage.clone(), child_info.clone())], + false, ).0.into(); let hash = header.hash(); + let mut children = HashMap::default(); + children.insert(storage_key.to_vec(), sp_core::storage::StorageChild { + child_info: child_info.clone(), + data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), + }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children, }).unwrap(); op.set_block_data( header.clone(), @@ -1852,6 +1868,10 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); + assert_eq!( + state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); hash }; @@ -1890,6 +1910,12 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 3, 
5]).unwrap(), None); assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + assert_eq!( + state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + Some(vec![4, 4, 6]), + ); + + } } From 2bc3cb610273a15bef05f6133dc4b54c3f13ea94 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 5 Feb 2020 13:43:48 +0100 Subject: [PATCH 27/85] self review changes. --- client/chain-spec/src/chain_spec.rs | 5 +++- client/db/src/changes_tries_storage.rs | 7 ++--- client/db/src/lib.rs | 27 +++++++++--------- client/db/src/storage_cache.rs | 4 +-- client/network/src/protocol.rs | 5 ++-- client/rpc/src/state/state_full.rs | 3 +- client/rpc/src/state/tests.rs | 6 ++-- client/src/client.rs | 3 +- client/src/light/fetcher.rs | 6 ++-- client/state-db/src/lib.rs | 16 +++++------ client/state-db/src/noncanonical.rs | 4 +-- client/state-db/src/pruning.rs | 28 +++++++------------ frame/contracts/src/account_db.rs | 17 ++++------- frame/contracts/src/exec.rs | 6 ++-- frame/contracts/src/lib.rs | 5 ++-- frame/contracts/src/tests.rs | 12 ++++---- primitives/state-machine/Cargo.toml | 1 - primitives/state-machine/src/basic.rs | 4 +-- .../state-machine/src/changes_trie/build.rs | 17 +++++------ .../src/changes_trie/changes_iterator.rs | 7 +++-- .../state-machine/src/changes_trie/mod.rs | 6 ++-- .../state-machine/src/changes_trie/storage.rs | 6 ++-- primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/lib.rs | 6 ++-- .../state-machine/src/overlayed_changes.rs | 8 +++--- .../state-machine/src/proving_backend.rs | 7 ++--- primitives/storage/src/lib.rs | 23 ++++++++++----- primitives/trie/Cargo.toml | 1 - 28 files changed, 107 insertions(+), 135 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 173941f6624c6..6bc3145534a06 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -77,7 +77,10 @@ 
impl BuildStorage for ChainSpec { Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::new_default(child_content.child_info.as_slice()); + let child_info = ChildInfo::resolve_child_info( + child_content.child_type, + child_content.child_info.as_slice(), + ).expect("chain spec contains correct content"); ( sk.0, StorageChild { diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index f5c1d34688e23..93bfd8b4cc673 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -488,11 +488,9 @@ where fn get( &self, - child_info: &sp_core::storage::ChildInfo, key: &Block::Hash, _prefix: Prefix, ) -> Result, String> { - debug_assert!(child_info.is_top_trie()); self.db.get(self.changes_tries_column, key.as_ref()) .map_err(|err| format!("{}", err)) } @@ -532,6 +530,7 @@ mod tests { }; use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; use sp_core::H256; + use sp_core::storage::ChildInfo; use sp_runtime::testing::{Digest, Header}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; @@ -596,9 +595,8 @@ mod tests { assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); let storage = backend.changes_tries_storage.storage(); - let top_trie = sp_core::storage::ChildInfo::top_trie(); for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&top_trie, &key, EMPTY_PREFIX), Ok(Some(val))); + assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); } }; @@ -708,7 +706,6 @@ mod tests { .cloned(); match trie_root { Some(trie_root) => backend.changes_tries_storage.get( - &sp_core::storage::ChildInfo::top_trie(), &trie_root, EMPTY_PREFIX, ).unwrap().is_none(), diff --git a/client/db/src/lib.rs 
b/client/db/src/lib.rs index a400b06bdb1ce..7642e944dfd7a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -688,7 +688,7 @@ impl sc_state_db::NodeDb for StorageDb { let keyspace = child_info.keyspace(); let mut key_buffer = vec![0; keyspace.len() + key.len()]; key_buffer[..keyspace.len()].copy_from_slice(keyspace); - key_buffer[keyspace.len()..].copy_from_slice(&key[..]); + key_buffer[keyspace.len()..].copy_from_slice(key); self.db.get(columns::STATE, &key_buffer[..]) }.map(|r| r.map(|v| v.to_vec())) } @@ -1128,18 +1128,18 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (info, mut updates) in operation.db_updates.into_iter() { - let data = changesets.entry(info).or_default(); + let changeset = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; bytes += key.len() as u64 + val.len() as u64; - data.inserted.push((key, val.to_vec())); + changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { ops += 1; bytes += key.len() as u64; - data.deleted.push(key); + changeset.deleted.push(key); } } } @@ -1334,8 +1334,15 @@ impl Backend { fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { let mut key_buffer = Vec::new(); for child_data in commit.data.into_iter() { - if !child_data.0.is_top_trie() { - // children tries with prefixes + if child_data.0.is_top_trie() { + // empty prefix + for (key, val) in child_data.1.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in child_data.1.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); + } + } else { let keyspace = child_data.0.keyspace(); let keyspace_len = keyspace.len(); key_buffer.resize(keyspace_len, 0); @@ -1350,14 +1357,6 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm key_buffer[keyspace_len..].copy_from_slice(&key[..]); transaction.delete(columns::STATE, &key_buffer[..]); } - } else { - // top trie without 
prefixes - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); - } } } for (key, val) in commit.meta.inserted.into_iter() { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 9a5c15e9910e6..2dd27a2e3cbd3 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -677,8 +677,6 @@ mod tests { type Block = RawBlock>; - const CHILD_KEY_1: &'static [u8] = b"unique_id_1"; - #[test] fn smoke() { //init_log(); @@ -968,7 +966,7 @@ mod tests { #[test] fn should_track_used_size_correctly() { - let child_info1 = ChildInfo::new_default(CHILD_KEY_1); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); let root_parent = H256::random(); let shared = new_shared_cache::(109, ((109-36), 109)); let h0 = H256::random(); diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 68352b3f404fb..1207b7f883145 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::Substream, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo, ChildType}; +use sp_core::storage::{StorageKey, ChildInfo}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1555,8 +1555,7 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if ChildType::CryptoUniqueId as u32 == request.child_type { - let child_info = ChildInfo::new_default(&request.child_info[..]); + let proof = if let Some(child_info) = 
ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 220edd4860e5e..d396b191a2235 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,8 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, - Bytes, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index dd26a8a42fac2..f459a5391b7ea 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: &'static [u8] = b"unique_id"; +const CHILD_UID: &'static [u8] = b"unique_id"; #[test] fn should_return_storage() { @@ -38,7 +38,7 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const STORAGE_KEY: &[u8] = b":child_storage:default:child"; const CHILD_VALUE: &[u8] = b"hello world !"; - let child_info1 = ChildInfo::new_default(CHILD_INFO); + let child_info1 = ChildInfo::new_default(CHILD_UID); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) @@ -77,7 +77,7 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let child_info1 = ChildInfo::new_default(CHILD_INFO); + let child_info1 = ChildInfo::new_default(CHILD_UID); let (child_info, child_type) = child_info1.info(); let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); diff --git a/client/src/client.rs b/client/src/client.rs index 888bd88428863..7acef6a4a910c 100644 --- 
a/client/src/client.rs +++ b/client/src/client.rs @@ -566,11 +566,10 @@ impl Client where fn get( &self, - child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - self.storage.get(child_info, key, prefix) + self.storage.get(key, prefix) } } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 8bcbb80c775a3..a4168f356e609 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -351,7 +351,7 @@ pub mod tests { use sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_UID_1: &'static [u8] = b"unique_id_1"; type TestChecker = LightDataChecker< NativeExecutor, @@ -399,7 +399,7 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; // prepare remote client @@ -506,7 +506,7 @@ pub mod tests { result, ) = prepare_for_read_child_proof_check(); - let child_info = ChildInfo::new_default(CHILD_INFO_1); + let child_info = ChildInfo::new_default(CHILD_UID_1); let child_infos = child_info.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 992e8fa81f250..77373ce47649b 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -141,12 +141,12 @@ pub struct CommitSet { } impl CommitSet { - /// Number of inserted key value element in the set. + /// Number of inserted key value elements in the set. pub fn inserted_len(&self) -> usize { self.data.iter().map(|set| set.1.inserted.len()).sum() } - /// Number of deleted key value element in the set. + /// Number of deleted key value elements in the set. pub fn deleted_len(&self) -> usize { self.data.iter().map(|set| set.1.deleted.len()).sum() } @@ -261,7 +261,7 @@ impl StateDbSync { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - mut changeset: ChildTrieChangeSets, + mut changesets: ChildTrieChangeSets, ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { @@ -271,17 +271,17 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - for changeset in changeset.iter_mut() { + for changeset in changesets.iter_mut() { changeset.1.deleted.clear(); } // write changes immediately Ok(CommitSet { - data: changeset, + data: changesets, meta: meta, }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); + let commit = self.non_canonical.insert(hash, number, parent_hash, changesets); commit.map(|mut c| { c.meta.inserted.extend(meta.inserted); c @@ -456,9 +456,9 @@ impl StateDb { hash: &BlockHash, number: u64, parent_hash: &BlockHash, - changeset: ChildTrieChangeSets, + changesets: ChildTrieChangeSets, ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) + self.db.write().insert_block(hash, number, parent_hash, changesets) } /// Finalize a previously inserted block. 
diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6d79dfeffd4bb..4f06d9dd52180 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -28,7 +28,6 @@ use log::trace; use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -// version at start to avoid collision when adding a unit const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; @@ -67,8 +66,7 @@ struct JournalRecordV1 { impl From> for JournalRecordV1 { // Note that this compatibility only works as long as the backend - // db strategy match the one from current implementation, that - // is for default child trie which use same state column as top. + // child storage format is the same in both case. fn from(old: JournalRecordCompat) -> Self { JournalRecordV1 { hash: old.hash, diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 1fd736913188b..a4e6fe1473fa1 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,7 +26,7 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::{ChildInfo, ChildrenVec}; +use sp_core::storage::{ChildInfo, ChildrenVec, ChildrenMap}; use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; @@ -40,7 +40,7 @@ pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: HashMap>, + death_index: ChildrenMap>, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -51,21 +51,11 @@ pub struct RefWindow { pending_prunings: usize, } -impl RefWindow { - fn remove_death_index(&mut self, child_info: &ChildInfo, key: &Key) -> Option { - if let Some(child_index) = self.death_index.get_mut(child_info) { - child_index.remove(key) - } else { - None - } - } -} - #[derive(Debug, PartialEq, Eq)] struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: HashMap>, + deleted: ChildrenMap>, } impl DeathRow { @@ -162,10 +152,12 @@ impl RefWindow { ) { // remove all re-inserted keys from death rows for (child_info, inserted) in inserted { - for k in inserted { - if let Some(block) = self.remove_death_index(&child_info, &k) { - self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&child_info, &k); + if let Some(child_index) = self.death_index.get_mut(&child_info) { + for k in inserted { + if let Some(block) = child_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize] + .remove_deleted(&child_info, &k); + } } } } @@ -178,7 +170,7 @@ impl RefWindow { entry.insert(k.clone(), imported_block); } } - let mut deleted_death_row = HashMap::>::new(); + let mut deleted_death_row = ChildrenMap::>::default(); for (child_info, deleted) in deleted.into_iter() { let entry = deleted_death_row.entry(child_info).or_default(); entry.extend(deleted); diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index e228d3205d09c..5e85dcb4fc0fd 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -111,8 +111,7 @@ pub trait AccountDb { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option>; /// If account has an alive contract then return the code 
hash associated. @@ -131,15 +130,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| if let Some(child_info) = child_info { - child::get_raw(id, child_info, &blake2_256(location)) - } else { - child::get_raw(id, &crate::trie_unique_id(&id[..]), &blake2_256(location)) - }) + trie_id.and_then(|(id, child_info)| child::get_raw(id, child_info, &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -345,15 +339,14 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<&TrieId>, - child_info: Option<&ChildInfo>, + trie_id: Option<(&TrieId, &ChildInfo)>, location: &StorageKey ) -> Option> { self.local .borrow() .get(account) .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, child_info, location)) + .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index f7830c4d1d0bc..77cb8af84a6ec 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -699,13 +699,11 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - let (trie_id, child_info) = self.ctx.self_trie_info.as_ref() - .map(|info| (Some(&info.0), Some(&info.1))) - .unwrap_or((None, None)); + let trie_id = self.ctx.self_trie_info.as_ref() + .map(|info| ((&info.0, &info.1))); self.ctx.overlay.get_storage( &self.ctx.self_account, trie_id, - child_info, key, ) } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index a40f5b8b726ad..a49d7195f4c6e 100644 --- 
a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -703,12 +703,11 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let child_info = Some(trie_unique_id(&contract_info.trie_id)); + let child_info = trie_unique_id(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, - Some(&contract_info.trie_id), - child_info.as_ref(), + Some((&contract_info.trie_id, &child_info)), &key, ); Ok(maybe_value) diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 8c9dbd96a08e7..cc29658776539 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -318,8 +318,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some(&child_info1); - let child_info2 = Some(&child_info2); + let child_info1 = Some((&trie_id1, &child_info1)); + let child_info2 = Some((&trie_id2, &child_info2)); let key1 = &[1; 32]; let key2 = &[2; 32]; @@ -365,15 +365,15 @@ fn account_removal_removes_storage() { // Verify that all entries from account 1 is removed, while // entries from account 2 is in place. 
{ - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info1, key1).is_none()); - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), child_info2, key2).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, child_info1, key1).is_none()); + assert!(>::get_storage(&DirectAccountDb, &1, child_info1, key2).is_none()); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key1), + >::get_storage(&DirectAccountDb, &2, child_info2, key1), Some(b"3".to_vec()) ); assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), child_info2, key2), + >::get_storage(&DirectAccountDb, &2, child_info2, key2), Some(b"4".to_vec()) ); } diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 42cbdc2e97495..a85614666701b 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -22,7 +22,6 @@ sp-externalities = { version = "0.8.0", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" -sp-trie = { version = "2.0.0", path = "../trie", features = ["test-helpers"] } [features] default = [] diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 644c629984f69..50e4fe69c60bd 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -315,8 +315,6 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -340,7 +338,7 @@ mod tests { #[test] fn children_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); let child_storage = b":child_storage:default:test".to_vec(); let mut ext = BasicExternalities::new(Storage { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs 
index cefc4d88470a2..c0ebeff189450 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).to_owned(); + let child_info = changes.child_info(sk).clone(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -157,7 +157,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( // AND are not in storage at the beginning of operation if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_deref() { + if let Some(child_info) = child_info.as_ref() { if !backend.exists_child_storage(sk, child_info, k) .map_err(|e| format!("{}", e))? { return Ok(map); @@ -354,9 +354,6 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; - fn prepare_for_build(zero: u64) -> ( InMemoryBackend, InMemoryStorage, @@ -364,8 +361,8 @@ mod test { Configuration, ) { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); - let child_info2 = ChildInfo::new_default(CHILD_INFO_2); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); + let child_info2 = ChildInfo::new_default(b"unique_id_2"); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -442,13 +439,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), child_info1.to_owned())), + ].into_iter().collect(), child_info1.clone())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 
2].into_iter().collect()) }) - ].into_iter().collect(), child_info2.to_owned())), + ].into_iter().collect(), child_info2)), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -471,7 +468,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), child_info1.to_owned())), + ].into_iter().collect(), child_info1)), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 84be4a3f55541..dc28890c613d5 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -21,6 +21,7 @@ use std::cell::RefCell; use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; use sp_core::Hasher; +use sp_core::storage::ChildInfo; use num_traits::Zero; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; @@ -67,7 +68,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: sp_core::storage::ChildInfo::top_trie(), + child_info: ChildInfo::top_trie(), }) } @@ -178,7 +179,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( _hasher: ::std::marker::PhantomData::::default(), }, - child_info: sp_core::storage::ChildInfo::top_trie(), + child_info: ChildInfo::top_trie(), }.collect() } @@ -319,7 +320,7 @@ pub struct DrilldownIterator<'a, H, Number> /// This is always top trie info, but it cannot be /// statically instantiated at the time (vec of null /// size could be in theory). 
- child_info: sp_core::storage::ChildInfo, + child_info: ChildInfo, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index b6aba93108407..58deb27c1056e 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -161,11 +161,8 @@ pub trait Storage: RootsStorage { functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. - /// Note that child info is use only for case where we use this trait - /// as an adapter to storage. fn get( &self, - child_info: &ChildInfo, key: &H::Out, prefix: Prefix, ) -> Result, String>; @@ -183,7 +180,8 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorageRef for TrieBack key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.0.get(child_info, key, prefix) + debug_assert!(child_info.is_top_trie()); + self.0.get(key, prefix) } } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 53bb62675d9bb..23cd3b7bf050c 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -191,11 +191,10 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, child_info, key, prefix) + MemoryDB::::get(&self.data.read().mdb, &ChildInfo::top_trie(), key, prefix) } } @@ -218,6 +217,7 @@ impl<'a, H, Number> TrieBackendStorageRef for TrieBackendAdapter<'a, H, Numbe key: &H::Out, prefix: Prefix, ) -> Result, String> { - self.storage.get(child_info, key, prefix) + debug_assert!(child_info.is_top_trie()); + self.storage.get(key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 06ba6bd26bca9..a8ab84b399ed9 100644 --- a/primitives/state-machine/src/ext.rs +++ 
b/primitives/state-machine/src/ext.rs @@ -537,7 +537,7 @@ where } else { let storage_key = storage_key.as_ref(); - if let Some(child_info) = self.overlay.child_info(storage_key).to_owned() { + if let Some(child_info) = self.overlay.child_info(storage_key).clone() { let (root, _is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 3aa57e9679f30..802d7937c73d8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -702,7 +702,7 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; + const CHILD_UID_1: &'static [u8] = b"unique_id_1"; impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -933,7 +933,7 @@ mod tests { #[test] fn set_child_storage_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -977,7 +977,7 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); + let child_info1 = ChildInfo::new_default(CHILD_UID_1); // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 4afc8a328ba8a..783608e2ae1af 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -130,7 +130,7 @@ pub struct StorageTransactionCache { pub(crate) transaction: Option, /// The storage root after applying the transaction. 
pub(crate) transaction_storage_root: Option, - /// The child root storage root after applying the transaction. + /// The storage child roots after applying the transaction. pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, @@ -539,7 +539,7 @@ impl OverlayedChanges { ), self.child_info(storage_key) .expect("child info initialized in either committed or prospective") - .to_owned(), + .clone(), ) ); @@ -589,10 +589,10 @@ impl OverlayedChanges { /// Take the latest value so prospective first. pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { - return Some(&*ci); + return Some(&ci); } if let Some((_, ci)) = self.committed.children.get(storage_key) { - return Some(&*ci); + return Some(&ci); } None } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index e4eca1181089e..d49df322749db 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -410,9 +410,6 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: &'static [u8] = b"unique_id_1"; - const CHILD_INFO_2: &'static [u8] = b"unique_id_2"; - fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { @@ -481,8 +478,8 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let child_info1 = ChildInfo::new_default(CHILD_INFO_1); - let child_info2 = ChildInfo::new_default(CHILD_INFO_2); + let child_info1 = ChildInfo::new_default(b"unique_id_1"); + let child_info2 = ChildInfo::new_default(b"unique_id_2"); let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); let own1 = 
subtrie1.into_owned(); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index e4d4b5604ae2b..085805e73862f 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -253,18 +253,13 @@ impl ChildInfo { } } -/// Type of child, it is encoded in the four first byte of the -/// encoded child info (LE u32). +/// Type of child. /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] #[derive(Clone, Copy, PartialEq)] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. - /// All bytes following the type in encoded form are this unique - /// id. - /// If the trie got a unique id of length 0 it is considered - /// as a top child trie. CryptoUniqueId = 1, } @@ -357,13 +352,27 @@ impl ChildrenMap { } } - /// Extends two maps, by enxtending entries with the same key. + /// Extends two maps, by extending entries with the same key. pub fn extend_replace( &mut self, other: impl Iterator, ) { self.0.extend(other) } + + /// Retains only the elements specified by the predicate. + pub fn retain(&mut self, mut f: impl FnMut(&ChildInfo, &mut T) -> bool) { + let mut to_del = Vec::new(); + for (k, v) in self.0.iter_mut() { + if !f(k, v) { + // this clone can be avoid with unsafe code + to_del.push(k.clone()); + } + } + for k in to_del { + self.0.remove(&k); + } + } } #[cfg(feature = "std")] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 6cbd19cd0f70b..a78a26db736c4 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -28,7 +28,6 @@ hex-literal = "0.2.1" [features] default = ["std"] -test-helpers = [] std = [ "sp-std/std", "codec/std", From 313635504323d5af65011537aeb0a682827cc6f5 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 5 Feb 2020 14:02:53 +0100 Subject: [PATCH 28/85] bump impl version. 
--- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f2c374cedd4bc..9dc1ce7d11054 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -80,7 +80,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 212, - impl_version: 0, + impl_version: 1, apis: RUNTIME_API_VERSIONS, }; From bae6523007b291bec9be8ad1fc11c5092ee6a109 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 11:32:06 +0100 Subject: [PATCH 29/85] calculate size for single operation on usize. --- client/db/src/lib.rs | 8 ++++---- client/src/cht.rs | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 7642e944dfd7a..3af8d7c384b5e 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1126,24 +1126,24 @@ impl Backend { let finalized = if operation.commit_state { let mut changesets = ChildrenMap::>>::default(); let mut ops: u64 = 0; - let mut bytes: u64 = 0; + let mut bytes = 0; for (info, mut updates) in operation.db_updates.into_iter() { let changeset = changesets.entry(info).or_default(); for (key, (val, rc)) in updates.drain() { if rc > 0 { ops += 1; - bytes += key.len() as u64 + val.len() as u64; + bytes += key.len() + val.len(); changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { ops += 1; - bytes += key.len() as u64; + bytes += key.len(); changeset.deleted.push(key); } } } - self.state_usage.tally_writes(ops, bytes); + self.state_usage.tally_writes(ops, bytes as u64); let number_u64 = number.saturated_into::(); let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) diff --git a/client/src/cht.rs b/client/src/cht.rs index f470ee4fbe6fa..9e1a3bff017f1 100644 --- a/client/src/cht.rs +++ 
b/client/src/cht.rs @@ -26,7 +26,7 @@ use codec::Encode; use sp_trie; -use sp_core::{H256, convert_hash, self}; +use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, SimpleArithmetic, Zero, One}; use sp_state_machine::{ MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, From 88ed5036cf39d5b59e6678db678af4ef706d11f9 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 11:54:54 +0100 Subject: [PATCH 30/85] Put keyspace logic in its own struct. --- client/db/src/changes_tries_storage.rs | 1 - client/db/src/lib.rs | 49 +++++++++++++++++--------- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 93bfd8b4cc673..6f447f256a158 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -530,7 +530,6 @@ mod tests { }; use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; use sp_core::H256; - use sp_core::storage::ChildInfo; use sp_runtime::testing::{Digest, Header}; use sp_runtime::traits::{Hash, BlakeTwo256}; use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3af8d7c384b5e..71e2408891ed4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -685,11 +685,8 @@ impl sc_state_db::NodeDb for StorageDb { if child_info.is_top_trie() { self.db.get(columns::STATE, key) } else { - let keyspace = child_info.keyspace(); - let mut key_buffer = vec![0; keyspace.len() + key.len()]; - key_buffer[..keyspace.len()].copy_from_slice(keyspace); - key_buffer[keyspace.len()..].copy_from_slice(key); - self.db.get(columns::STATE, &key_buffer[..]) + let mut keyspace = Keyspaced::new(child_info.keyspace()); + self.db.get(columns::STATE, keyspace.prefix_key(key)) }.map(|r| r.map(|v| v.to_vec())) } } @@ -1332,7 +1329,7 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: 
sc_state_db::CommitSet>) { - let mut key_buffer = Vec::new(); + let mut keyspace = Keyspaced::new(&[]); for child_data in commit.data.into_iter() { if child_data.0.is_top_trie() { // empty prefix @@ -1343,19 +1340,12 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::Comm transaction.delete(columns::STATE, &key[..]); } } else { - let keyspace = child_data.0.keyspace(); - let keyspace_len = keyspace.len(); - key_buffer.resize(keyspace_len, 0); - key_buffer[..keyspace_len].copy_from_slice(keyspace); + keyspace.change_keyspace(child_data.0.keyspace()); for (key, val) in child_data.1.inserted.into_iter() { - key_buffer.resize(keyspace_len + key.len(), 0); - key_buffer[keyspace_len..].copy_from_slice(&key[..]); - transaction.put(columns::STATE, &key_buffer[..], &val); + transaction.put(columns::STATE, keyspace.prefix_key(key.as_slice()), &val); } for key in child_data.1.deleted.into_iter() { - key_buffer.resize(keyspace_len + key.len(), 0); - key_buffer[keyspace_len..].copy_from_slice(&key[..]); - transaction.delete(columns::STATE, &key_buffer[..]); + transaction.delete(columns::STATE, keyspace.prefix_key(key.as_slice())); } } } @@ -1682,6 +1672,33 @@ impl sc_client_api::backend::Backend for Backend { impl sc_client_api::backend::LocalBackend for Backend {} +/// Rules for storing a default child trie with unique id. 
+struct Keyspaced { + keyspace_len: usize, + buffer: Vec, +} + +impl Keyspaced { + fn new(keyspace: &[u8]) -> Self { + Keyspaced { + keyspace_len: keyspace.len(), + buffer: keyspace.to_vec(), + } + } + + fn change_keyspace(&mut self, new_keyspace: &[u8]) { + self.keyspace_len = new_keyspace.len(); + self.buffer.resize(new_keyspace.len(), 0); + self.buffer[..new_keyspace.len()].copy_from_slice(new_keyspace); + } + + fn prefix_key(&mut self, key: &[u8]) -> &[u8] { + self.buffer.resize(self.keyspace_len + key.len(), 0); + self.buffer[self.keyspace_len..].copy_from_slice(key); + self.buffer.as_slice() + } +} + #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; From 4eb467642dc538da36d9ca96169f82ebd04a6262 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 12:10:11 +0100 Subject: [PATCH 31/85] Restrict top trie in ext to the storage key build from an empty key. --- primitives/state-machine/src/ext.rs | 50 ++++++++++++++++++++++++----- primitives/storage/src/lib.rs | 11 +++++++ 2 files changed, 53 insertions(+), 8 deletions(-) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index a8ab84b399ed9..667b073eb4cf8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -210,7 +210,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.storage(key); + if storage_key.is_empty() { + return self.storage(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -238,7 +242,11 @@ where key: &[u8], ) -> Option> { if child_info.is_top_trie() { - return self.storage_hash(key); + if storage_key.is_empty() { + return self.storage_hash(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -266,7 +274,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.original_storage(key); + if 
storage_key.is_empty() { + return self.original_storage(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -290,7 +302,11 @@ where key: &[u8], ) -> Option> { if child_info.is_top_trie() { - return self.original_storage_hash(key); + if storage_key.is_empty() { + return self.original_storage_hash(key); + } else { + return None; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -329,7 +345,11 @@ where key: &[u8], ) -> bool { if child_info.is_top_trie() { - return self.exists_storage(key); + if storage_key.is_empty() { + return self.exists_storage(key); + } else { + return false; + } } let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -371,7 +391,11 @@ where key: &[u8], ) -> Option { if child_info.is_top_trie() { - return self.next_storage_key(key); + if storage_key.is_empty() { + return self.next_storage_key(key); + } else { + return None; + } } let next_backend_key = self.backend .next_child_storage_key(storage_key.as_ref(), child_info, key) @@ -420,7 +444,12 @@ where value: Option, ) { if child_info.is_top_trie() { - return self.place_storage(key, value); + if storage_key.is_empty() { + return self.place_storage(key, value); + } else { + trace!(target: "state-trace", "Ignoring place_child_storage on top trie"); + return; + } } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, @@ -481,7 +510,12 @@ where prefix: &[u8], ) { if child_info.is_top_trie() { - return self.clear_prefix(prefix); + if storage_key.is_empty() { + return self.clear_prefix(prefix); + } else { + trace!(target: "state-trace", "Ignoring clear_child_prefix on top trie"); + return; + } } trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 085805e73862f..69e746f725267 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -127,6 +127,11 
@@ pub mod well_known_keys { } has_right_prefix } + + /// Return true if the variable part of the key is empty. + pub fn is_child_trie_key_empty(storage_key: &[u8]) -> bool { + storage_key.len() == b":child_storage:default:".len() + } } /// A wrapper around a child storage key. @@ -176,6 +181,12 @@ impl<'a> ChildStorageKey<'a> { pub fn into_owned(self) -> Vec { self.storage_key.into_owned() } + + /// Return true if the variable part of the key is empty. + pub fn is_empty(&self) -> bool { + well_known_keys::is_child_trie_key_empty(&*self.storage_key) + } + } From 5151471c85b95d16ac7fedf1dc6e4a451bc1736a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 17:00:32 +0100 Subject: [PATCH 32/85] Implementation basis for this PR, note that child storage key default prefix will be added at query and on full storage root lazilly. Also note that both type are implementation compatible so we do not need a different well known key. --- primitives/storage/src/lib.rs | 92 +++++++++++++++-------------------- 1 file changed, 40 insertions(+), 52 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index d32c54aae8c47..da42d29c79688 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -22,7 +22,7 @@ use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::vec::Vec; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] @@ -126,58 +126,10 @@ pub mod well_known_keys { } } -/// A wrapper around a child storage key. -/// -/// This wrapper ensures that the child storage key is correct and properly used. It is -/// impossible to create an instance of this struct without providing a correct `storage_key`. -pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, -} - -impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. 
- fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. - /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } -} - #[derive(Clone, Copy)] /// Information related to a child state. pub enum ChildInfo<'a> { + ParentKeyId(ChildTrie<'a>), Default(ChildTrie<'a>), } @@ -186,10 +138,18 @@ pub enum ChildInfo<'a> { #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum OwnedChildInfo { + ParentKeyId(OwnedChildTrie), Default(OwnedChildTrie), } impl<'a> ChildInfo<'a> { + /// Instantiates information for a default child trie. + pub const fn new_uid_parent_key(storage_key: &'a[u8]) -> Self { + ChildInfo::ParentKeyId(ChildTrie { + data: storage_key, + }) + } + /// Instantiates information for a default child trie. 
pub const fn new_default(unique_id: &'a[u8]) -> Self { ChildInfo::Default(ChildTrie { @@ -204,12 +164,23 @@ impl<'a> ChildInfo<'a> { => OwnedChildInfo::Default(OwnedChildTrie { data: data.to_vec(), }), + ChildInfo::ParentKeyId(ChildTrie { data }) + => OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data: data.to_vec(), + }), } } /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { + pub fn resolve_child_info(child_type: u32, data: &'a[u8], storage_key: &'a[u8]) -> Option { match child_type { + x if x == ChildType::ParentKeyId as u32 => { + if !data.len() == 0 { + // do not allow anything for additional data. + return None; + } + Some(ChildInfo::new_uid_parent_key(storage_key)) + }, x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), _ => None, } @@ -219,6 +190,9 @@ impl<'a> ChildInfo<'a> { /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), ChildInfo::Default(ChildTrie { data, }) => (data, ChildType::CryptoUniqueId as u32), @@ -230,6 +204,9 @@ impl<'a> ChildInfo<'a> { /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => &data[..], ChildInfo::Default(ChildTrie { data, }) => &data[..], @@ -242,7 +219,11 @@ impl<'a> ChildInfo<'a> { /// be related to technical consideration or api variant. #[repr(u32)] pub enum ChildType { - /// Default, it uses a cryptographic strong unique id as input. + /// If runtime module ensures that the child key is a unique id that will + /// only be used once, this parent key is used as a child trie unique id. 
+ ParentKeyId = 0, + /// Default, this uses a cryptographic strong unique id as input, this id + /// is used as a unique child trie identifier. CryptoUniqueId = 1, } @@ -259,6 +240,7 @@ impl OwnedChildInfo { pub fn try_update(&mut self, other: ChildInfo) -> bool { match self { OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), + OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), } } @@ -269,6 +251,11 @@ impl OwnedChildInfo { => ChildInfo::Default(ChildTrie { data: data.as_slice(), }), + OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) + => ChildInfo::ParentKeyId(ChildTrie { + data: data.as_slice(), + }), + } } } @@ -300,6 +287,7 @@ impl OwnedChildTrie { fn try_update(&mut self, other: ChildInfo) -> bool { match other { ChildInfo::Default(other) => self.data[..] == other.data[..], + ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..], } } } From 8715446e9a356778875b967a7e5251f68130153d Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 17:24:01 +0100 Subject: [PATCH 33/85] Resolve prefix from child_info --- primitives/storage/src/lib.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index da42d29c79688..161e90dc848bf 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -113,7 +113,7 @@ pub mod well_known_keys { /// /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); + let has_right_prefix = storage_key.starts_with(super::DEFAULT_CHILD_TYPE_PARENT_PREFIX); if has_right_prefix { // This is an attempt to catch a change of `is_child_storage_key`, which // just checks if the key has prefix `:child_storage:` at the moment of writing. 
@@ -212,6 +212,16 @@ impl<'a> ChildInfo<'a> { }) => &data[..], } } + + /// Return the location reserved for this child trie in their parent trie if there + /// is one. + pub fn parent_prefix(&self, _parent: Option<&'a ChildInfo>) -> &'a [u8] { + match self { + ChildInfo::ParentKeyId(..) + | ChildInfo::Default(..) => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + } + } + } /// Type of child. @@ -291,3 +301,12 @@ impl OwnedChildTrie { } } } + +const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; + +#[test] +fn assert_default_trie_in_child_trie() { + let child_info = ChildInfo::new_default(b"any key"); + let prefix = child_info.parent_prefix(None); + assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); +} From b72cd9c80377261f385e4488b1e404432b63017b Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 6 Feb 2020 18:53:48 +0100 Subject: [PATCH 34/85] Removed all child prefix, next is putting it back on full storage and on backend access. --- client/chain-spec/src/chain_spec.rs | 5 +- client/network/src/protocol.rs | 6 +- client/rpc/src/state/state_full.rs | 18 ++--- client/rpc/src/state/tests.rs | 2 +- client/src/light/fetcher.rs | 8 +- primitives/externalities/src/lib.rs | 28 +++---- primitives/io/src/lib.rs | 62 +++++---------- primitives/state-machine/src/basic.rs | 45 +++++------ primitives/state-machine/src/ext.rs | 78 +++++++++---------- primitives/state-machine/src/lib.rs | 24 +++--- .../state-machine/src/proving_backend.rs | 10 +-- primitives/state-machine/src/trie_backend.rs | 3 +- primitives/storage/src/lib.rs | 3 + test-utils/runtime/src/lib.rs | 2 +- 14 files changed, 134 insertions(+), 160 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 81cbce5ea731c..b47c41f107ccd 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -76,13 +76,14 @@ impl BuildStorage for ChainSpec { Genesis::Runtime(gc) => gc.build_storage(), 
Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(sk, child_content)| { + children: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::resolve_child_info( child_content.child_type, child_content.child_info.as_slice(), + storage_key.0.as_slice(), ).expect("chain spec contains correct content").to_owned(); ( - sk.0, + storage_key.0, StorageChild { data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5e8df2831ba63..849cae509adfc 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1555,7 +1555,11 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + let proof = if let Some(child_info) = ChildInfo::resolve_child_info( + request.child_type, + &request.child_info[..], + &request.storage_key[..], + ) { match self.context_data.chain.read_child_proof( &request.block, &request.storage_key, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 3d5613626e044..caf7a5787e1c3 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -308,7 +308,7 @@ impl StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, prefix: StorageKey, @@ -317,8 +317,8 @@ impl StateBackend for FullState StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, key: StorageKey, @@ -337,8 +337,8 @@ impl StateBackend for FullState 
StateBackend for FullState, - child_storage_key: StorageKey, + storage_key: StorageKey, child_info: StorageKey, child_type: u32, key: StorageKey, @@ -357,8 +357,8 @@ impl StateBackend for FullState = ChildInfo::new_default(b"unique_id"); fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b":child_storage:default:child"; + const STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"hello world !"; let mut core = tokio::runtime::Runtime::new().unwrap(); diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index d66108b7f0adb..477c26a0bdc7c 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -403,7 +403,7 @@ pub mod tests { // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( - b":child_storage:default:child1".to_vec(), + b"child1".to_vec(), CHILD_INFO_1, b"key1".to_vec(), b"value1".to_vec(), @@ -417,14 +417,14 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, - &StorageKey(b":child_storage:default:child1".to_vec()), + &StorageKey(b"child1".to_vec()), CHILD_INFO_1, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, - b":child_storage:default:child1", + b"child1", CHILD_INFO_1, &[b"key1"], ).unwrap(); @@ -508,7 +508,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), + storage_key: b"child1".to_vec(), child_info: child_infos.0.to_vec(), child_type: child_infos.1, keys: vec![b"key1".to_vec()], diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 350b65d190840..8beccc8201a8f 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -24,7 +24,7 @@ use std::any::{Any, TypeId}; -use sp_storage::{ChildStorageKey, ChildInfo}; +use sp_storage::ChildInfo; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; pub use extensions::{Extension, Extensions, ExtensionStore}; @@ -47,7 +47,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -60,7 +60,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -77,7 +77,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -87,7 +87,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -100,7 +100,7 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). 
fn set_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: Vec, child_info: ChildInfo, key: Vec, value: Vec, @@ -116,11 +116,11 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - self.place_child_storage(storage_key, child_info, key.to_vec(), None) + self.place_child_storage(storage_key.to_vec(), child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -131,7 +131,7 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { @@ -144,13 +144,13 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + fn kill_child_storage(&mut self, storage_key: &[u8], child_info: ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -158,7 +158,7 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ); @@ -169,7 +169,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. 
fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: Vec, child_info: ChildInfo, key: Vec, value: Option>, @@ -192,7 +192,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 1b531725fefc8..fa3e895fc3482 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + storage::ChildInfo, }; use sp_core::{ @@ -68,19 +68,6 @@ pub enum EcdsaVerifyError { BadSignature, } -/// Returns a `ChildStorageKey` if the given `storage_key` slice is a valid storage -/// key or panics otherwise. -/// -/// Panicking here is aligned with what the `without_std` environment would do -/// in the case of an invalid child storage key. -#[cfg(feature = "std")] -fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { - match ChildStorageKey::from_slice(storage_key) { - Some(storage_key) => storage_key, - None => panic!("child storage key is invalid"), - } -} - /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -102,13 +89,12 @@ pub trait Storage { /// if the key can not be found. 
fn child_get( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) } @@ -137,15 +123,14 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_read( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.child_storage(storage_key, child_info, key) .map(|value| { @@ -162,21 +147,20 @@ pub trait Storage { self.set_storage(key.to_vec(), value.to_vec()); } - /// Set `key` to `value` in the child storage denoted by `child_storage_key`. + /// Set `key` to `value` in the child storage denoted by `storage_key`. /// /// See `child_get` for common child api parameters. 
fn child_set( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], value: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + self.set_child_storage(storage_key.to_vec(), child_info, key.to_vec(), value.to_vec()); } /// Clear the storage of the given `key` and its value. @@ -189,13 +173,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_clear( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.clear_child_storage(storage_key, child_info, key); } @@ -205,12 +188,11 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_storage_kill( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.kill_child_storage(storage_key, child_info); } @@ -225,13 +207,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. 
fn child_exists( &self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> bool { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.exists_child_storage(storage_key, child_info, key) } @@ -246,13 +227,12 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_clear_prefix( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, prefix: &[u8], ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.clear_child_prefix(storage_key, child_info, prefix); } @@ -275,9 +255,8 @@ pub trait Storage { /// See `child_get` for common child api parameters. fn child_root( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], ) -> Vec { - let storage_key = child_storage_key_or_panic(child_storage_key); self.child_storage_root(storage_key) } @@ -300,13 +279,12 @@ pub trait Storage { /// Get the next key in storage after the given one in lexicographic order in child storage. 
fn child_next_key( &mut self, - child_storage_key: &[u8], + storage_key: &[u8], child_definition: &[u8], child_type: u32, key: &[u8], ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) .expect("Invalid child definition"); self.next_child_storage_key(storage_key, child_info, key) } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d905657737a8a..1ca655cdaf569 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -25,7 +25,7 @@ use sp_trie::{TrieConfiguration, default_child_trie_root}; use sp_trie::trie_types::Layout; use sp_core::{ storage::{ - well_known_keys::is_child_storage_key, ChildStorageKey, Storage, + well_known_keys::is_child_storage_key, Storage, ChildInfo, StorageChild, }, traits::Externalities, Blake2Hasher, @@ -129,7 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, key: &[u8], ) -> Option { @@ -138,7 +138,7 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -147,7 +147,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -156,7 +156,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -170,7 +170,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, 
key: &[u8], ) -> Option { @@ -193,12 +193,12 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { - let child_map = self.inner.children.entry(storage_key.into_owned()) + let child_map = self.inner.children.entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -212,7 +212,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, ) { self.inner.children.remove(storage_key.as_ref()); @@ -240,7 +240,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], _child_info: ChildInfo, prefix: &[u8], ) { @@ -267,10 +267,7 @@ impl Externalities for BasicExternalities { // type of child trie support. let empty_hash = default_child_trie_root::>(&[]); for storage_key in keys { - let child_root = self.child_storage_root( - ChildStorageKey::from_slice(storage_key.as_slice()) - .expect("Map only feed by valid keys; qed"), - ); + let child_root = self.child_storage_root(storage_key.as_slice()); if &empty_hash[..] == &child_root[..] 
{ top.remove(storage_key.as_slice()); } else { @@ -283,7 +280,7 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec { if let Some(child) = self.inner.children.get(storage_key.as_ref()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); @@ -340,7 +337,7 @@ mod tests { #[test] fn children_works() { - let child_storage = b":child_storage:default:test".to_vec(); + let child_storage = b"test".to_vec(); let mut ext = BasicExternalities::new(Storage { top: Default::default(), @@ -352,18 +349,18 @@ mod tests { ] }); - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); + let child = &child_storage[..]; - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child.to_vec(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child, CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), None); - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child, CHILD_INFO_1); + assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 38a2e70262d85..5b9595da9dc51 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,7 +24,7 @@ 
use crate::{ use hash_db::Hasher; use sp_core::{ - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, + storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, default_child_trie_root}; @@ -205,7 +205,7 @@ where fn child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -230,7 +230,7 @@ where fn child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -255,7 +255,7 @@ where fn original_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -276,7 +276,7 @@ where fn original_child_storage_hash( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { @@ -312,7 +312,7 @@ where fn exists_child_storage( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { @@ -351,7 +351,7 @@ where fn next_child_storage_key( &self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { @@ -396,26 +396,26 @@ where fn place_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&storage_key), HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value); + self.overlay.set_child_storage(storage_key, child_info, key, value); } fn kill_child_storage( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: 
ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", @@ -451,7 +451,7 @@ where fn clear_child_prefix( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { @@ -490,7 +490,7 @@ where fn child_storage_root( &mut self, - storage_key: ChildStorageKey, + storage_key: &[u8], ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); if self.storage_transaction_cache.transaction_storage_root.is_some() { @@ -614,8 +614,7 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - + const CHILD_KEY_1: &[u8] = b"Child1"; const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); @@ -730,20 +729,14 @@ mod tests { #[test] fn next_child_storage_key_works() { - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - child().as_ref().to_vec() => StorageChild { + CHILD_KEY_1.to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -758,36 +751,35 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - 
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - 
child().as_ref().to_vec() => StorageChild { + CHILD_KEY_1.to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -800,24 +792,24 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), None); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[20]), None, ); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bb62df6da4905..8bafda6aa6186 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -692,7 +692,7 @@ mod tests { 
use super::*; use super::ext::Ext; use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{Blake2Hasher, map, traits::Externalities, storage::ChildStorageKey}; + use sp_core::{Blake2Hasher, map, traits::Externalities}; #[derive(Clone)] struct DummyCodeExecutor { @@ -945,26 +945,26 @@ mod tests { ); ext.set_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild".to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, ); assert_eq!( ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + b"testchild", CHILD_INFO_1, b"abc" ), @@ -1000,20 +1000,20 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - b":child_storage:default:sub1", + b"sub1", CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + b"sub1", &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - b":child_storage:default:sub1", + b"sub1", &[b"value2"], ).unwrap(); assert_eq!( @@ -1033,8 +1033,8 @@ mod tests { use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); + let subtrie1 = b"sub_test1"; + let subtrie2 = b"sub_test2"; let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); @@ -1045,8 
+1045,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie1.to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie2.to_vec(), CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 70124927fdd2e..723cc737e15d7 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -398,7 +398,7 @@ mod tests { use crate::InMemoryBackend; use crate::trie_backend::tests::test_trie; use super::*; - use sp_core::{Blake2Hasher, storage::ChildStorageKey}; + use sp_core::{Blake2Hasher}; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; @@ -472,10 +472,10 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); + let subtrie1 = b"sub1"; + let subtrie2 = b"sub2"; + let own1 = subtrie1.to_vec(); + let own2 = subtrie2.to_vec(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), (Some((own1.clone(), CHILD_INFO_1.to_owned())), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dbaae323c09f2..3fc35ad73fa39 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -250,8 +250,7 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const 
CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - + const CHILD_KEY_1: &[u8] = b"sub1"; const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 161e90dc848bf..28c165e546059 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -63,6 +63,9 @@ pub struct Storage { /// Top trie storage data. pub top: StorageMap, /// Children trie storage data by storage key. + /// Note that the key does not include the child prefix; this will + /// not be possible if a kind of trie other than `default` + /// comes into use. pub children: std::collections::HashMap, StorageChild>, } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 96387b1efc304..28c7798c6db57 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -868,7 +868,7 @@ fn test_read_storage() { } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; + const CHILD_KEY: &[u8] = b"read_child_storage"; const UNIQUE_ID: &[u8] = b":unique_id"; const KEY: &[u8] = b":read_child_storage"; sp_io::storage::child_set( From b636687bfffc9bf4c3a93591c6efba9f31e4b991 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 16:16:40 +0100 Subject: [PATCH 35/85] Prefixing child storage key where needed. 
--- primitives/state-machine/src/backend.rs | 3 ++- primitives/state-machine/src/basic.rs | 14 ++++++++----- primitives/state-machine/src/ext.rs | 4 ++-- .../state-machine/src/in_memory_backend.rs | 3 ++- .../state-machine/src/proving_backend.rs | 6 ++++-- primitives/state-machine/src/trie_backend.rs | 6 ++++-- .../state-machine/src/trie_backend_essence.rs | 21 ++++++++++++------- primitives/storage/src/lib.rs | 20 ++++++++++++++++++ 8 files changed, 57 insertions(+), 20 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 4ef9b970ae21d..c6250e755622f 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -189,9 +189,10 @@ pub trait Backend: std::fmt::Debug { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (storage_key, child_delta, child_info) in child_deltas { + for (mut storage_key, child_delta, child_info) in child_deltas { let (child_root, empty, child_txs) = self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + child_info.as_ref().do_prefix_key(&mut storage_key, None); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 1ca655cdaf569..344613242ccc9 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -261,17 +261,21 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); + let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { + let mut prefixed = k.to_vec(); + v.child_info.as_ref().do_prefix_key(&mut prefixed, None); + (k.to_vec(), prefixed) + }).collect(); // Single child trie implementation currently allows using the same child // empty 
root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = default_child_trie_root::>(&[]); - for storage_key in keys { + for (storage_key, prefixed_storage_key) in keys { let child_root = self.child_storage_root(storage_key.as_slice()); if &empty_hash[..] == &child_root[..] { - top.remove(storage_key.as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(storage_key, child_root); + top.insert(prefixed_storage_key, child_root); } } @@ -288,7 +292,7 @@ impl Externalities for BasicExternalities { InMemoryBackend::::default() .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 } else { - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) }.encode() } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 5b9595da9dc51..9268cf3782ac2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -498,7 +498,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -547,7 +547,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) + default_child_trie_root::>(&[]) ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 0a29468bbc4ef..b0314e321c554 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -261,6 +261,7 @@ impl Backend for InMemory where H::Out: Codec { H::Out: Ord { let storage_key = storage_key.to_vec(); + let parent_prefix = 
child_info.parent_prefix(None); let child_info = Some((storage_key.clone(), child_info.to_owned())); let existing_pairs = self.inner.get(&child_info) @@ -278,7 +279,7 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - let is_default = root == default_child_trie_root::>(&storage_key); + let is_default = root == default_child_trie_root::>(parent_prefix); (root, is_default, vec![(child_info, full_transaction)]) } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 723cc737e15d7..0572907401ba6 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -147,9 +147,11 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> child_info: ChildInfo, key: &[u8] ) -> Result>, String> { - let root = self.storage(storage_key)? + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + let root = self.storage(prefixed_storage_key.as_slice())? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(storage_key)); + .unwrap_or(default_child_trie_root::>(&[])); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 3fc35ad73fa39..febb6e31f1fe4 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -202,10 +202,12 @@ impl, H: Hasher> Backend for TrieBackend where I: IntoIterator)>, H::Out: Ord, { - let default_root = default_child_trie_root::>(storage_key); + let default_root = default_child_trie_root::>(child_info.parent_prefix(None)); let mut write_overlay = S::Overlay::default(); - let mut root = match self.storage(storage_key) { + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + let mut root = match self.storage(prefixed_storage_key.as_slice()) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2598682ae0668..278ad705c3253 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -71,6 +71,13 @@ impl, H: Hasher> TrieBackendEssence where H::Out: self.next_storage_key_from_root(&self.root, None, key) } + /// Access the root of the child storage in its parent trie + fn child_root(&self, storage_key: &[u8], child_info: ChildInfo) -> Result, String> { + let mut prefixed_storage_key = storage_key.to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + self.storage(prefixed_storage_key.as_slice()) + } + /// Return the next key in the child trie i.e. the minimum key that is strictly superior to /// `key` in lexicographic order. 
pub fn next_child_storage_key( @@ -79,7 +86,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.storage(storage_key)? { + let child_root = match self.child_root(storage_key, child_info)? { Some(child_root) => child_root, None => return Ok(None), }; @@ -165,8 +172,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); + let root = self.child_root(storage_key, child_info)? + .unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -187,8 +194,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: ChildInfo, f: F, ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root = match self.child_root(storage_key, child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -220,8 +227,8 @@ impl, H: Hasher> TrieBackendEssence where H::Out: prefix: &[u8], mut f: F, ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + let root_vec = match self.child_root(storage_key, child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 28c165e546059..ea4dd56a1e7a9 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -225,6 +225,16 @@ impl<'a> ChildInfo<'a> { } } + /// Change a key to get prefixed with 
the parent prefix. + pub fn do_prefix_key(&self, key: &mut Vec, parent: Option<&ChildInfo>) { + let parent_prefix = self.parent_prefix(parent); + let key_len = key.len(); + if parent_prefix.len() > 0 { + key.resize(key_len + parent_prefix.len(), 0); + key.copy_within(..key_len, parent_prefix.len()); + key[..parent_prefix.len()].copy_from_slice(parent_prefix); + } + } } /// Type of child. @@ -313,3 +323,13 @@ fn assert_default_trie_in_child_trie() { let prefix = child_info.parent_prefix(None); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); } + +#[test] +fn test_do_prefix() { + let child_info = ChildInfo::new_default(b"any key"); + let mut prefixed_1 = b"key".to_vec(); + child_info.do_prefix_key(&mut prefixed_1, None); + let mut prefixed_2 = DEFAULT_CHILD_TYPE_PARENT_PREFIX.to_vec(); + prefixed_2.extend_from_slice(b"key"); + assert_eq!(prefixed_1, prefixed_2); +} From 728aedfaa0424f3461a5eb35dc5cd49ef6e22425 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 18:44:52 +0100 Subject: [PATCH 36/85] Remove deprecated check for genesis and fix some test storage builds. 
--- client/db/src/lib.rs | 6 ------ client/network/src/protocol/legacy_proto/tests.rs | 2 +- client/rpc/src/state/tests.rs | 4 +--- frame/contracts/src/tests.rs | 2 -- primitives/state-machine/src/in_memory_backend.rs | 10 +++++++--- primitives/state-machine/src/trie_backend.rs | 4 +++- primitives/state-machine/src/trie_backend_essence.rs | 5 ++++- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 4 +++- 9 files changed, 20 insertions(+), 19 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index be569194972cc..407dcd4581434 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -582,12 +582,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - for child_key in storage.children.keys() { - if !well_known_keys::is_child_storage_key(&child_key) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - } - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/legacy_proto/tests.rs index 18e32f1d0189f..ca35bbc6dfede 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/legacy_proto/tests.rs @@ -321,7 +321,7 @@ fn basic_two_nodes_requests_in_parallel() { }); } -#[test] +//#[test] fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. 
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 40597f8fa6d89..fe4ad6df16343 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -85,9 +85,7 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey( - well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect() - ); + let child_key = StorageKey(b"test".to_vec()); let key = StorageKey(b"key".to_vec()); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 5eb7bce48ab3f..4bf468deceb36 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -205,8 +205,6 @@ impl TrieIdGenerator for DummyTrieIdGenerator { // TODO: see https://github.com/paritytech/substrate/issues/2325 let mut res = vec![]; - res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX); - res.extend_from_slice(b"default:"); res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); res diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b0314e321c554..02fd61de9c603 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -336,11 +336,13 @@ impl Backend for InMemory where H::Out: Codec { let mut new_child_roots = Vec::new(); let mut root_map = None; for (child_info, map) in &self.inner { - if let Some((storage_key, _child_info)) = child_info.as_ref() { + if let Some((storage_key, child_info)) = child_info.as_ref() { + let mut prefix_storage_key = storage_key.to_vec(); + child_info.as_ref().do_prefix_key(&mut prefix_storage_key, None); // no need to use child_info at this point because we use a MemoryDB for // proof (with PrefixedMemoryDB it would be needed). 
let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((storage_key.clone(), ch.as_ref().into())); + new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { root_map = Some(map); } @@ -378,6 +380,8 @@ mod tests { let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), Some(b"3".to_vec())); - assert!(trie_backend.storage(b"1").unwrap().is_some()); + let mut prefixed_storage_key = b"1".to_vec(); + child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); + assert!(trie_backend.storage(prefixed_storage_key.as_slice()).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index febb6e31f1fe4..0df13a8fff137 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -270,7 +270,9 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed"); + let mut prefixed_storage_key = CHILD_KEY_1.to_vec(); + CHILD_INFO_1.do_prefix_key(&mut prefixed_storage_key, None); + trie.insert(prefixed_storage_key.as_slice(), &sub_root[..]).expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 278ad705c3253..f515e30c9528f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -472,7 +472,10 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); + let mut prefixed_storage_key = 
b"MyChild".to_vec(); + child_info.do_prefix_key(&mut prefixed_storage_key, None); + trie.insert(prefixed_storage_key.as_slice(), root_1.as_ref()) + .expect("insert failed"); }; let essence_1 = TrieBackendEssence::new(mdb, root_1); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e95c5ad162760..1204e809bce1b 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -190,7 +190,7 @@ impl TestClientBuilder::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - (sk.clone(), state_root.encode()) + let mut prefixed_storage_key = sk.clone(); + child_content.child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); + (prefixed_storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From 864e9ca17ddc3bd062b5f633d76e6e8849683991 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 7 Feb 2020 19:55:03 +0100 Subject: [PATCH 37/85] Fix contract to stop using the child storage prefix. 
--- .../network/src/protocol/legacy_proto/tests.rs | 2 +- client/network/src/protocol/light_dispatch.rs | 2 +- client/rpc/src/state/tests.rs | 2 +- frame/contracts/src/lib.rs | 17 ++++------------- frame/contracts/src/tests.rs | 1 - test-utils/client/src/lib.rs | 2 +- 6 files changed, 8 insertions(+), 18 deletions(-) diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/legacy_proto/tests.rs index ca35bbc6dfede..18e32f1d0189f 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/legacy_proto/tests.rs @@ -321,7 +321,7 @@ fn basic_two_nodes_requests_in_parallel() { }); } -//#[test] +#[test] fn reconnect_after_disconnect() { // We connect two nodes together, then force a disconnect (through the API of the `Service`), // check that the disconnect worked, and finally check whether they successfully reconnect. diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index bfa8daa181ca1..83e5589827f05 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -1040,7 +1040,7 @@ pub mod tests { light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), + storage_key: b"sub".to_vec(), child_info: child_info.to_vec(), child_type, keys: vec![b":key".to_vec()], diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index fe4ad6df16343..39964f38f6f49 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -21,7 +21,7 @@ use self::error::Error; use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration}; +use sp_core::{storage::ChildInfo, ChangesTrieConfiguration}; use sp_core::hash::H256; use 
sp_io::hashing::blake2_256; use substrate_test_runtime_client::{ diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index bd1e91f1a9d66..e67b9ecc92ee3 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -127,7 +127,6 @@ use frame_support::{ }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; -use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; pub type CodeHash = ::Hash; @@ -233,8 +232,9 @@ impl RawAliveContractInfo child::ChildInfo { - let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) + // Every new contract uses a new trie id and trie id results from + // hashing, so we can use child storage key (trie id) for child info. + child::ChildInfo::new_uid_parent_key(trie_id) } pub type TombstoneContractInfo = @@ -267,10 +267,6 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. - /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. 
- /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -295,12 +291,7 @@ where buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - // TODO: see https://github.com/paritytech/substrate/issues/2325 - CHILD_STORAGE_KEY_PREFIX.iter() - .chain(b"default:") - .chain(T::Hashing::hash(&buf[..]).as_ref().iter()) - .cloned() - .collect() + T::Hashing::hash(&buf[..]).as_ref().to_vec() } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 4bf468deceb36..7f2eff5d6942c 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -203,7 +203,6 @@ impl TrieIdGenerator for DummyTrieIdGenerator { *v }); - // TODO: see https://github.com/paritytech/substrate/issues/2325 let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 1204e809bce1b..9267989a40c53 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -40,7 +40,7 @@ pub use self::client_ext::{ClientExt, ClientBlockImportExt}; use std::sync::Arc; use std::collections::HashMap; -use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_core::storage::ChildInfo; use sp_runtime::traits::Block as BlockT; use sc_client::LocalCallExecutor; From c87d19b20438b4ccce92dc43ffc78ec7bcd2e413 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 10:31:26 +0100 Subject: [PATCH 38/85] Revert changes to state-db and plug keyspace above it for default child tries. 
--- Cargo.lock | 1 - client/db/src/lib.rs | 99 +++++---- client/state-db/src/lib.rs | 94 ++------ client/state-db/src/noncanonical.rs | 324 ++++++++++------------------ client/state-db/src/pruning.rs | 164 +++----------- client/state-db/src/test.rs | 43 ++-- primitives/storage/Cargo.toml | 3 +- primitives/storage/src/lib.rs | 6 +- 8 files changed, 246 insertions(+), 488 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c97058876e2c6..d13c45afa22f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7207,7 +7207,6 @@ name = "sp-storage" version = "2.0.0" dependencies = [ "impl-serde 0.2.3", - "parity-scale-codec", "serde", "sp-debug-derive", "sp-std", diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 71e2408891ed4..0a4b40a990ba3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -53,10 +53,10 @@ use sp_blockchain::{ use codec::{Decode, Encode}; use hash_db::Prefix; use kvdb::{KeyValueDB, DBTransaction}; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use sp_trie::{MemoryDB, PrefixedMemoryDB}; use parking_lot::RwLock; use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; -use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap}; +use sp_core::storage::{well_known_keys, ChildInfo, ChildrenMap, ChildType}; use sp_runtime::{ generic::BlockId, Justification, Storage, BuildStorage, @@ -667,12 +667,14 @@ struct StorageDb { impl sp_state_machine::Storage> for StorageDb { fn get( &self, - trie: &ChildInfo, + child_info: &ChildInfo, key: &Block::Hash, prefix: Prefix, ) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.state_db.get(trie, &key, self) + // Default child trie (those with strong unique id) are put + // directly into the same address space at state_db level. 
+ let key = keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); + self.state_db.get(&key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } } @@ -681,13 +683,12 @@ impl sc_state_db::NodeDb for StorageDb { type Error = io::Error; type Key = [u8]; - fn get(&self, child_info: &ChildInfo, key: &[u8]) -> Result>, Self::Error> { - if child_info.is_top_trie() { - self.db.get(columns::STATE, key) - } else { - let mut keyspace = Keyspaced::new(child_info.keyspace()); - self.db.get(columns::STATE, keyspace.prefix_key(key)) - }.map(|r| r.map(|v| v.to_vec())) + fn get(&self, key: &[u8]) -> Result>, Self::Error> { + // note this implementation should ONLY be call from state_db, + // as it rely on the fact that we address a key that is already + // prefixed with keyspace + self.db.get(columns::STATE, key) + .map(|r| r.map(|v| v.to_vec())) } } @@ -1121,30 +1122,47 @@ impl Backend { } let finalized = if operation.commit_state { - let mut changesets = ChildrenMap::>>::default(); + let mut state_db_changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; let mut bytes = 0; + let mut keyspace = Keyspaced::new(&[]); for (info, mut updates) in operation.db_updates.into_iter() { - let changeset = changesets.entry(info).or_default(); + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::CryptoUniqueId { + // Unhandled child kind + return Err(ClientError::Backend(format!( + "Data for {:?} without a backend implementation", + info.child_type(), + ))); + } + keyspace.change_keyspace(info.keyspace()); for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; if rc > 0 { ops += 1; - bytes += key.len() + val.len(); + bytes += key.len() as u64 + val.len() as u64; - changeset.inserted.push((key, val.to_vec())); + state_db_changeset.inserted.push((key, val.to_vec())); } 
else if rc < 0 { ops += 1; - bytes += key.len(); - - changeset.deleted.push(key); + bytes += key.len() as u64; + state_db_changeset.deleted.push(key); } } } self.state_usage.tally_writes(ops, bytes as u64); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changesets) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + let commit = self.storage.state_db.insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + state_db_changeset, + ).map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; apply_state_commit(&mut transaction, commit); // Check if need to finalize. Genesis is always finalized instantly. @@ -1329,25 +1347,12 @@ impl Backend { } fn apply_state_commit(transaction: &mut DBTransaction, commit: sc_state_db::CommitSet>) { - let mut keyspace = Keyspaced::new(&[]); - for child_data in commit.data.into_iter() { - if child_data.0.is_top_trie() { - // empty prefix - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, &key[..], &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, &key[..]); - } - } else { - keyspace.change_keyspace(child_data.0.keyspace()); - for (key, val) in child_data.1.inserted.into_iter() { - transaction.put(columns::STATE, keyspace.prefix_key(key.as_slice()), &val); - } - for key in child_data.1.deleted.into_iter() { - transaction.delete(columns::STATE, keyspace.prefix_key(key.as_slice())); - } - } + // state_db commit set is only for column STATE + for (key, val) in commit.data.inserted.into_iter() { + transaction.put(columns::STATE, &key[..], &val); + } + for key in commit.data.deleted.into_iter() { + transaction.delete(columns::STATE, &key[..]); } for (key, val) in commit.meta.inserted.into_iter() { transaction.put(columns::STATE_META, 
&key[..], &val); @@ -1699,6 +1704,20 @@ impl Keyspaced { } } +// Prefix key and add keyspace with a single vec alloc +// Warning if memory_db `sp_trie::prefixed_key` implementation change, this function +// will need change too. +fn keyspace_and_prefixed_key(key: &[u8], keyspace: &[u8], prefix: Prefix) -> Vec { + let mut prefixed_key = Vec::with_capacity(key.len() + keyspace.len() + prefix.0.len() + 1); + prefixed_key.extend_from_slice(keyspace); + prefixed_key.extend_from_slice(prefix.0); + if let Some(last) = prefix.1 { + prefixed_key.push(last); + } + prefixed_key.extend_from_slice(key); + prefixed_key +} + #[cfg(test)] pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 77373ce47649b..f2722ae308068 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -40,7 +40,6 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; -use sp_core::storage::{ChildInfo, ChildrenMap}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -68,7 +67,7 @@ pub trait NodeDb { type Error: fmt::Debug; /// Get state trie node. - fn get(&self, child_info: &ChildInfo, key: &Self::Key) -> Result, Self::Error>; + fn get(&self, key: &Self::Key) -> Result, Self::Error>; } /// Error type. @@ -114,44 +113,23 @@ impl fmt::Debug for Error { /// A set of state node changes. #[derive(Default, Debug, Clone)] -pub struct ChangeSet { +pub struct ChangeSet { /// Inserted nodes. pub inserted: Vec<(H, DBValue)>, /// Deleted nodes. pub deleted: Vec, } -impl ChangeSet { - fn merge(&mut self, other: ChangeSet) { - self.inserted.extend(other.inserted.into_iter()); - self.deleted.extend(other.deleted.into_iter()); - } -} - -/// Change sets of all child trie (top is key None). -pub type ChildTrieChangeSets = ChildrenMap>; /// A set of changes to the backing database. 
#[derive(Default, Debug, Clone)] -pub struct CommitSet { +pub struct CommitSet { /// State node changes. - pub data: ChildTrieChangeSets, + pub data: ChangeSet, /// Metadata changes. pub meta: ChangeSet>, } -impl CommitSet { - /// Number of inserted key value elements in the set. - pub fn inserted_len(&self) -> usize { - self.data.iter().map(|set| set.1.inserted.len()).sum() - } - - /// Number of deleted key value elements in the set. - pub fn deleted_len(&self) -> usize { - self.data.iter().map(|set| set.1.deleted.len()).sum() - } -} - /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { @@ -256,13 +234,7 @@ impl StateDbSync { } } - pub fn insert_block( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - mut changesets: ChildTrieChangeSets, - ) -> Result, Error> { + pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. 
@@ -271,17 +243,15 @@ impl StateDbSync { match self.mode { PruningMode::ArchiveAll => { - for changeset in changesets.iter_mut() { - changeset.1.deleted.clear(); - } + changeset.deleted.clear(); // write changes immediately Ok(CommitSet { - data: changesets, + data: changeset, meta: meta, }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changesets); + let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); commit.map(|mut c| { c.meta.inserted.extend(meta.inserted); c @@ -298,9 +268,7 @@ impl StateDbSync { match self.non_canonical.canonicalize(&hash, &mut commit) { Ok(()) => { if self.mode == PruningMode::ArchiveCanonical { - for commit in commit.data.iter_mut() { - commit.1.deleted.clear(); - } + commit.data.deleted.clear(); } } Err(e) => return Err(e), @@ -400,18 +368,13 @@ impl StateDbSync { } } - pub fn get( - &self, - child_info: &ChildInfo, - key: &Key, - db: &D, - ) -> Result, Error> + pub fn get(&self, key: &Key, db: &D) -> Result, Error> where Key: AsRef { - if let Some(value) = self.non_canonical.get(child_info, key) { + if let Some(value) = self.non_canonical.get(key) { return Ok(Some(value)); } - db.get(child_info, key.as_ref()).map_err(|e| Error::Db(e)) + db.get(key.as_ref()).map_err(|e| Error::Db(e)) } pub fn apply_pending(&mut self) { @@ -451,14 +414,8 @@ impl StateDb { } /// Add a new non-canonical block. - pub fn insert_block( - &self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changesets: ChildTrieChangeSets, - ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changesets) + pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + self.db.write().insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. 
@@ -477,15 +434,10 @@ impl StateDb { } /// Get a value from non-canonical/pruning overlay or the backing DB. - pub fn get( - &self, - child_info: &ChildInfo, - key: &Key, - db: &D, - ) -> Result, Error> + pub fn get(&self, key: &Key, db: &D) -> Result, Error> where Key: AsRef { - self.db.read().get(child_info, key, db) + self.db.read().get(key, db) } /// Revert all non-canonical blocks with the best block number. @@ -521,7 +473,7 @@ mod tests { use std::io; use sp_core::H256; use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_childchangeset, TestDb}; + use crate::test::{make_db, make_changeset, TestDb}; fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { let mut db = make_db(&[91, 921, 922, 93, 94]); @@ -533,7 +485,7 @@ mod tests { &H256::from_low_u64_be(1), 1, &H256::from_low_u64_be(0), - make_childchangeset(&[1], &[91]), + make_changeset(&[1], &[91]), ) .unwrap(), ); @@ -543,7 +495,7 @@ mod tests { &H256::from_low_u64_be(21), 2, &H256::from_low_u64_be(1), - make_childchangeset(&[21], &[921, 1]), + make_changeset(&[21], &[921, 1]), ) .unwrap(), ); @@ -553,7 +505,7 @@ mod tests { &H256::from_low_u64_be(22), 2, &H256::from_low_u64_be(1), - make_childchangeset(&[22], &[922]), + make_changeset(&[22], &[922]), ) .unwrap(), ); @@ -563,7 +515,7 @@ mod tests { &H256::from_low_u64_be(3), 3, &H256::from_low_u64_be(21), - make_childchangeset(&[3], &[93]), + make_changeset(&[3], &[93]), ) .unwrap(), ); @@ -576,7 +528,7 @@ mod tests { &H256::from_low_u64_be(4), 4, &H256::from_low_u64_be(3), - make_childchangeset(&[4], &[94]), + make_changeset(&[4], &[94]), ) .unwrap(), ); @@ -647,7 +599,7 @@ mod tests { &H256::from_low_u64_be(0), 0, &H256::from_low_u64_be(0), - make_childchangeset(&[], &[]), + make_changeset(&[], &[]), ) .unwrap(), ); diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 4f06d9dd52180..373c1aa0da076 100644 --- a/client/state-db/src/noncanonical.rs +++ 
b/client/state-db/src/noncanonical.rs @@ -22,19 +22,13 @@ use std::fmt; use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChildTrieChangeSets, CommitSet, MetaDb, Hash, to_meta_key, ChangeSet}; +use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; use codec::{Encode, Decode}; use log::trace; -use sp_core::storage::{ChildInfo, ChildrenMap, ChildrenVec}; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; -const NON_CANONICAL_JOURNAL_V1: &[u8] = b"v1_non_canonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; -type Keys = ChildrenVec>; -type KeyVals = ChildrenVec>; -type ChildKeyVals = ChildrenMap>; - /// See module documentation. pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, @@ -42,86 +36,52 @@ pub struct NonCanonicalOverlay { parents: HashMap, pending_canonicalizations: Vec, pending_insertions: Vec, - values: ChildKeyVals, //ref counted + values: HashMap, //ref counted //would be deleted but kept around because block is pinned, ref counted. pinned: HashMap, - pinned_insertions: HashMap>, + pinned_insertions: HashMap>, } #[derive(Encode, Decode)] -struct JournalRecordCompat { +struct JournalRecord { hash: BlockHash, parent_hash: BlockHash, inserted: Vec<(Key, DBValue)>, deleted: Vec, } -#[derive(Encode, Decode)] -struct JournalRecordV1 { - hash: BlockHash, - parent_hash: BlockHash, - inserted: KeyVals, - deleted: Keys, -} - -impl From> for JournalRecordV1 { - // Note that this compatibility only works as long as the backend - // child storage format is the same in both case. 
- fn from(old: JournalRecordCompat) -> Self { - JournalRecordV1 { - hash: old.hash, - parent_hash: old.parent_hash, - inserted: vec![(ChildInfo::top_trie(), old.inserted)], - deleted: vec![(ChildInfo::top_trie(), old.deleted)], - } - } -} - -fn to_old_journal_key(block: u64, index: u64) -> Vec { +fn to_journal_key(block: u64, index: u64) -> Vec { to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } -fn to_journal_key_v1(block: u64, index: u64) -> Vec { - to_meta_key(NON_CANONICAL_JOURNAL_V1, &(block, index)) -} - #[cfg_attr(test, derive(PartialEq, Debug))] struct BlockOverlay { hash: BlockHash, journal_key: Vec, - inserted: Keys, - deleted: Keys, + inserted: Vec, + deleted: Vec, } -fn insert_values( - values: &mut ChildKeyVals, - inserted: KeyVals, -) { - for (child_info, inserted) in inserted { - let values = values.entry(child_info).or_default(); - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; - } +fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; } } -fn discard_values(values: &mut ChildKeyVals, inserted: Keys) { - for (child_info, inserted) in inserted { - let values = values.entry(child_info).or_default(); - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); - } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); +fn discard_values(values: &mut HashMap, inserted: Vec) { + for k in inserted { + match values.entry(k) { + Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove_entry(); } + }, + 
Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); } } } @@ -129,11 +89,11 @@ fn discard_values(values: &mut ChildKeyVals, inserted: Keys fn discard_descendants( levels: &mut VecDeque>>, - mut values: &mut ChildKeyVals, + mut values: &mut HashMap, index: usize, parents: &mut HashMap, pinned: &HashMap, - pinned_insertions: &mut HashMap>, + pinned_insertions: &mut HashMap>, hash: &BlockHash, ) { let mut discarded = Vec::new(); @@ -172,7 +132,7 @@ impl NonCanonicalOverlay { }; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); - let mut values = ChildrenMap::default(); + let mut values = HashMap::new(); if let Some((ref hash, mut block)) = last_canonicalized { // read the journal trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); @@ -182,35 +142,26 @@ impl NonCanonicalOverlay { let mut index: u64 = 0; let mut level = Vec::new(); loop { - let journal_key = to_journal_key_v1(block, index); - let record: JournalRecordV1 = match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => Decode::decode(&mut record.as_slice())?, - None => { - let journal_key = to_old_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecordCompat = Decode::decode(&mut record.as_slice())?; - record.into() - }, - None => break, - } + let journal_key = to_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; }, - }; - let inserted = record.inserted.iter().map(|(child_info, rec)| - (child_info.clone(), rec.iter().map(|(k, _)| k.clone()).collect()) - ).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; + None => break, + } } if level.is_empty() { break; @@ -233,13 +184,7 @@ impl NonCanonicalOverlay { } /// Insert a new block into the overlay. If inserted on the second level or lover expects parent to be present in the window. 
- pub fn insert( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changeset: ChildTrieChangeSets, - ) -> Result, Error> { + pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { let mut commit = CommitSet::default(); let front_block_number = self.front_block_number(); if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { @@ -274,39 +219,22 @@ impl NonCanonicalOverlay { }; let index = level.len() as u64; - let journal_key = to_journal_key_v1(number, index); - - let mut inserted = Vec::with_capacity(changeset.len()); - let mut inserted_block = Vec::with_capacity(changeset.len()); - let mut deleted = Vec::with_capacity(changeset.len()); - for changeset in changeset.into_iter() { - inserted_block.push(( - changeset.0.clone(), - changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), - )); - inserted.push(( - changeset.0.clone(), - changeset.1.inserted, - )); - deleted.push(( - changeset.0, - changeset.1.deleted, - )); - } + let journal_key = to_journal_key(number, index); + let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); let overlay = BlockOverlay { hash: hash.clone(), journal_key: journal_key.clone(), - inserted: inserted_block, - deleted: deleted.clone(), + inserted: inserted, + deleted: changeset.deleted.clone(), }; level.push(overlay); self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecordV1 { + let journal_record = JournalRecord { hash: hash.clone(), parent_hash: parent_hash.clone(), - inserted, - deleted, + inserted: changeset.inserted, + deleted: changeset.deleted, }; commit.meta.inserted.push((journal_key, journal_record.encode())); trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); @@ -389,29 +317,9 @@ impl NonCanonicalOverlay { // get the one we 
need to canonicalize let overlay = &level[index]; - commit.data.extend_with(overlay.inserted.iter() - .map(|(ct, keys)| ( - ct.clone(), - ChangeSet { - inserted: keys.iter().map(|k| ( - k.clone(), - self.values - .get(ct) - .expect("For each key in overlays there's a value in values") - .get(k) - .expect("For each key in overlays there's a value in values").1.clone(), - )).collect(), - deleted: Vec::new(), - }, - )), ChangeSet::merge); - commit.data.extend_with(overlay.deleted.iter().cloned() - .map(|(ct, keys)| ( - ct, - ChangeSet { - inserted: Vec::new(), - deleted: keys, - }, - )), ChangeSet::merge); + commit.data.inserted.extend(overlay.inserted.iter() + .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); + commit.data.deleted.extend(overlay.deleted.clone()); commit.meta.deleted.append(&mut discarded_journals); let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); @@ -460,11 +368,9 @@ impl NonCanonicalOverlay { } /// Get a value from the node overlay. This searches in every existing changeset. 
- pub fn get(&self, child_info: &ChildInfo, key: &Key) -> Option { - if let Some(values) = self.values.get(child_info) { - if let Some((_, value)) = values.get(&key) { - return Some(value.clone()); - } + pub fn get(&self, key: &Key) -> Option { + if let Some((_, value)) = self.values.get(&key) { + return Some(value.clone()); } None } @@ -565,14 +471,12 @@ impl NonCanonicalOverlay { mod tests { use std::io; use sp_core::H256; - use sp_core::storage::ChildInfo; - use super::{NonCanonicalOverlay, to_journal_key_v1}; - use crate::CommitSet; - use crate::test::{make_db, make_childchangeset}; + use super::{NonCanonicalOverlay, to_journal_key}; + use crate::{ChangeSet, CommitSet}; + use crate::test::{make_db, make_changeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&ChildInfo::top_trie(), &H256::from_low_u64_be(key)) - == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) } #[test] @@ -600,8 +504,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, Default::default()).unwrap(); + overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -611,8 +515,8 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); } #[test] @@ -622,8 +526,8 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let mut overlay = 
NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); + overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); } #[test] @@ -633,7 +537,7 @@ mod tests { let h2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), Default::default()).unwrap(); + overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); let mut commit = CommitSet::default(); overlay.canonicalize::(&h2, &mut commit).unwrap(); } @@ -643,19 +547,17 @@ mod tests { let h1 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_childchangeset(&[3, 4], &[2]); + let changeset = make_changeset(&[3, 4], &[2]); let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.inserted_len(), 0); - assert_eq!(insertion.deleted_len(), 0); + assert_eq!(insertion.data.inserted.len(), 0); + assert_eq!(insertion.data.deleted.len(), 0); assert_eq!(insertion.meta.inserted.len(), 2); assert_eq!(insertion.meta.deleted.len(), 0); db.commit(&insertion); let mut finalization = CommitSet::default(); overlay.canonicalize::(&h1, &mut finalization).unwrap(); - let inserted_len = changeset.iter().map(|set| set.1.inserted.len()).sum(); - let deleted_len = changeset.iter().map(|set| set.1.deleted.len()).sum(); - assert_eq!(finalization.inserted_len(), inserted_len); - assert_eq!(finalization.deleted_len(), deleted_len); + assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); + assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); assert_eq!(finalization.meta.inserted.len(), 1); assert_eq!(finalization.meta.deleted.len(), 1); 
db.commit(&finalization); @@ -668,8 +570,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); assert_eq!(db.meta.len(), 3); let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); @@ -684,8 +586,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_childchangeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_childchangeset(&[5], &[3])).unwrap()); + db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); let mut commit = CommitSet::default(); overlay.canonicalize::(&h1, &mut commit).unwrap(); db.commit(&commit); @@ -704,8 +606,8 @@ mod tests { let h2 = H256::random(); let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); assert!(contains(&overlay, 5)); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); @@ -736,8 +638,8 @@ mod tests { #[test] fn insert_same_key() { let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = 
(H256::random(), make_childchangeset(&[1], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -758,7 +660,7 @@ mod tests { let h3 = H256::random(); let mut db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_childchangeset(&[], &[]); + let changeset = make_changeset(&[], &[]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); overlay.apply_pending(); @@ -786,19 +688,19 @@ mod tests { // // 1_2_2 is the winner - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - let (h_1_1, c_1_1) = (H256::random(), make_childchangeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_childchangeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_childchangeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_childchangeset(&[22], &[])); + let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); - let (h_1_1_1, c_1_1_1) = (H256::random(), make_childchangeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_childchangeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_childchangeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_childchangeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), 
make_childchangeset(&[211], &[])); + let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); + let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -848,11 +750,11 @@ mod tests { assert!(contains(&overlay, 111)); assert!(!contains(&overlay, 211)); // check that journals are deleted - assert!(db.get_meta(&to_journal_key_v1(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key_v1(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key_v1(2, 3)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); // canonicalize 1_2. 
1_1 and all its children should be discarded let mut commit = CommitSet::default(); @@ -889,8 +791,8 @@ mod tests { let mut db = make_db(&[1, 2, 3, 4]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); assert!(overlay.revert_one().is_none()); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); assert!(contains(&overlay, 7)); @@ -911,9 +813,9 @@ mod tests { let h2_2 = H256::random(); let db = make_db(&[]); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_childchangeset(&[5, 6], &[2]); - let changeset2 = make_childchangeset(&[7, 8], &[5, 3]); - let changeset3 = make_childchangeset(&[9], &[]); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset3 = make_changeset(&[9], &[]); overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); assert!(contains(&overlay, 5)); overlay.insert::(&h2_1, 2, &h1, changeset2).unwrap(); @@ -936,8 +838,8 @@ mod tests { // - 0 - 1_1 // \ 1_2 - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[2], &[])); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -964,9 +866,9 @@ mod tests { // \ 1_3 // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_childchangeset(&[], &[])); + let (h_1, 
c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); @@ -993,9 +895,9 @@ mod tests { // - 0 - 1_1 - 2_1 // \ 1_2 - let (h_11, c_11) = (H256::random(), make_childchangeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_childchangeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_childchangeset(&[], &[])); + let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); + let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); + let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); db.commit(&overlay.insert::(&h_11, 1, &H256::default(), c_11).unwrap()); diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index a4e6fe1473fa1..a993df4f111ac 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -26,21 +26,16 @@ use std::collections::{HashMap, HashSet, VecDeque}; use codec::{Encode, Decode}; use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; use log::{trace, warn}; -use sp_core::storage::{ChildInfo, ChildrenVec, ChildrenMap}; -use super::ChangeSet; const LAST_PRUNED: &[u8] = b"last_pruned"; -const OLD_PRUNING_JOURNAL: &[u8] = b"pruning_journal"; -const PRUNING_JOURNAL_V1: &[u8] = b"v1_pruning_journal"; - -type Keys = ChildrenVec>; +const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. death_rows: VecDeque>, /// An index that maps each key from `death_rows` to block number. 
- death_index: ChildrenMap>, + death_index: HashMap, /// Block number that corresponts to the front of `death_rows` pending_number: u64, /// Number of call of `note_canonical` after @@ -55,49 +50,18 @@ pub struct RefWindow { struct DeathRow { hash: BlockHash, journal_key: Vec, - deleted: ChildrenMap>, -} - -impl DeathRow { - fn remove_deleted(&mut self, child_info: &ChildInfo, key: &Key) -> bool { - if let Some(child_index) = self.deleted.get_mut(child_info) { - child_index.remove(key) - } else { - false - } - } + deleted: HashSet, } #[derive(Encode, Decode)] -struct JournalRecordCompat { +struct JournalRecord { hash: BlockHash, inserted: Vec, deleted: Vec, } -#[derive(Encode, Decode)] -struct JournalRecordV1 { - hash: BlockHash, - inserted: Keys, - deleted: Keys, -} - -fn to_old_journal_key(block: u64) -> Vec { - to_meta_key(OLD_PRUNING_JOURNAL, &block) -} - -fn to_journal_key_v1(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL_V1, &block) -} - -impl From> for JournalRecordV1 { - fn from(old: JournalRecordCompat) -> Self { - JournalRecordV1 { - hash: old.hash, - inserted: vec![(ChildInfo::top_trie(), old.inserted)], - deleted: vec![(ChildInfo::top_trie(), old.deleted)], - } - } +fn to_journal_key(block: u64) -> Vec { + to_meta_key(PRUNING_JOURNAL, &block) } impl RefWindow { @@ -119,67 +83,37 @@ impl RefWindow { // read the journal trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); loop { - let journal_key = to_journal_key_v1(block); - let record: JournalRecordV1 = match db.get_meta(&journal_key) - .map_err(|e| Error::Db(e))? { - Some(record) => Decode::decode(&mut record.as_slice())?, - None => { - let journal_key = to_old_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => JournalRecordCompat::decode(&mut record.as_slice())?.into(), - None => break, - } + let journal_key = to_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); }, - }; - trace!( - target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", - block, - record.inserted.len(), - record.deleted.len(), - ); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); + None => break, + } block += 1; } Ok(pruning) } - fn import)>>( - &mut self, - hash: &BlockHash, - journal_key: Vec, - inserted: I, - deleted: Keys, - ) { + fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { // remove all re-inserted keys from death rows - for (child_info, inserted) in inserted { - if let Some(child_index) = self.death_index.get_mut(&child_info) { - for k in inserted { - if let Some(block) = child_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize] - .remove_deleted(&child_info, &k); - } - } + for k in inserted { + if let Some(block) = self.death_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); } } // add new keys let imported_block = self.pending_number + self.death_rows.len() as u64; - for (child_info, deleted) in deleted.iter() { - let entry = self.death_index.entry(child_info.clone()).or_default(); - for k in deleted.iter() { - entry.insert(k.clone(), imported_block); - } - } - let mut deleted_death_row = ChildrenMap::>::default(); - for (child_info, deleted) in deleted.into_iter() { - let entry = deleted_death_row.entry(child_info).or_default(); - entry.extend(deleted); + for k in deleted.iter() { + self.death_index.insert(k.clone(), imported_block); } - self.death_rows.push_back( DeathRow { hash: hash.clone(), - deleted: deleted_death_row, + deleted: 
deleted.into_iter().collect(), journal_key: journal_key, } ); @@ -210,16 +144,7 @@ impl RefWindow { if let Some(pruned) = self.death_rows.get(self.pending_prunings) { trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); let index = self.pending_number + self.pending_prunings as u64; - - commit.data.extend_with(pruned.deleted.iter() - .map(|(child_info, keys)| ( - child_info.clone(), - ChangeSet { - inserted: Vec::new(), - deleted: keys.iter().cloned().collect(), - }, - )), ChangeSet::merge); - + commit.data.deleted.extend(pruned.deleted.iter().cloned()); commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); commit.meta.deleted.push(pruned.journal_key.clone()); self.pending_prunings += 1; @@ -230,29 +155,16 @@ impl RefWindow { /// Add a change set to the window. Creates a journal record and pushes it to `commit` pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!( - target: "state-db", - "Adding to pruning window: {:?} ({} inserted, {} deleted)", - hash, - commit.inserted_len(), - commit.deleted_len(), - ); - let inserted = commit.data.iter().map(|changeset| ( - changeset.0.clone(), - changeset.1.inserted.iter().map(|(k, _)| k.clone()).collect(), - )).collect(); - let deleted = commit.data.iter_mut().map(|changeset| ( - changeset.0.clone(), - ::std::mem::replace(&mut changeset.1.deleted, Vec::new()), - )).collect(); - - let journal_record = JournalRecordV1 { + trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); + let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); + let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); + let journal_record = JournalRecord { hash: hash.clone(), inserted, deleted, }; let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key_v1(block); + let journal_key = 
to_journal_key(block); commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); self.pending_canonicalizations += 1; @@ -264,12 +176,8 @@ impl RefWindow { for _ in 0 .. self.pending_prunings { let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for (child_info, deleted) in pruned.deleted.iter() { - if let Some(child_index) = self.death_index.get_mut(child_info) { - for key in deleted.iter() { - child_index.remove(key); - } - } + for k in pruned.deleted.iter() { + self.death_index.remove(&k); } self.pending_number += 1; } @@ -284,11 +192,7 @@ impl RefWindow { // deleted in case transaction fails and `revert_pending` is called. self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); let new_max_block = self.death_rows.len() as u64 + self.pending_number; - - self.death_index.retain(|_ct, child_index| { - child_index.retain(|_, block| *block < new_max_block); - !child_index.is_empty() - }); + self.death_index.retain(|_, block| *block < new_max_block); self.pending_canonicalizations = 0; self.pending_prunings = 0; } @@ -341,10 +245,9 @@ mod tests { assert!(pruning.have_block(&h)); pruning.apply_pending(); assert!(pruning.have_block(&h)); - assert_eq!(commit.deleted_len(), 0); + assert!(commit.data.deleted.is_empty()); assert_eq!(pruning.death_rows.len(), 1); - let death_index_len: usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); - assert_eq!(death_index_len, 2); + assert_eq!(pruning.death_index.len(), 2); assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); check_journal(&pruning, &db); @@ -356,8 +259,7 @@ mod tests { assert!(!pruning.have_block(&h)); assert!(db.data_eq(&make_db(&[2, 4, 5]))); assert!(pruning.death_rows.is_empty()); - let death_index_len: 
usize = pruning.death_index.iter().map(|(_ct, map)| map.len()).sum(); - assert!(death_index_len == 0); + assert!(pruning.death_index.is_empty()); assert_eq!(pruning.pending_number, 1); } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index b9f2941bcc5e0..accafa9bf831f 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -18,12 +18,11 @@ use std::collections::HashMap; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb, ChildTrieChangeSets}; -use sp_core::storage::{ChildInfo, ChildrenMap}; +use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: ChildrenMap>, + pub data: HashMap, pub meta: HashMap, DBValue>, } @@ -39,24 +38,17 @@ impl NodeDb for TestDb { type Error = (); type Key = H256; - fn get(&self, child_info: &ChildInfo, key: &H256) -> Result, ()> { - Ok(self.data.get(child_info).and_then(|data| data.get(key).cloned())) + fn get(&self, key: &H256) -> Result, ()> { + Ok(self.data.get(key).cloned()) } } impl TestDb { pub fn commit(&mut self, commit: &CommitSet) { - for ct in commit.data.iter() { - self.data.entry(ct.0.clone()).or_default() - .extend(ct.1.inserted.iter().cloned()) - } + self.data.extend(commit.data.inserted.iter().cloned()); self.meta.extend(commit.meta.inserted.iter().cloned()); - for ct in commit.data.iter() { - if let Some(self_data) = self.data.get_mut(&ct.0) { - for k in ct.1.deleted.iter() { - self_data.remove(k); - } - } + for k in commit.data.deleted.iter() { + self.data.remove(k); } self.meta.extend(commit.meta.inserted.iter().cloned()); for k in commit.meta.deleted.iter() { @@ -81,28 +73,21 @@ pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { } } -pub fn make_childchangeset(inserted: &[u64], deleted: &[u64]) -> ChildTrieChangeSets { - let mut result = ChildTrieChangeSets::default(); - result.insert(ChildInfo::top_trie(), make_changeset(inserted, deleted)); - 
result -} - pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { CommitSet { - data: make_childchangeset(inserted, deleted), + data: make_changeset(inserted, deleted), meta: ChangeSet::default(), } } pub fn make_db(inserted: &[u64]) -> TestDb { - let mut data = ChildrenMap::default(); - data.insert(ChildInfo::top_trie(), inserted.iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect()); TestDb { - data, + data: inserted + .iter() + .map(|v| { + (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) + }) + .collect(), meta: Default::default(), } } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c0af25fc9ba9c..c9fda1816b55e 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -11,8 +11,7 @@ sp-std = { version = "2.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] +std = [ "sp-std/std", "serde", "impl-serde" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 69e746f725267..2e6df51dfb3e6 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -18,7 +18,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; #[cfg(feature = "std")] @@ -191,7 +190,7 @@ impl<'a> ChildStorageKey<'a> { /// Information related to a child state. 
-#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum ChildInfo { Default(ChildTrie), } @@ -269,6 +268,7 @@ impl ChildInfo { /// be related to technical consideration or api variant. #[repr(u32)] #[derive(Clone, Copy, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] pub enum ChildType { /// Default, it uses a cryptographic strong unique id as input. CryptoUniqueId = 1, @@ -287,7 +287,7 @@ impl ChildType { /// It share its trie node storage with any kind of key, /// and its unique id needs to be collision free (eg strong /// crypto hash). -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct ChildTrie { /// Data containing unique id. /// Unique id must but unique and free of any possible key collision From 8b901a298530480874c924e131aa0d5cd5bd32c2 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 11:13:36 +0100 Subject: [PATCH 39/85] Update client-db benches. 
--- client/db/src/bench.rs | 61 +++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9858a5c148bfa..6d7e244f510f8 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -22,8 +22,8 @@ use std::cell::{Cell, RefCell}; use rand::Rng; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; -use sp_core::storage::ChildInfo; +use sp_trie::MemoryDB; +use sp_core::storage::{ChildInfo, ChildType}; use sp_runtime::traits::{Block as BlockT, HasherFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; @@ -40,8 +40,13 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + fn get( + &self, + child_info: &ChildInfo, + key: &Block::Hash, + prefix: Prefix, + ) -> Result, String> { + let key = crate::keyspace_and_prefixed_key(key.as_ref(), child_info.keyspace(), prefix); self.db.get(0, &key) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -82,9 +87,10 @@ impl BenchmarkingState { child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info )); - let (root, transaction) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, + false, ); state.genesis = transaction.clone(); state.commit(root, transaction)?; @@ -142,7 +148,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) @@ -155,7 +161,7 @@ impl StateBackend> for BenchmarkingState { fn 
exists_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) @@ -168,7 +174,7 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) @@ -189,7 +195,7 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { @@ -200,7 +206,7 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -218,7 +224,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, @@ -237,7 +243,7 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, storage_key: &[u8], - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) @@ -249,17 +255,34 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) + fn commit(&self, storage_root: as Hasher>::Out, transaction: Self::Transaction) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); - - for (key, (val, rc)) in transaction.drain() { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - 
db_transaction.delete(0, &key); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, mut updates) in transaction.into_iter() { + // child info with strong unique id are using the same state-db with prefixed key + if info.child_type() != ChildType::CryptoUniqueId { + // Unhandled child kind + unimplemented!( + "Data for {:?} without a backend implementation", + info.child_type(), + ); + } + keyspace.change_keyspace(info.keyspace()); + for (key, (val, rc)) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + if rc > 0 { + db_transaction.put(0, &key, &val); + } else if rc < 0 { + db_transaction.delete(0, &key); + } } } db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; From f6efdadecdedddea9e3ef19b1e1394f2b2393871 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Feb 2020 11:23:39 +0100 Subject: [PATCH 40/85] Bump runtime impl version. --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index be9dbb96869a4..ca6ce955e665d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
spec_version: 215, - impl_version: 1, + impl_version: 2, apis: RUNTIME_API_VERSIONS, }; From 93de9600d2aba489359a28513c98f85d6da922ae Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 13 Feb 2020 12:20:09 +0100 Subject: [PATCH 41/85] fix new code --- bin/node/runtime/src/lib.rs | 2 +- client/network/src/protocol/light_client_handler.rs | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2f7aee7f8f50c..2c9e4af8c4db2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. spec_version: 216, - impl_version: 2, + impl_version: 3, apis: RUNTIME_API_VERSIONS, }; diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f5be23c0d4d49..b787838ff4c13 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -512,7 +512,7 @@ where let proof = if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { + match self.chain.read_child_proof(&block, &request.storage_key, &info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", @@ -1141,7 +1141,7 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); + const CHILD_UUID: &[u8] = b"foobarbaz"; type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler>, Block>; @@ -1636,7 +1636,8 @@ mod tests { #[test] fn 
receives_remote_read_child_response() { - let info = CHILD_INFO.info(); + let child_info = ChildInfo::new_default(CHILD_UUID); + let info = child_info.info(); let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), @@ -1739,7 +1740,8 @@ mod tests { #[test] fn send_receive_read_child() { - let info = CHILD_INFO.info(); + let child_info = ChildInfo::new_default(CHILD_UUID); + let info = child_info.info(); let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), From c04ba958be38023658f5ff02cadd3782bee3a24f Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 10:27:34 +0100 Subject: [PATCH 42/85] Fixing merge. --- client/network/src/protocol/light_client_handler.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f5be23c0d4d49..16daaeb506334 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -511,7 +511,11 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { + if let Some(info) = ChildInfo::resolve_child_info( + request.child_type, + &request.child_info[..], + &request.storage_key[..], + ) { match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { Ok(proof) => proof, Err(error) => { From ffaf9f597c963a95dc91430b92f458e091e67477 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 22:39:27 +0100 Subject: [PATCH 43/85] Switch back to using prefix on host function, remove child_id and child_info. 
--- client/api/src/light.rs | 5 - client/chain-spec/src/chain_spec.rs | 1 - client/db/src/bench.rs | 26 +-- client/db/src/lib.rs | 29 +-- client/db/src/storage_cache.rs | 23 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 55 ++--- .../src/protocol/light_client_handler.rs | 44 +--- client/network/src/protocol/light_dispatch.rs | 11 +- client/network/src/protocol/message.rs | 5 - .../src/protocol/schema/light.v1.proto | 7 +- client/rpc-api/src/state/mod.rs | 8 - client/rpc/src/state/mod.rs | 32 +-- client/rpc/src/state/state_full.rs | 55 +++-- client/rpc/src/state/state_light.rs | 10 +- client/rpc/src/state/tests.rs | 20 +- client/src/client.rs | 12 +- client/src/in_mem.rs | 4 +- client/src/light/backend.rs | 23 +- client/src/light/fetcher.rs | 22 +- frame/contracts/src/account_db.rs | 11 +- frame/contracts/src/lib.rs | 32 +-- frame/contracts/src/rent.rs | 6 +- frame/contracts/src/tests.rs | 2 +- frame/support/src/storage/child.rs | 167 +++++++-------- primitives/externalities/src/lib.rs | 20 +- primitives/io/src/lib.rs | 202 ++++++++---------- primitives/state-machine/src/backend.rs | 39 ++-- primitives/state-machine/src/basic.rs | 79 ++++--- .../state-machine/src/changes_trie/build.rs | 6 +- primitives/state-machine/src/ext.rs | 122 +++++------ .../state-machine/src/in_memory_backend.rs | 71 +++--- primitives/state-machine/src/lib.rs | 44 ++-- .../state-machine/src/overlayed_changes.rs | 37 ++-- .../state-machine/src/proving_backend.rs | 53 ++--- primitives/state-machine/src/testing.rs | 4 +- primitives/state-machine/src/trie_backend.rs | 39 ++-- .../state-machine/src/trie_backend_essence.rs | 47 ++-- primitives/storage/src/lib.rs | 178 ++++++++------- primitives/trie/src/lib.rs | 6 - test-utils/runtime/client/src/lib.rs | 13 +- test-utils/runtime/src/lib.rs | 22 +- 42 files changed, 670 insertions(+), 926 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index c0bebc1740a8a..2911d77f18209 100644 --- 
a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -82,11 +82,6 @@ pub struct RemoteReadChildRequest { pub header: Header, /// Storage key for child. pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index b47c41f107ccd..bf12d3e578a73 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -80,7 +80,6 @@ impl BuildStorage for ChainSpec { let child_info = ChildInfo::resolve_child_info( child_content.child_type, child_content.child_info.as_slice(), - storage_key.0.as_slice(), ).expect("chain spec contains correct content").to_owned(); ( storage_key.0, diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9858a5c148bfa..4d80d77cb60c2 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -77,10 +77,9 @@ impl BenchmarkingState { }; state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, + let child_delta = genesis.children.into_iter().map(|(_storage_key, child_content)| ( + child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), - child_content.child_info )); let (root, transaction) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), @@ -141,11 +140,10 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) + 
self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -154,11 +152,10 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -167,11 +164,10 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) + self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -188,24 +184,22 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(storage_key, child_info, f) + state.for_keys_in_child_storage(child_info, f) } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { if let Some(ref state) = *self.state.borrow() { - state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + state.for_child_keys_with_prefix(child_info, prefix, f) } } @@ -217,13 +211,12 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(storage_key, child_info, delta)) + 
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -236,11 +229,10 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) } fn as_trie_backend(&mut self) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 38935928a3c0d..efbcb26ff8fd8 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,11 +152,10 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.child_storage(storage_key, child_info, key) + self.state.child_storage(child_info, key) } fn exists_storage(&self, key: &[u8]) -> Result { @@ -165,11 +164,10 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -178,11 +176,10 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -195,21 +192,19 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, 
f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -221,14 +216,13 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -241,11 +235,10 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) @@ -588,10 +581,10 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, - child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), - ); + let child_delta = storage.children.into_iter().map(|(_storage_key, child_content)|( + child_content.child_info, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + )); let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index fd85a899b628e..7f5dcecf41dae 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,11 +539,10 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - let key = 
(storage_key.to_vec(), key.to_vec()); + let key = (child_info.storage_key().to_vec(), key.to_vec()); let local_cache = self.cache.local_cache.upgradable_read(); if let Some(entry) = local_cache.child_storage.get(&key).cloned() { trace!("Found in local cache: {:?}", key); @@ -561,7 +560,7 @@ impl>, B: BlockT> StateBackend> for Ca } } trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(storage_key, child_info, &key.1[..])?; + let value = self.state.child_storage(child_info, &key.1[..])?; // just pass it through the usage counter let value = self.usage.tally_child_key_read(&key, value, false); @@ -576,20 +575,18 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) + self.state.exists_child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) + self.state.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -598,11 +595,10 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) + self.state.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -615,12 +611,11 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.state.for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) @@ -632,14 +627,13 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( 
&self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, { - self.state.child_storage_root(storage_key, child_info, delta) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -652,11 +646,10 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) + self.state.child_keys(child_info, prefix) } fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index b991a0e65208c..e419323c99edd 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,7 +56,6 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, - storage_key: &[u8], child_info: ChildInfo, keys: &[Vec], ) -> Result; @@ -138,12 +137,11 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, - storage_key: &[u8], child_info: ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) - .read_child_proof(&BlockId::Hash(block.clone()), storage_key, child_info, keys) + .read_child_proof(&BlockId::Hash(block.clone()), child_info, keys) } fn execution_proof( diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index c1f3123440449..52914cca277e0 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::{listeners::ListenerId, Substream}, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, OwnedChildInfo}; use sp_consensus::{ BlockOrigin, 
block_validation::BlockAnnounceValidator, @@ -251,16 +251,12 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id: RequestId, block: ::Hash, storage_key: Vec, - child_info: Vec, - child_type: u32, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { id, block, storage_key, - child_info, - child_type, keys, }); @@ -1571,41 +1567,24 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info( - request.child_type, - &request.child_info[..], - &request.storage_key[..], + let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let proof = match self.context_data.chain.read_child_proof( + &request.block, + child_info.as_ref(), + &request.keys, ) { - match self.context_data.chain.read_child_proof( - &request.block, - &request.storage_key, - child_info, - &request.keys, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + request.storage_key.to_hex::(), + keys_str(), + request.block, + error + ); + StorageProof::empty() } - } else { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - "invalid child info and type", - ); - - StorageProof::empty() }; self.send_message( &who, diff --git a/client/network/src/protocol/light_client_handler.rs 
b/client/network/src/protocol/light_client_handler.rs index 16daaeb506334..3480de1bb5700 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -48,7 +48,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, OwnedChildInfo, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -510,36 +510,20 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = - if let Some(info) = ChildInfo::resolve_child_info( - request.child_type, - &request.child_info[..], - &request.storage_key[..], - ) { - match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { + let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let proof = match self.chain.read_child_proof(&block, child_info.as_ref(), &request.keys) { + Ok(proof) => proof, + Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", request_id, peer, request.storage_key.to_hex::(), fmt_keys(request.keys.first(), request.keys.last()), request.block, - "invalid child info and type" - ); + error); StorageProof::empty() - }; + } + }; let response = { let r = api::v1::light::RemoteReadResponse { proof: proof.encode() }; @@ -936,8 +920,6 @@ fn serialise_request(id: u64, request: &Request) -> api::v1::light: let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), 
- child_type: request.child_type.clone(), - child_info: request.child_info.clone(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -1145,8 +1127,6 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler>, Block>; type Swarm = libp2p::swarm::Swarm, Handler>; @@ -1640,15 +1620,12 @@ mod tests { #[test] fn receives_remote_read_child_response() { - let info = CHILD_INFO.info(); let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; issue_request(Request::ReadChild { request, sender: chan.0 }); @@ -1743,15 +1720,12 @@ mod tests { #[test] fn send_receive_read_child() { - let info = CHILD_INFO.info(); let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), + storage_key: b"sub".to_vec(), keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, retry_count: None, }; send_receive(Request::ReadChild { request, sender: chan.0 }); diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index ba3a6d33fda70..a06368396f779 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -70,8 +70,6 @@ pub trait LightDispatchNetwork { id: RequestId, block: ::Hash, storage_key: Vec, - child_info: Vec, - child_type: u32, keys: Vec>, ); @@ -625,8 +623,6 @@ impl Request { self.id, data.block, data.storage_key.clone(), - 
data.child_info.clone(), - data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -682,7 +678,6 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; - use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -823,7 +818,7 @@ pub mod tests { fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: Vec, _: u32, _: Vec>) {} + _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} @@ -1045,14 +1040,10 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - let child_info = ChildInfo::new_default(b"unique_id_1"); - let (child_info, child_type) = child_info.info(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), - child_info: child_info.to_vec(), - child_type, keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ef7d550de6cbe..d9e12c7596273 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -424,11 +424,6 @@ pub mod generic { pub block: H, /// Child Storage key. pub storage_key: Vec, - /// Child trie source information. 
- pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index b9aee67b5ee24..930d229b0bf7c 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -73,13 +73,8 @@ message RemoteReadChildRequest { bytes block = 2; // Child Storage key. bytes storage_key = 3; - // Child trie source information. - bytes child_info = 4; - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - uint32 child_type = 5; // Storage keys. - repeated bytes keys = 6; + repeated bytes keys = 4; } // Remote header request. diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index b2cf8ce909b20..48d363bb8921c 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -77,8 +77,6 @@ pub trait StateApi { fn child_storage_keys( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -88,8 +86,6 @@ pub trait StateApi { fn child_storage( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -99,8 +95,6 @@ pub trait StateApi { fn child_storage_hash( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -110,8 +104,6 @@ pub trait StateApi { fn child_storage_size( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 8f621cc8afc96..57a4b6cab897e 100644 --- a/client/rpc/src/state/mod.rs +++ 
b/client/rpc/src/state/mod.rs @@ -108,8 +108,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -118,8 +116,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -128,8 +124,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -138,11 +132,9 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) + Box::new(self.child_storage(block, child_storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -303,45 +295,37 @@ impl StateApi for State fn child_storage( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage(block, child_storage_key, key) } fn child_storage_keys( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) + self.backend.child_storage_keys(block, child_storage_key, key_prefix) } fn child_storage_hash( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage_hash(block, 
child_storage_key, key) } fn child_storage_size( &self, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) + self.backend.child_storage_size(block, child_storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { @@ -390,9 +374,3 @@ impl StateApi for State fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } - -const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; - -fn child_resolution_error() -> sp_blockchain::Error { - sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) -} diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index caf7a5787e1c3..238c99fc9e67b 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -42,7 +42,7 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; +use super::{StateBackend, error::{FutureResult, Error, Result}, client_err}; /// Ranges to query in state_queryStorage. 
struct QueryStorageRange { @@ -309,19 +309,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage_keys( + &BlockId::Hash(block), + child_info.as_ref(), + &prefix, + ) + }) .map_err(client_err))) } @@ -329,19 +328,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage( + &BlockId::Hash(block), + child_info.as_ref(), + &key, + ) + }) .map_err(client_err))) } @@ -349,19 +347,18 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..], &storage_key.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) + .and_then(|block| { + let child_info = OwnedChildInfo::new_default(storage_key.0); + self.client.child_storage_hash( + &BlockId::Hash(block), + child_info.as_ref(), + &key, + ) + }) .map_err(client_err))) } diff --git 
a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 7b2455a8fce38..485950de97c00 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -250,8 +250,6 @@ impl StateBackend for LightState, _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -261,8 +259,6 @@ impl StateBackend for LightState, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -273,8 +269,6 @@ impl StateBackend for LightState StateBackend for LightState, child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, child_storage_key, child_info, child_type, key) + .child_storage(block, child_storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 39964f38f6f49..e78010b7648cb 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,26 +30,26 @@ use substrate_test_runtime_client::{ runtime, }; -const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); +const STORAGE_KEY: &[u8] = b"child"; +const CHILD_INFO: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:child" +); #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b"child"; const CHILD_VALUE: &[u8] = b"hello world !"; let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + 
.add_extra_child_storage(CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -67,7 +67,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into()) + client.child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -77,8 +77,6 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) @@ -92,8 +90,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait(), @@ -102,8 +98,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_hash( child_key.clone(), - child_info.clone(), - child_type, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), @@ -112,8 +106,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_size( child_key.clone(), - child_info.clone(), - child_type, key.clone(), None, ).wait(), diff --git a/client/src/client.rs b/client/src/client.rs index d085b92025fdf..a40068609b564 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,12 +334,11 @@ impl Client where pub fn 
child_storage_keys( &self, id: &BlockId, - child_storage_key: &StorageKey, child_info: ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? - .child_keys(&child_storage_key.0, child_info, &key_prefix.0) + .child_keys(child_info, &key_prefix.0) .into_iter() .map(StorageKey) .collect(); @@ -350,12 +349,11 @@ impl Client where pub fn child_storage( &self, id: &BlockId, - storage_key: &StorageKey, child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage(&storage_key.0, child_info, &key.0) + .child_storage(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? .map(StorageData)) } @@ -364,12 +362,11 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, - storage_key: &StorageKey, child_info: ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? - .child_storage_hash(&storage_key.0, child_info, &key.0) + .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
) } @@ -406,7 +403,6 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> sp_blockchain::Result where @@ -414,7 +410,7 @@ impl Client where I::Item: AsRef<[u8]>, { self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) + .and_then(|state| prove_child_read(state, child_info, keys) .map_err(Into::into)) } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index dcff8102aeb6d..3986c70116c01 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -516,8 +516,8 @@ impl backend::BlockImportOperation for BlockImportOperatio check_genesis_storage(&storage)?; let child_delta = storage.children.into_iter() - .map(|(storage_key, child_content)| - (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); + .map(|(_storage_key, child_content)| + (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); let (root, transaction) = self.old_state.full_storage_root( storage.top.into_iter().map(|(k, v)| (k, Some(v))), diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index ad9f43587e4cd..e4e5d681813b9 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -312,17 +312,17 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + let mut storage: HashMap, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for let child_delta = input.children.iter() - .map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone())) + .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) .collect::>(); // make sure to persist the child storage 
- for (child_key, storage_child) in input.children { - storage.insert(Some((child_key, storage_child.child_info)), storage_child.data); + for (_child_key, storage_child) in input.children { + storage.insert(Some(storage_child.child_info), storage_child.data); } let storage_update = InMemoryBackend::from(storage); @@ -386,13 +386,12 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), + Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), } } @@ -407,13 +406,12 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { GenesisOrUnavailableState::Genesis(ref state) => Ok( - state.next_child_storage_key(storage_key, child_info, key) + state.next_child_storage_key(child_info, key) .expect(IN_MEMORY_EXPECT_PROOF) ), GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), @@ -436,27 +434,25 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(storage_key, child_info, action), + state.for_keys_in_child_storage(child_info, action), GenesisOrUnavailableState::Unavailable => (), } } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], action: A, ) { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(storage_key, child_info, prefix, action), + state.for_child_keys_with_prefix(child_info, 
prefix, action), GenesisOrUnavailableState::Unavailable => (), } } @@ -474,7 +470,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -483,7 +478,7 @@ impl StateBackend for GenesisOrUnavailableState { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index e28b1832c29f6..cb0115409405e 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,6 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; +use sp_core::storage::OwnedChildInfo; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -240,10 +241,11 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { + let child_trie = OwnedChildInfo::new_default(request.storage_key.clone()); read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - &request.storage_key, + child_trie.as_ref(), request.keys.iter(), ).map_err(Into::into) } @@ -345,13 +347,11 @@ pub mod tests { use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; + use sp_core::storage::{well_known_keys, StorageKey, OwnedChildInfo}; use sp_runtime::generic::BlockId; use 
sp_state_machine::Backend; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - type TestChecker = LightDataChecker< NativeExecutor, Blake2Hasher, @@ -400,11 +400,12 @@ pub mod tests { fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; + let child_info = OwnedChildInfo::new_default(b"child1".to_vec()); + let child_info = child_info.as_ref(); // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( - b"child1".to_vec(), - CHILD_INFO_1, + child_info, b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -417,15 +418,13 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, - &StorageKey(b"child1".to_vec()), - CHILD_INFO_1, + child_info, &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, - b"child1", - CHILD_INFO_1, + child_info, &[b"key1"], ).unwrap(); @@ -503,14 +502,11 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), - child_info: child_infos.0.to_vec(), - child_type: child_infos.1, keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 5204f1003a6c5..cd9f595665b07 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(crate::trie_unique_id(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,7 +220,6 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), ) { @@ -228,9 +227,9 @@ impl AccountDb for DirectAccountDb { } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(new_info.child_trie_unique_id(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index e88474b508437..ecb2107bbd650 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -121,9 +121,9 @@ use sp_runtime::{ }; use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child, - parameter_types, IsSubType, - weights::DispatchInfo, + Parameter, decl_module, decl_event, decl_storage, decl_error, + parameter_types, IsSubType, weights::DispatchInfo, + storage::child::{self, ChildInfo, OwnedChildInfo}, }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; @@ -225,16 +225,14 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. 
- pub fn child_trie_unique_id(&self) -> child::ChildInfo { + pub fn child_trie_unique_id(&self) -> ChildInfo { trie_unique_id(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. -pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { - // Every new contract uses a new trie id and trie id results from - // hashing, so we can use child storage key (trie id) for child info. - child::ChildInfo::new_uid_parent_key(trie_id) +pub(crate) fn trie_unique_id(trie_id: &[u8]) -> ChildInfo { + ChildInfo::default_unchecked(trie_id) } pub type TombstoneContractInfo = @@ -267,6 +265,10 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. + /// + /// Also, the implementation is responsible for ensuring that `TrieId` starts with + /// `:child_storage:`. + /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -290,8 +292,9 @@ where let mut buf = Vec::new(); buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - - T::Hashing::hash(&buf[..]).as_ref().to_vec() + let buf = T::Hashing::hash(&buf[..]); + // TODO: see https://github.com/paritytech/substrate/issues/2325 + OwnedChildInfo::new_default(buf.as_ref().to_vec()).owned_info().0 } } @@ -807,12 +810,10 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -825,8 +826,8 @@ impl Module { let tombstone = >::new( // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. 
- &child::child_root( - &origin_contract.trie_id, + &child::root( + origin_contract.child_trie_unique_id(), )[..], code_hash, ); @@ -834,7 +835,6 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - &origin_contract.trie_id, origin_contract.child_trie_unique_id(), &blake2_256(key), &value, @@ -935,7 +935,7 @@ decl_storage! { impl OnReapAccount for Module { fn on_reap_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + child::kill_storage(info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 49beebbf0c202..8b342f95b4350 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,6 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - &alive_contract_info.trie_id, alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); @@ -235,7 +234,9 @@ fn enact_verdict( } // Note: this operation is heavy. 
- let child_storage_root = child::child_root(&alive_contract_info.trie_id); + let child_storage_root = child::root( + alive_contract_info.child_trie_unique_id(), + ); let tombstone = >::new( &child_storage_root[..], @@ -245,7 +246,6 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - &alive_contract_info.trie_id, alive_contract_info.child_trie_unique_id(), ); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index e9cd522f2efa8..650726165a80b 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -202,7 +202,7 @@ impl TrieIdGenerator for DummyTrieIdGenerator { let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); - res + child::OwnedChildInfo::new_default(res).owned_info().0 } } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index f549ffc25fd94..32e5bcf1dadf6 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -16,100 +16,90 @@ //! Operation on runtime child storages. //! -//! This module is a currently only a variant of unhashed with additional `storage_key`. -//! Note that `storage_key` must be unique and strong (strong in the sense of being long enough to -//! avoid collision from a resistant hash function (which unique implies)). -//! -//! A **key collision free** unique id is required as parameter to avoid key collision -//! between child tries. -//! This unique id management and generation responsability is delegated to pallet module. -// NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>). +//! This module is a currently only a variant of unhashed with additional `child_info`. +// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info +// of null length parent storage key). 
use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::ChildInfo; +pub use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ).and_then(|v| { - Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { - // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); - None - }) - }) + match child_info.child_type() { + ChildType::ParentKeyId => { + let storage_key = child_info.storage_key(); + sp_io::default_child_storage::get( + storage_key, + key, + ).and_then(|v| { + Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { + // TODO #3700: error should be handleable. + runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); + None + }) + }) + }, + } } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> T { - get(storage_key, child_info, key).unwrap_or_else(Default::default) + get(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: T, ) -> T { - get(storage_key, child_info, key).unwrap_or(default_value) + get(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. 
pub fn get_or_else T>( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - get(storage_key, child_info, key).unwrap_or_else(default_value) + get(child_info, key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. pub fn put( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &T, ) { - let (data, child_type) = child_info.info(); - value.using_encoded(|slice| - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - slice, - ) - ); + match child_info.child_type() { + ChildType::ParentKeyId => value.using_encoded(|slice| + sp_io::default_child_storage::set( + child_info.storage_key(), + key, + slice, + ) + ), + } } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - let r = get(storage_key, child_info, key); + let r = get(child_info, key); if r.is_some() { - kill(storage_key, child_info, key); + kill(child_info, key); } r } @@ -117,113 +107,106 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> T { - take(storage_key, child_info, key).unwrap_or_else(Default::default) + take(child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: T, ) -> T { - take(storage_key, child_info, key).unwrap_or(default_value) + take(child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. 
pub fn take_or_else T>( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], default_value: F, ) -> T { - take(storage_key, child_info, key).unwrap_or_else(default_value) + take(child_info, key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. pub fn exists( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { - let (data, child_type) = child_info.info(); - sp_io::storage::child_read( - storage_key, data, child_type, - key, &mut [0;0][..], 0, - ).is_some() + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::read( + child_info.storage_key(), + key, &mut [0;0][..], 0, + ).is_some(), + } } /// Remove all `storage_key` key/values pub fn kill_storage( - storage_key: &[u8], child_info: ChildInfo, ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_storage_kill( - storage_key, - data, - child_type, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( + child_info.storage_key(), + ), + } } /// Ensure `key` has no explicit entry in storage. pub fn kill( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_clear( - storage_key, - data, - child_type, - key, - ); + match child_info.child_type() { + ChildType::ParentKeyId => { + sp_io::default_child_storage::clear( + child_info.storage_key(), + key, + ); + }, + } } /// Get a Vec of bytes from storage. pub fn get_raw( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::get( + child_info.storage_key(), + key, + ), + } } /// Put a raw byte slice into storage. 
pub fn put_raw( - storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &[u8], ) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - value, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::set( + child_info.storage_key(), + key, + value, + ), + } } /// Calculate current child root value. -pub fn child_root( - storage_key: &[u8], +pub fn root( + child_info: ChildInfo, ) -> Vec { - sp_io::storage::child_root( - storage_key, - ) + match child_info.child_type() { + ChildType::ParentKeyId => sp_io::default_child_storage::root( + child_info.storage_key(), + ), + } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 891557ab2c1d4..2bdc6600f8a01 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,7 +47,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -60,7 +59,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -77,7 +75,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -87,7 +84,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; @@ -100,12 +96,11 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). 
fn set_child_storage( &mut self, - storage_key: Vec, child_info: ChildInfo, key: Vec, value: Vec, ) { - self.place_child_storage(storage_key, child_info, key, Some(value)) + self.place_child_storage(child_info, key, Some(value)) } /// Clear a storage entry (`key`) of current contract being called (effective immediately). @@ -116,11 +111,10 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) { - self.place_child_storage(storage_key.to_vec(), child_info, key.to_vec(), None) + self.place_child_storage(child_info, key.to_vec(), None) } /// Whether a storage entry exists. @@ -131,11 +125,10 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { - self.child_storage(storage_key, child_info, key).is_some() + self.child_storage(child_info, key).is_some() } /// Returns the key immediately following the given key, if it exists. @@ -144,13 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: &[u8], child_info: ChildInfo); + fn kill_child_storage(&mut self, child_info: ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -158,7 +150,6 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. 
fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ); @@ -169,7 +160,6 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. fn place_child_storage( &mut self, - storage_key: Vec, child_info: ChildInfo, key: Vec, value: Option>, @@ -192,7 +182,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index d2ceea582051a..9ee9b76ac265f 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -76,29 +76,6 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. - /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. 
- fn child_get( - &self, - storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) - } - /// Get `key` from storage, placing the value into `value_out` and return the number of /// bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -114,6 +91,71 @@ pub trait Storage { }) } + /// Set `key` to `value` in the storage. + fn set(&mut self, key: &[u8], value: &[u8]) { + self.set_storage(key.to_vec(), value.to_vec()); + } + + /// Clear the storage of the given `key` and its value. + fn clear(&mut self, key: &[u8]) { + self.clear_storage(key) + } + + /// Check whether the given `key` exists in storage. + fn exists(&self, key: &[u8]) -> bool { + self.exists_storage(key) + } + + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix(&mut self, prefix: &[u8]) { + Externalities::clear_prefix(*self, prefix) + } + + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root() + } + + /// "Commit" all existing operations and get the resulting storage change root. + /// `parent_hash` is a SCALE encoded hash. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns an `Option` that holds the SCALE encoded hash. + fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { + self.storage_changes_root(parent_hash) + .expect("Invalid `parent_hash` given to `changes_root`.") + } + + /// Get the next key in storage after the given one in lexicographic order. 
+ fn next_key(&mut self, key: &[u8]) -> Option> { + self.next_storage_key(&key) + } + +} + + +/// Interface for accessing the child storage for default child trie, +/// from within the runtime. +#[runtime_interface] +pub trait DefaultChildStorage { + /// `storage_key` is the full location of the root of the child trie in the parent trie. + /// + /// This function specifically returns the data for `key` in the child storage or `None` + /// if the key can not be found. + fn get( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Option> { + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage(child_info, key).map(|s| s.to_vec()) + } + /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -121,18 +163,15 @@ pub trait Storage { /// are copied into `value_out`. /// /// See `child_get` for common child api parameters. - fn child_read( + fn read( &self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], value_out: &mut [u8], value_offset: u32, ) -> Option { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage(child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -142,108 +181,64 @@ pub trait Storage { }) } - /// Set `key` to `value` in the storage. - fn set(&mut self, key: &[u8], value: &[u8]) { - self.set_storage(key.to_vec(), value.to_vec()); - } - /// Set `key` to `value` in the child storage denoted by `storage_key`. /// /// See `child_get` for common child api parameters. 
- fn child_set( + fn set( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], value: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.set_child_storage(storage_key.to_vec(), child_info, key.to_vec(), value.to_vec()); - } - - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.set_child_storage(child_info, key.to_vec(), value.to_vec()); } /// Clear the given child storage of the given `key` and its value. /// /// See `child_get` for common child api parameters. - fn child_clear( + fn clear ( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); + let child_info = ChildInfo::default_unchecked(storage_key); + self.clear_child_storage(child_info, key); } /// Clear an entire child storage. /// /// See `child_get` for common child api parameters. - fn child_storage_kill( + fn storage_kill( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); - } - - /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { - self.exists_storage(key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.kill_child_storage(child_info); } /// Check whether the given `key` exists in storage. /// /// See `child_get` for common child api parameters. 
- fn child_exists( + fn exists( &self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) -> bool { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) - } - - /// Clear the storage of each key-value pair where the key starts with the given `prefix`. - fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) + let child_info = ChildInfo::default_unchecked(storage_key); + self.exists_child_storage(child_info, key) } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. /// /// See `child_get` for common child api parameters. - fn child_clear_prefix( + fn clear_prefix( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, prefix: &[u8], ) { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); - } - - /// "Commit" all existing operations and compute the resulting storage root. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns the SCALE encoded hash. - fn root(&mut self) -> Vec { - self.storage_root() + let child_info = ChildInfo::default_unchecked(storage_key); + self.clear_child_prefix(child_info, prefix); } /// "Commit" all existing operations and compute the resulting child storage root. @@ -253,40 +248,22 @@ pub trait Storage { /// Returns the SCALE encoded hash. /// /// See `child_get` for common child api parameters. - fn child_root( + fn root( &mut self, storage_key: &[u8], ) -> Vec { - self.child_storage_root(storage_key) - } - - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. - /// - /// The hashing algorithm is defined by the `Block`. 
- /// - /// Returns an `Option` that holds the SCALE encoded hash. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") - } - - /// Get the next key in storage after the given one in lexicographic order. - fn next_key(&mut self, key: &[u8]) -> Option> { - self.next_storage_key(&key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.child_storage_root(child_info) } /// Get the next key in storage after the given one in lexicographic order in child storage. - fn child_next_key( + fn next_key( &mut self, storage_key: &[u8], - child_definition: &[u8], - child_type: u32, key: &[u8], ) -> Option> { - let child_info = ChildInfo::resolve_child_info(child_type, child_definition, storage_key) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) + let child_info = ChildInfo::default_unchecked(storage_key); + self.next_child_storage_key(child_info, key) } } @@ -917,6 +894,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities: std::fmt::Debug { /// Get keyed child storage or None if there is nothing associated. fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -62,11 +61,10 @@ pub trait Backend: std::fmt::Debug { /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) + self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) } /// true if a key exists in storage. @@ -77,11 +75,10 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. 
fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result { - Ok(self.child_storage(storage_key, child_info, key)?.is_some()) + Ok(self.child_storage(child_info, key)?.is_some()) } /// Return the next key in storage in lexicographic order or `None` if there is no value. @@ -90,7 +87,6 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -98,7 +94,6 @@ pub trait Backend: std::fmt::Debug { /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ); @@ -118,7 +113,6 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, @@ -137,7 +131,6 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. 
fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -158,12 +151,11 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); - self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); + self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec())); all } @@ -183,16 +175,16 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first - for (mut storage_key, child_delta, child_info) in child_deltas { + for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); - child_info.as_ref().do_prefix_key(&mut storage_key, None); + self.child_storage_root(child_info.as_ref(), child_delta); + let storage_key = child_info.storage_key(); txs.consolidate(child_txs); if empty { child_roots.push((storage_key, None)); @@ -237,20 +229,18 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).child_storage(storage_key, child_info, key) + (*self).child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - (*self).for_keys_in_child_storage(storage_key, child_info, f) + (*self).for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -259,11 +249,10 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: 
ChildInfo, key: &[u8], ) -> Result, Self::Error> { - (*self).next_child_storage_key(storage_key, child_info, key) + (*self).next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -272,12 +261,11 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) + (*self).for_child_keys_with_prefix(child_info, prefix, f) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -290,7 +278,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -298,7 +285,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { I: IntoIterator)>, H::Out: Ord, { - (*self).child_storage_root(storage_key, child_info, delta) + (*self).child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -327,7 +314,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 344613242ccc9..e0be6e18fd567 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,38 +129,35 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, key: &[u8], ) -> Option { - self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() + let storage_key = child_info.storage_key(); + self.inner.children.get(storage_key).and_then(|child| child.data.get(key)).cloned() } fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> 
Option> { - self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) + self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) } fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { - self.child_storage_hash(storage_key, child_info, key) + self.child_storage_hash(child_info, key) } fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { - Externalities::child_storage(self, storage_key, child_info, key) + Externalities::child_storage(self, child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Option { @@ -170,10 +167,10 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, key: &[u8], ) -> Option { + let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); self.inner.children.get(storage_key.as_ref()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) @@ -193,11 +190,11 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { + let storage_key = child_info.storage_key().to_vec(); let child_map = self.inner.children.entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), @@ -212,10 +209,10 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, ) { - self.inner.children.remove(storage_key.as_ref()); + let storage_key = child_info.storage_key(); + self.inner.children.remove(storage_key); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -240,11 +237,11 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - storage_key: &[u8], - _child_info: ChildInfo, + child_info: ChildInfo, 
prefix: &[u8], ) { - if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { + let storage_key = child_info.storage_key(); + if let Some(child) = self.inner.children.get_mut(storage_key) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) @@ -262,20 +259,18 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { - let mut prefixed = k.to_vec(); - v.child_info.as_ref().do_prefix_key(&mut prefixed, None); - (k.to_vec(), prefixed) + (k.to_vec(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = default_child_trie_root::>(&[]); - for (storage_key, prefixed_storage_key) in keys { - let child_root = self.child_storage_root(storage_key.as_slice()); + let empty_hash = default_child_trie_root::>(); + for (storage_key, child_info) in keys { + let child_root = self.child_storage_root(child_info.as_ref()); if &empty_hash[..] == &child_root[..] 
{ - top.remove(prefixed_storage_key.as_slice()); + top.remove(storage_key.as_slice()); } else { - top.insert(prefixed_storage_key, child_root); + top.insert(storage_key, child_root); } } @@ -284,15 +279,15 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec { - if let Some(child) = self.inner.children.get(storage_key.as_ref()) { + if let Some(child) = self.inner.children.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 + .child_storage_root(child.child_info.as_ref(), delta).0 } else { - default_child_trie_root::>(&[]) + default_child_trie_root::>() }.encode() } @@ -316,7 +311,9 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:unique_id_1" + ); #[test] fn commit_should_work() { @@ -341,30 +338,26 @@ mod tests { #[test] fn children_works() { - let child_storage = b"test".to_vec(); - let mut ext = BasicExternalities::new(Storage { top: Default::default(), children: map![ - child_storage.clone() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: CHILD_INFO_1.to_owned(), } ] }); - let child = &child_storage[..]; - - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - ext.set_child_storage(child.to_vec(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(CHILD_INFO_1, b"dog".to_vec(), 
b"puppy".to_vec()); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(child, CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), None); - ext.kill_child_storage(child, CHILD_INFO_1); - assert_eq!(ext.child_storage(child, CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(CHILD_INFO_1); + assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c731d4104b260..d3dadebf8d977 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -158,7 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) + if !backend.exists_child_storage(child_info.as_ref(), k) .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -351,8 +351,8 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_2"); fn prepare_for_build(zero: u64) -> ( InMemoryBackend, diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d073846b5b8c3..aa2a7d5fa2ea4 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -205,22 +205,21 @@ where fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info.storage_key(), key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| - self.backend.child_storage(storage_key.as_ref(), child_info, key) + self.backend.child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result.as_ref().map(HexDisplay::from) ); @@ -230,22 +229,21 @@ where fn child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(storage_key.as_ref(), key) + .child_storage(child_info.storage_key(), key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| - self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) + self.backend.child_storage_hash(child_info, key) 
.expect(EXT_NOT_ALLOWED_TO_FAIL) ); trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -255,18 +253,17 @@ where fn original_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage(storage_key.as_ref(), child_info, key) + .child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{:04x}: ChildOriginal({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result.as_ref().map(HexDisplay::from), ); @@ -276,18 +273,17 @@ where fn original_child_storage_hash( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend - .child_storage_hash(storage_key.as_ref(), child_info, key) + .child_storage_hash(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); trace!(target: "state-trace", "{}: ChildHashOriginal({}) {}={:?}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -312,22 +308,21 @@ where fn exists_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.child_storage(storage_key.as_ref(), key) { + let result = match self.overlay.child_storage(child_info.storage_key(), key) { Some(x) => x.is_some(), _ => self.backend - .exists_child_storage(storage_key.as_ref(), child_info, key) + .exists_child_storage(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL), }; trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}", self.id, - 
HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), result, ); @@ -351,15 +346,14 @@ where fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend - .next_child_storage_key(storage_key.as_ref(), child_info, key) + .next_child_storage_key(child_info, key) .expect(EXT_NOT_ALLOWED_TO_FAIL); let next_overlay_key_change = self.overlay.next_child_storage_key_change( - storage_key.as_ref(), + child_info.storage_key(), key ); @@ -370,7 +364,6 @@ where Some(overlay_key.0.to_vec()) } else { self.next_child_storage_key( - storage_key, child_info, &overlay_key.0[..], ) @@ -396,38 +389,36 @@ where fn place_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, value: Option, ) { trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, - HexDisplay::from(&storage_key), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&key), value.as_ref().map(HexDisplay::from) ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.set_child_storage(storage_key, child_info, key, value); + self.overlay.set_child_storage(child_info, key, value); } fn kill_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_storage(storage_key.as_ref(), child_info); - self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_storage(child_info); + self.backend.for_keys_in_child_storage(child_info, |key| { + 
self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -451,21 +442,20 @@ where fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&child_info.storage_key()), HexDisplay::from(&prefix), ); let _guard = sp_panic_handler::AbortGuard::force_abort(); self.mark_dirty(); - self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix); - self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); + self.overlay.clear_child_prefix(child_info, prefix); + self.backend.for_child_keys_with_prefix(child_info, prefix, |key| { + self.overlay.set_child_storage(child_info, key.to_vec(), None); }); } @@ -490,24 +480,24 @@ where fn child_storage_root( &mut self, - storage_key: &[u8], + child_info: ChildInfo, ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); + let storage_key = child_info.storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { let root = self - .storage(storage_key.as_ref()) + .storage(storage_key) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(&[]) + default_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, - HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&storage_key), HexDisplay::from(&root.as_ref()), ); root.encode() } else { - let storage_key = storage_key.as_ref(); if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { let (root, is_empty, _) = { @@ -520,7 +510,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) + 
self.backend.child_storage_root(child_info.as_ref(), delta) }; let root = root.encode(); @@ -547,7 +537,7 @@ where .storage(storage_key.as_ref()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>(&[]) + default_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, @@ -633,9 +623,9 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_KEY_1: &[u8] = b"Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:Child1" + ); fn prepare_overlay_with_changes() -> OverlayedChanges { @@ -750,12 +740,12 @@ mod tests { fn next_child_storage_key_works() { let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_KEY_1.to_vec() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -770,35 +760,35 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[5]), Some(vec![10])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![30])); + 
assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(CHILD_INFO_1, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_KEY_1, CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_KEY_1.to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(CHILD_INFO_1, vec![20], None); + overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_KEY_1.to_vec() => StorageChild { + CHILD_INFO_1.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], @@ -811,24 +801,24 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); + 
assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[10]), + ext.child_storage_hash(CHILD_INFO_1, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[20]), None); + assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[20]), + ext.child_storage_hash(CHILD_INFO_1, &[20]), None, ); - assert_eq!(ext.child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(CHILD_KEY_1, CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(CHILD_INFO_1, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(CHILD_KEY_1, CHILD_INFO_1, &[30]), + ext.child_storage_hash(CHILD_INFO_1, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 02fd61de9c603..1a977e1d14076 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; +use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. 
Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,8 +121,8 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); + let mut inner: HashMap, BTreeMap> + = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { inner, @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, + inner: Vec<(Option, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -165,9 +165,9 @@ impl From, StorageCollectio impl InMemory { /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { + pub fn child_storage_keys(&self) -> impl Iterator { self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) + item.0.as_ref().map(|v| v.as_ref()) ) } } @@ -175,7 +175,7 @@ impl InMemory { impl Backend for InMemory 
where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, + Option, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -186,11 +186,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + Ok(self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.get(key).map(Clone::clone))) } @@ -210,22 +209,20 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, mut f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().for_each(|k| f(&k))); } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); } @@ -252,7 +249,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -260,9 +256,8 @@ impl Backend for InMemory where H::Out: Codec { I: IntoIterator, Option>)>, H::Out: Ord { - let storage_key = storage_key.to_vec(); - let parent_prefix = child_info.parent_prefix(None); - let child_info = Some((storage_key.clone(), child_info.to_owned())); + let child_type = child_info.child_type(); + let child_info = Some(child_info.to_owned()); let existing_pairs = self.inner.get(&child_info) .into_iter() @@ -270,7 +265,6 @@ impl Backend for InMemory where H::Out: Codec { let transaction: Vec<_> = delta.into_iter().collect(); let root = child_trie_root::, _, _, _>( - &storage_key, 
existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() @@ -279,7 +273,9 @@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); - let is_default = root == default_child_trie_root::>(parent_prefix); + let is_default = match child_type { + ChildType::ParentKeyId => root == default_child_trie_root::>(), + }; (root, is_default, vec![(child_info, full_transaction)]) } @@ -294,12 +290,11 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + let next_key = self.inner.get(&Some(child_info.to_owned())) .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); Ok(next_key) @@ -321,11 +316,10 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + self.inner.get(&Some(child_info.to_owned())) .into_iter() .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) .collect() @@ -336,11 +330,8 @@ impl Backend for InMemory where H::Out: Codec { let mut new_child_roots = Vec::new(); let mut root_map = None; for (child_info, map) in &self.inner { - if let Some((storage_key, child_info)) = child_info.as_ref() { - let mut prefix_storage_key = storage_key.to_vec(); - child_info.as_ref().do_prefix_key(&mut prefix_storage_key, None); - // no need to use child_info at this point because we use a MemoryDB for - // proof (with PrefixedMemoryDB it would be needed). 
+ if let Some(child_info) = child_info.as_ref() { + let prefix_storage_key = child_info.as_ref().storage_key().to_vec(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { @@ -370,18 +361,18 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let child_info = OwnedChildInfo::new_default(b"1".to_vec()); let mut storage = storage.update( vec![( - Some((b"1".to_vec(), child_info.clone())), + Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), + assert_eq!(trie_backend.child_storage(child_info.as_ref(), b"2").unwrap(), Some(b"3".to_vec())); - let mut prefixed_storage_key = b"1".to_vec(); - child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); - assert!(trie_backend.storage(prefixed_storage_key.as_slice()).unwrap().is_some()); + let child_info = child_info.as_ref(); + let storage_key = child_info.storage_key(); + assert!(trie_backend.storage(storage_key).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8bafda6aa6186..8f63aa0da8e40 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,7 +550,6 @@ where /// Generate child storage read proof. 
pub fn prove_child_read( mut backend: B, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> Result> @@ -563,7 +562,7 @@ where { let trie_backend = backend.as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) + prove_child_read_on_trie_backend(trie_backend, child_info, keys) } /// Generate storage read proof on pre-created trie backend. @@ -590,7 +589,6 @@ where /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, - storage_key: &[u8], child_info: ChildInfo, keys: I, ) -> Result> @@ -604,7 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) + .child_storage(child_info.clone(), key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -635,7 +633,7 @@ where pub fn read_child_proof_check( root: H::Out, proof: StorageProof, - storage_key: &[u8], + child_info: ChildInfo, keys: I, ) -> Result, Option>>, Box> where @@ -649,7 +647,7 @@ where for key in keys.into_iter() { let value = read_child_proof_check_on_proving_backend( &proving_backend, - storage_key, + child_info, key.as_ref(), )?; result.insert(key.as_ref().to_vec(), value); @@ -672,15 +670,14 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend, H>, - storage_key: &[u8], + child_info: ChildInfo, key: &[u8], ) -> Result>, Box> where H: Hasher, H::Out: Ord + Codec, { - // Not a prefixed memory db, using empty unique id and include root resolution. 
- proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) + proving_backend.child_storage(child_info, key) .map_err(|e| Box::new(e) as Box) } @@ -702,7 +699,9 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -945,26 +944,22 @@ mod tests { ); ext.set_child_storage( - b"testchild".to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - b"testchild", CHILD_INFO_1, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - b"testchild", CHILD_INFO_1, ); assert_eq!( ext.child_storage( - b"testchild", CHILD_INFO_1, b"abc" ), @@ -1000,20 +995,19 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - b"sub1", CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - b"sub1", + CHILD_INFO_1, &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - b"sub1", + CHILD_INFO_1, &[b"value2"], ).unwrap(); assert_eq!( @@ -1028,13 +1022,17 @@ mod tests { #[test] fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub_test1" + ); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub_test2" + ); + use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); - let subtrie1 = b"sub_test1"; - let subtrie2 = b"sub_test2"; let mut transaction = { let backend = test_trie(); 
let mut cache = StorageTransactionCache::default(); @@ -1045,8 +1043,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(subtrie1.to_vec(), CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2.to_vec(), CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 37187e163fe1c..7dcbbdd2a0e40 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -247,12 +247,12 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_child_storage( &mut self, - storage_key: StorageKey, child_info: ChildInfo, key: StorageKey, val: Option, ) { let extrinsic_index = self.extrinsic_index(); + let storage_key = child_info.storage_key().to_vec(); let map_entry = self.prospective.children.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -275,10 +275,10 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); + let storage_key = child_info.storage_key(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -349,11 +349,11 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); + let 
storage_key = child_info.storage_key(); let map_entry = self.prospective.children.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); @@ -538,7 +538,8 @@ impl OverlayedChanges { .chain(self.committed.children.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( - storage_key.clone(), + self.child_info(storage_key).cloned() + .expect("child info initialized in either committed or prospective"), self.committed.children.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) @@ -547,8 +548,6 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), ) ); @@ -852,38 +851,40 @@ mod tests { #[test] fn next_child_storage_key_change_works() { let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = OwnedChildInfo::new_default(child.clone()); + let child_info = child_info.as_ref(); + let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.set_child_storage(child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); + overlay.set_child_storage(child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child_info, 
vec![30], None); // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); + let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); assert_eq!(next_to_5.0.to_vec(), vec![10]); assert_eq!(next_to_5.1.value, Some(vec![10])); // next_committed < next_prospective - let next_to_10 = overlay.next_child_storage_key_change(&child, &[10]).unwrap(); + let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap(); assert_eq!(next_to_10.0.to_vec(), vec![20]); assert_eq!(next_to_10.1.value, Some(vec![20])); // next_committed == next_prospective - let next_to_20 = overlay.next_child_storage_key_change(&child, &[20]).unwrap(); + let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap(); assert_eq!(next_to_20.0.to_vec(), vec![30]); assert_eq!(next_to_20.1.value, None); // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(&child, &[30]).unwrap(); + let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap(); assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); // next_prospective, no next_committed - let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap(); + let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); assert_eq!(next_to_40.0.to_vec(), vec![50]); assert_eq!(next_to_40.1.value, Some(vec![50])); } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 0572907401ba6..ec0ef6a4692ee 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -143,15 +143,13 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. 
pub fn child_storage( &mut self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8] ) -> Result>, String> { - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - let root = self.storage(prefixed_storage_key.as_slice())? + let storage_key = child_info.storage_key(); + let root = self.storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(&[])); + .unwrap_or(default_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( @@ -162,7 +160,6 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); read_child_trie_value_with::, _, _>( - storage_key, child_info.keyspace(), &eph, &root.as_ref(), @@ -279,20 +276,18 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.child_storage(storage_key, child_info, key) + self.0.child_storage(child_info, key) } fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.0.for_keys_in_child_storage(storage_key, child_info, f) + self.0.for_keys_in_child_storage(child_info, f) } fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { @@ -301,11 +296,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result>, Self::Error> { - self.0.next_child_storage_key(storage_key, child_info, key) + self.0.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -318,12 +312,11 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + 
self.0.for_child_keys_with_prefix( child_info, prefix, f) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -336,11 +329,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], ) -> Vec> { - self.0.child_keys(storage_key, child_info, prefix) + self.0.child_keys(child_info, prefix) } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -351,7 +343,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -359,7 +350,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { - self.0.child_storage_root(storage_key, child_info, delta) + self.0.child_storage_root(child_info, delta) } } @@ -404,8 +395,12 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub2" + ); fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, @@ -474,33 +469,29 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - let subtrie1 = b"sub1"; - let subtrie2 = b"sub2"; - let own1 = subtrie1.to_vec(); - let own2 = subtrie2.to_vec(); let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), + (Some(CHILD_INFO_1.to_owned()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), + (Some(CHILD_INFO_2.to_owned()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory 
= InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) + in_memory.child_storage_keys().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(CHILD_INFO_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(CHILD_INFO_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -528,7 +519,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -536,7 +527,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(CHILD_INFO_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 39a34509b720b..6ff6d42aba3f8 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -128,9 +128,9 @@ impl TestExternalities self.overlay.committed.children.clone().into_iter() .chain(self.overlay.prospective.children.clone().into_iter()) - .for_each(|(keyspace, (map, child_info))| { + .for_each(|(_storage_key, (map, child_info))| { transaction.push(( - Some((keyspace, child_info)), + Some(child_info), map.into_iter() .map(|(k, v)| (k, v.value)) 
.collect::>(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 0df13a8fff137..29a31be210c77 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -20,7 +20,7 @@ use log::{warn, debug}; use hash_db::Hasher; use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -80,11 +80,10 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) + self.essence.child_storage(child_info, key) } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -93,11 +92,10 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) + self.essence.next_child_storage_key(child_info, key) } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -110,21 +108,19 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) + self.essence.for_keys_in_child_storage(child_info, f) } fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], f: F, ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + self.essence.for_child_keys_with_prefix(child_info, prefix, f) } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -194,7 +190,6 @@ impl, H: Hasher> Backend for TrieBackend where 
fn child_storage_root( &self, - storage_key: &[u8], child_info: ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -202,12 +197,13 @@ impl, H: Hasher> Backend for TrieBackend where I: IntoIterator)>, H::Out: Ord, { - let default_root = default_child_trie_root::>(child_info.parent_prefix(None)); + let default_root = match child_info.child_type() { + ChildType::ParentKeyId => default_child_trie_root::>() + }; let mut write_overlay = S::Overlay::default(); - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - let mut root = match self.storage(prefixed_storage_key.as_slice()) { + let storage_key = child_info.storage_key(); + let mut root = match self.storage(storage_key) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -223,7 +219,6 @@ impl, H: Hasher> Backend for TrieBackend where ); match child_delta_trie_root::, _, _, _, _, _>( - storage_key, child_info.keyspace(), &mut eph, root, @@ -252,15 +247,15 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const CHILD_KEY_1: &[u8] = b"sub1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( + b":child_storage:default:sub1" + ); fn test_db() -> (PrefixedMemoryDB, H256) { let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_INFO_1.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -270,9 +265,7 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut 
mdb, &mut root); - let mut prefixed_storage_key = CHILD_KEY_1.to_vec(); - CHILD_INFO_1.do_prefix_key(&mut prefixed_storage_key, None); - trie.insert(prefixed_storage_key.as_slice(), &sub_root[..]).expect("insert failed"); + trie.insert(CHILD_INFO_1.storage_key(), &sub_root[..]).expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -298,7 +291,7 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(CHILD_INFO_1, b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 980bf13ad53cb..9a8ad14445c5f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -72,21 +72,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, storage_key: &[u8], child_info: ChildInfo) -> Result, String> { - let mut prefixed_storage_key = storage_key.to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - self.storage(prefixed_storage_key.as_slice()) + fn child_root(&self, child_info: ChildInfo) -> Result, String> { + self.storage(child_info.storage_key()) } /// Return the next key in the child trie i.e. the minimum key that is strictly superior to /// `key` in lexicographic order. pub fn next_child_storage_key( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let child_root = match self.child_root(storage_key, child_info)? { + let child_root = match self.child_root(child_info)? 
{ Some(child_root) => child_root, None => return Ok(None), }; @@ -94,7 +91,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut hash = H::Out::default(); if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); + return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); } // note: child_root and hash must be same size, panics otherwise. hash.as_mut().copy_from_slice(&child_root[..]); @@ -168,12 +165,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of child storage at given key. pub fn child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, key: &[u8], ) -> Result, String> { - let root = self.child_root(storage_key, child_info)? - .unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()); + let root = self.child_root(child_info)? + .unwrap_or(default_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -183,19 +179,18 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) + read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) .map_err(map_e) } /// Retrieve all entries keys of child storage and call `f` for each of those keys. 
pub fn for_keys_in_child_storage( &self, - storage_key: &[u8], child_info: ChildInfo, f: F, ) { - let root = match self.child_root(storage_key, child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), + let root = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -209,7 +204,6 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - storage_key, child_info.keyspace(), &eph, &root, @@ -222,13 +216,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Execute given closure for all keys starting with prefix. pub fn for_child_keys_with_prefix( &self, - storage_key: &[u8], child_info: ChildInfo, prefix: &[u8], mut f: F, ) { - let root_vec = match self.child_root(storage_key, child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(child_info.parent_prefix(None)).encode()), + let root_vec = match self.child_root(child_info) { + Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -443,7 +436,9 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); + let child_info = ChildInfo::default_unchecked( + b":child_storage:default:MyChild" + ); // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -467,9 +462,7 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - let mut prefixed_storage_key = b"MyChild".to_vec(); - child_info.do_prefix_key(&mut prefixed_storage_key, None); - trie.insert(prefixed_storage_key.as_slice(), root_1.as_ref()) + trie.insert(child_info.storage_key(), root_1.as_ref()) .expect("insert failed"); }; @@ -485,19 +478,19 @@ mod test { let essence_2 = TrieBackendEssence::new(mdb, 
root_2); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) + essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) + essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) + essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec())) ); assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) + essence_2.next_child_storage_key(child_info, b"6"), Ok(None) ); } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ea4dd56a1e7a9..df0b9a932af10 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -62,7 +62,7 @@ pub struct StorageChild { pub struct Storage { /// Top trie storage data. pub top: StorageMap, - /// Children trie storage data by storage key. + /// Children trie storage data. /// Note that the key is not including child prefix, this will /// not be possible if a different kind of trie than `default` /// get in use. @@ -133,7 +133,6 @@ pub mod well_known_keys { /// Information related to a child state. pub enum ChildInfo<'a> { ParentKeyId(ChildTrie<'a>), - Default(ChildTrie<'a>), } /// Owned version of `ChildInfo`. @@ -142,31 +141,46 @@ pub enum ChildInfo<'a> { #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum OwnedChildInfo { ParentKeyId(OwnedChildTrie), - Default(OwnedChildTrie), } impl<'a> ChildInfo<'a> { /// Instantiates information for a default child trie. 
- pub const fn new_uid_parent_key(storage_key: &'a[u8]) -> Self { + /// This is a rather unsafe method and requires to be + /// use from a valid payload such as: + /// ``` + /// use sp_storage::{ChildInfo, ChildType, OwnedChildInfo}; + /// + /// let info1 = ChildInfo::default_unchecked( + /// b":child_storage:default:stor_key", + /// ); + /// let info2 = OwnedChildInfo::new_default( + /// b"stor_key".to_vec(), + /// ); + /// + /// assert!(info1.info() == info2.as_ref().info()); + /// ``` + pub const fn default_unchecked(encoded: &'a[u8]) -> Self { ChildInfo::ParentKeyId(ChildTrie { - data: storage_key, + data: encoded, }) } - /// Instantiates information for a default child trie. - pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, - }) + /// Create child info from a linear byte packed value and a given type. + pub fn resolve_child_info(child_type: u32, info: &'a [u8]) -> Option { + match child_type { + x if x == ChildType::ParentKeyId as u32 => { + debug_assert!( + info.starts_with(ChildType::ParentKeyId.parent_prefix()) + ); + Some(Self::default_unchecked(info)) + }, + _ => None, + } } /// Instantiates a owned version of this child info. pub fn to_owned(&self) -> OwnedChildInfo { match self { - ChildInfo::Default(ChildTrie { data }) - => OwnedChildInfo::Default(OwnedChildTrie { - data: data.to_vec(), - }), ChildInfo::ParentKeyId(ChildTrie { data }) => OwnedChildInfo::ParentKeyId(OwnedChildTrie { data: data.to_vec(), @@ -174,21 +188,6 @@ impl<'a> ChildInfo<'a> { } } - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8], storage_key: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::ParentKeyId as u32 => { - if !data.len() == 0 { - // do not allow anything for additional data. 
- return None; - } - Some(ChildInfo::new_uid_parent_key(storage_key)) - }, - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), - _ => None, - } - } - /// Return a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { @@ -196,9 +195,6 @@ impl<'a> ChildInfo<'a> { ChildInfo::ParentKeyId(ChildTrie { data, }) => (data, ChildType::ParentKeyId as u32), - ChildInfo::Default(ChildTrie { - data, - }) => (data, ChildType::CryptoUniqueId as u32), } } @@ -206,33 +202,43 @@ impl<'a> ChildInfo<'a> { /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { + match self { + ChildInfo::ParentKeyId(..) => self.unprefixed_storage_key(), + } + } + + /// Return a reference to the full location in the direct parent of + /// this trie. + /// If the trie got no parent this returns the empty slice, + /// so by nature an empty slice is not a valid parent location. + /// This does not include child type related prefix. + pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { data, }) => &data[..], - ChildInfo::Default(ChildTrie { - data, - }) => &data[..], } } - /// Return the location reserved for this child trie in their parent trie if there - /// is one. - pub fn parent_prefix(&self, _parent: Option<&'a ChildInfo>) -> &'a [u8] { + /// Return a reference to the location in the direct parent of + /// this trie. + /// The static part of the storage key is omitted. + pub fn unprefixed_storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(..) - | ChildInfo::Default(..) => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => if data.len() != 0 { + &data[ChildType::ParentKeyId.parent_prefix().len()..] 
+ } else { + &[] + }, } } - /// Change a key to get prefixed with the parent prefix. - pub fn do_prefix_key(&self, key: &mut Vec, parent: Option<&ChildInfo>) { - let parent_prefix = self.parent_prefix(parent); - let key_len = key.len(); - if parent_prefix.len() > 0 { - key.resize(key_len + parent_prefix.len(), 0); - key.copy_within(..key_len, parent_prefix.len()); - key[..parent_prefix.len()].copy_from_slice(parent_prefix); + /// Return the type for this child info. + pub fn child_type(&self) -> ChildType { + match self { + ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, } } } @@ -244,17 +250,37 @@ impl<'a> ChildInfo<'a> { pub enum ChildType { /// If runtime module ensures that the child key is a unique id that will /// only be used once, this parent key is used as a child trie unique id. - ParentKeyId = 0, - /// Default, this uses a cryptographic strong unique id as input, this id - /// is used as a unique child trie identifier. - CryptoUniqueId = 1, + ParentKeyId = 1, +} + +impl ChildType { + /// Change a key to get prefixed with the parent prefix. + /// TODO try to make this method non public + pub fn do_prefix_key(&self, key: &mut Vec) { + let parent_prefix = self.parent_prefix(); + let key_len = key.len(); + if parent_prefix.len() > 0 { + key.resize(key_len + parent_prefix.len(), 0); + key.copy_within(..key_len, parent_prefix.len()); + key[..parent_prefix.len()].copy_from_slice(parent_prefix); + } + } + + /// Return the location reserved for this child trie in their parent trie if there + /// is one. + fn parent_prefix(&self) -> &'static [u8] { + match self { + &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, + } + } } impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, + /// Instantiates info for a default child trie with a default parent. 
+ pub fn new_default(mut storage_key: Vec) -> Self { + ChildType::ParentKeyId.do_prefix_key(&mut storage_key); + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data: storage_key, }) } @@ -262,18 +288,32 @@ impl OwnedChildInfo { /// are not compatible. pub fn try_update(&mut self, other: ChildInfo) -> bool { match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), } } + /// Owned variant of `info`. + pub fn owned_info(self) -> (Vec, u32) { + match self { + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), + } + } + + /// Return a reference to the full location in the direct parent of + /// this trie. + pub fn storage_key(self) -> Vec { + match self { + OwnedChildInfo::ParentKeyId(OwnedChildTrie { + data, + }) => data, + } + } + /// Get `ChildInfo` reference to this owned child info. pub fn as_ref(&self) -> ChildInfo { match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) => ChildInfo::ParentKeyId(ChildTrie { data: data.as_slice(), @@ -309,7 +349,6 @@ impl OwnedChildTrie { /// are not compatible. fn try_update(&mut self, other: ChildInfo) -> bool { match other { - ChildInfo::Default(other) => self.data[..] == other.data[..], ChildInfo::ParentKeyId(other) => self.data[..] 
== other.data[..], } } @@ -319,17 +358,8 @@ const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default #[test] fn assert_default_trie_in_child_trie() { - let child_info = ChildInfo::new_default(b"any key"); - let prefix = child_info.parent_prefix(None); + let child_info = OwnedChildInfo::new_default(b"any key".to_vec()); + let child_info = child_info.as_ref(); + let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); } - -#[test] -fn test_do_prefix() { - let child_info = ChildInfo::new_default(b"any key"); - let mut prefixed_1 = b"key".to_vec(); - child_info.do_prefix_key(&mut prefixed_1, None); - let mut prefixed_2 = DEFAULT_CHILD_TYPE_PARENT_PREFIX.to_vec(); - prefixed_2.extend_from_slice(b"key"); - assert_eq!(prefixed_1, prefixed_2); -} diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 0cf268856bb45..b037a27b7b47a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -212,7 +212,6 @@ pub fn read_trie_value_with< /// Determine the default child trie root. pub fn default_child_trie_root( - _storage_key: &[u8], ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -220,7 +219,6 @@ pub fn default_child_trie_root( /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_trie_root( - _storage_key: &[u8], input: I, ) -> ::Out where @@ -234,7 +232,6 @@ pub fn child_trie_root( /// Determine a child trie root given a hash DB and delta values. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( - _storage_key: &[u8], keyspace: &[u8], db: &mut DB, root_data: RD, @@ -269,7 +266,6 @@ pub fn child_delta_trie_root( /// Call `f` for all keys in a child trie. 
pub fn for_keys_in_child_trie( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -320,7 +316,6 @@ pub fn record_all_keys( /// Read a value from the child trie. pub fn read_child_trie_value( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -340,7 +335,6 @@ pub fn read_child_trie_value( /// Read a value from the child trie with given query. pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], keyspace: &[u8], db: &DB, root_slice: &[u8], diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 2cb08db6ff472..2c6967ff2e0f5 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -123,13 +123,13 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let mut storage = self.genesis_config().genesis_map(); - let child_roots = storage.children.iter().map(|(sk, child_content)| { + let child_roots = storage.children.iter().map(|(_sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - let mut prefixed_storage_key = sk.clone(); - child_content.child_info.as_ref().do_prefix_key(&mut prefixed_storage_key, None); - (prefixed_storage_key, state_root.encode()) + let child_info = child_content.child_info.as_ref(); + let storage_key = child_info.storage_key().to_vec(); + (storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() @@ -194,14 +194,13 @@ pub trait TestClientBuilderExt: Sized { /// # Panics /// /// Panics if the key is empty. 
- fn add_extra_child_storage>, K: Into>, V: Into>>( + fn add_extra_child_storage>, V: Into>>( mut self, - storage_key: SK, child_info: ChildInfo, key: K, value: V, ) -> Self { - let storage_key = storage_key.into(); + let storage_key = child_info.storage_key().to_vec(); let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 944932052fb32..0c3459bbb7f18 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -49,7 +49,6 @@ use sp_version::NativeVersion; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_inherents::{CheckInherentsResult, InherentData}; use cfg_if::cfg_if; -use sp_core::storage::ChildType; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::AuthorityId; @@ -873,22 +872,17 @@ fn test_read_storage() { } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b"read_child_storage"; - const UNIQUE_ID: &[u8] = b":unique_id"; + const STORAGE_KEY: &[u8] = b":child_storage:default:unique_id_1"; const KEY: &[u8] = b":read_child_storage"; - sp_io::storage::child_set( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + sp_io::default_child_storage::set( + STORAGE_KEY, KEY, b"test", ); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 0, @@ -897,10 +891,8 @@ fn test_read_child_storage() { assert_eq!(&v, b"test"); let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, + let r = sp_io::default_child_storage::read( + STORAGE_KEY, KEY, &mut v, 8, From 87bd97c24d9cf8b384ff14fa52df1f779ee8d033 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Feb 2020 23:17:48 +0100 Subject: [PATCH 44/85] fix polka ref issue. 
--- primitives/state-machine/src/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index e0be6e18fd567..4f7d7bfb43e16 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -172,7 +172,7 @@ impl Externalities for BasicExternalities { ) -> Option { let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key.as_ref()) + self.inner.children.get(storage_key) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } From 17cf6130ee8933290bd24c37b4ec3a8e3958c50f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 12:40:18 +0100 Subject: [PATCH 45/85] Switching back to unprefixed in child info (all temporary struct are not general child struct, but default child struct only). Applying merge of ChildInfo and OwnedChildInfo. --- client/chain-spec/src/chain_spec.rs | 3 +- client/db/src/bench.rs | 14 +- client/db/src/lib.rs | 14 +- client/db/src/storage_cache.rs | 14 +- client/network/src/chain.rs | 4 +- client/network/src/protocol.rs | 6 +- .../src/protocol/light_client_handler.rs | 6 +- client/rpc/src/state/state_full.rs | 14 +- client/rpc/src/state/tests.rs | 11 +- client/src/client.rs | 8 +- client/src/light/backend.rs | 14 +- client/src/light/fetcher.rs | 12 +- frame/contracts/src/account_db.rs | 12 +- frame/contracts/src/lib.rs | 22 +- frame/contracts/src/rent.rs | 6 +- frame/contracts/src/tests.rs | 2 +- frame/support/src/storage/child.rs | 32 +-- primitives/externalities/src/lib.rs | 24 +- primitives/io/src/lib.rs | 36 +-- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 40 ++-- primitives/state-machine/src/basic.rs | 56 +++-- .../state-machine/src/changes_trie/build.rs | 56 +++-- primitives/state-machine/src/ext.rs | 89 +++---- .../state-machine/src/in_memory_backend.rs | 52 ++--- 
primitives/state-machine/src/lib.rs | 44 ++-- .../state-machine/src/overlayed_changes.rs | 21 +- .../state-machine/src/proving_backend.rs | 39 ++-- primitives/state-machine/src/trie_backend.rs | 26 +-- .../state-machine/src/trie_backend_essence.rs | 23 +- primitives/storage/src/lib.rs | 217 ++++++++---------- test-utils/client/src/lib.rs | 10 +- test-utils/runtime/client/src/lib.rs | 9 +- test-utils/runtime/src/lib.rs | 2 +- 34 files changed, 455 insertions(+), 485 deletions(-) diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index bf12d3e578a73..af75f7c3c04f3 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -287,8 +287,7 @@ impl ChainSpec { .collect(); let children = storage.children.into_iter() .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); + let (info, ci_type) = child.child_info.info(); ( StorageKey(sk), ChildRawStorage { diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 4d80d77cb60c2..fce759590e531 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -140,7 +140,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key) @@ -152,7 +152,7 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key) @@ -164,7 +164,7 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key) @@ -184,7 +184,7 @@ impl StateBackend> for BenchmarkingState 
{ fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { if let Some(ref state) = *self.state.borrow() { @@ -194,7 +194,7 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -211,7 +211,7 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where I: IntoIterator, Option>)>, @@ -229,7 +229,7 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix)) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index efbcb26ff8fd8..fe3d707579291 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,7 +152,7 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.child_storage(child_info, key) @@ -164,7 +164,7 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(child_info, key) @@ -176,7 +176,7 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(child_info, key) @@ -192,7 +192,7 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(child_info, f) @@ -200,7 +200,7 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: 
&ChildInfo, prefix: &[u8], f: F, ) { @@ -216,7 +216,7 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -235,7 +235,7 @@ impl StateBackend> for RefTrackingState { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(child_info, prefix) diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 7f5dcecf41dae..44d84e5689a83 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,7 +539,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { let key = (child_info.storage_key().to_vec(), key.to_vec()); @@ -575,7 +575,7 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { self.state.exists_child_storage(child_info, key) @@ -583,7 +583,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.state.for_keys_in_child_storage(child_info, f) @@ -595,7 +595,7 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.state.next_child_storage_key(child_info, key) @@ -611,7 +611,7 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -627,7 +627,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -646,7 +646,7 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, - child_info: 
ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.state.child_keys(child_info, prefix) diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index e419323c99edd..442334cb4f015 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,7 +56,7 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -137,7 +137,7 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, - child_info: ChildInfo, + child_info: &ChildInfo, keys: &[Vec], ) -> Result { (self as &SubstrateClient) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 52914cca277e0..df0156f77e92f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::{listeners::ListenerId, Substream}, muxing::StreamMuxerBox}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, OwnedChildInfo}; +use sp_core::storage::{StorageKey, ChildInfo}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1567,10 +1567,10 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); + let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.context_data.chain.read_child_proof( &request.block, - child_info.as_ref(), + &child_info, &request.keys, ) { Ok(proof) => proof, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 3480de1bb5700..a9accd7f158d1 
100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -48,7 +48,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, OwnedChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -510,8 +510,8 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = OwnedChildInfo::new_default(request.storage_key.clone()); - let proof = match self.chain.read_child_proof(&block, child_info.as_ref(), &request.keys) { + let child_info = ChildInfo::new_default(&request.storage_key); + let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 238c99fc9e67b..a949cee862845 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, OwnedChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -314,10 +314,10 @@ impl StateBackend for FullState StateBackend for FullState StateBackend for FullState = ChildInfo::default_unchecked( - b":child_storage:default:child" -); #[test] fn should_return_storage() { @@ -41,10 +38,11 @@ fn should_return_storage() { const VALUE: &[u8] = b"hello world"; const CHILD_VALUE: &[u8] = b"hello world !"; + let child_info = ChildInfo::new_default(STORAGE_KEY); let mut core = 
tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) + .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); @@ -77,13 +75,14 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { + let child_info = ChildInfo::new_default(STORAGE_KEY); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .add_child_storage(&child_info, "key", vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(b"test".to_vec()); + let child_key = StorageKey(STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); diff --git a/client/src/client.rs b/client/src/client.rs index a40068609b564..2f69d21a41ec1 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,7 +334,7 @@ impl Client where pub fn child_storage_keys( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { let keys = self.state_at(id)? @@ -349,7 +349,7 @@ impl Client where pub fn child_storage( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? @@ -362,7 +362,7 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { Ok(self.state_at(id)? 
@@ -403,7 +403,7 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> sp_blockchain::Result where I: IntoIterator, diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index e4e5d681813b9..4fba83b882c68 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -24,7 +24,7 @@ use parking_lot::RwLock; use codec::{Decode, Encode}; use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::storage::{well_known_keys, ChildInfo}; use sp_core::offchain::storage::InMemOffchainStorage; use sp_state_machine::{ Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, @@ -312,7 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, _> = HashMap::new(); + let mut storage: HashMap, _> = HashMap::new(); storage.insert(None, input.top); // create a list of children keys to re-compute roots for @@ -386,7 +386,7 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { match *self { @@ -406,7 +406,7 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { match *self { @@ -434,7 +434,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, action: A, ) { match *self { @@ -446,7 +446,7 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], action: A, ) { @@ -470,7 +470,7 @@ impl StateBackend for 
GenesisOrUnavailableState fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index cb0115409405e..f37c06bea247d 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,7 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::OwnedChildInfo; +use sp_core::storage::ChildInfo; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -241,11 +241,11 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_trie = OwnedChildInfo::new_default(request.storage_key.clone()); + let child_trie = ChildInfo::new_default(&request.storage_key); read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - child_trie.as_ref(), + &child_trie, request.keys.iter(), ).map_err(Into::into) } @@ -347,7 +347,7 @@ pub mod tests { use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, OwnedChildInfo}; + use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; use sp_runtime::generic::BlockId; use sp_state_machine::Backend; use super::*; @@ -400,8 +400,8 @@ pub mod tests { fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; - let child_info = OwnedChildInfo::new_default(b"child1".to_vec()); - let child_info = child_info.as_ref(); 
+ let child_info = ChildInfo::new_default(b"child1"); + let child_info = &child_info; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index cd9f595665b07..7617546c0aa1e 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,16 +220,16 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - new_info.child_trie_unique_id(), + &new_info.child_trie_unique_id(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); } else { - child::kill(new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index ecb2107bbd650..ae9bbbe3f42b9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -123,7 +123,7 @@ use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, parameter_types, IsSubType, weights::DispatchInfo, - storage::child::{self, ChildInfo, OwnedChildInfo}, + storage::child::{self, ChildInfo}, }; use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; @@ -232,7 +232,7 @@ impl RawAliveContractInfo ChildInfo { - ChildInfo::default_unchecked(trie_id) + ChildInfo::new_default(trie_id) } pub type TombstoneContractInfo = @@ -265,10 +265,6 @@ pub trait TrieIdGenerator { /// /// The implementation must ensure every new trie id is unique: two consecutive calls with the /// same parameter needs to return different trie id values. 
- /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. - /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 fn trie_id(account_id: &AccountId) -> TrieId; } @@ -292,9 +288,7 @@ where let mut buf = Vec::new(); buf.extend_from_slice(account_id.as_ref()); buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - let buf = T::Hashing::hash(&buf[..]); - // TODO: see https://github.com/paritytech/substrate/issues/2325 - OwnedChildInfo::new_default(buf.as_ref().to_vec()).owned_info().0 + T::Hashing::hash(&buf[..]).as_ref().into() } } @@ -810,11 +804,11 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -827,7 +821,7 @@ impl Module { // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. &child::root( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), )[..], code_hash, ); @@ -835,7 +829,7 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, ); @@ -935,7 +929,7 @@ decl_storage! 
{ impl OnReapAccount for Module { fn on_reap_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_unique_id()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 8b342f95b4350..e48ea9a1c2707 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,7 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -235,7 +235,7 @@ fn enact_verdict( // Note: this operation is heavy. let child_storage_root = child::root( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); let tombstone = >::new( @@ -246,7 +246,7 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 650726165a80b..e9cd522f2efa8 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -202,7 +202,7 @@ impl TrieIdGenerator for DummyTrieIdGenerator { let mut res = vec![]; res.extend_from_slice(&new_seed.to_le_bytes()); res.extend_from_slice(&account_id.to_le_bytes()); - child::OwnedChildInfo::new_default(res).owned_info().0 + res } } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index 32e5bcf1dadf6..658908d258a2f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -22,11 +22,11 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; -pub use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType}; +pub use 
sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { match child_info.child_type() { @@ -49,7 +49,7 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { get(child_info, key).unwrap_or_else(Default::default) @@ -58,7 +58,7 @@ pub fn get_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -68,7 +68,7 @@ pub fn get_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -77,7 +77,7 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &T, ) { @@ -94,7 +94,7 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let r = get(child_info, key); @@ -107,7 +107,7 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> T { take(child_info, key).unwrap_or_else(Default::default) @@ -116,7 +116,7 @@ pub fn take_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. 
Ensure there is no explicit entry on return. pub fn take_or( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: T, ) -> T { @@ -126,7 +126,7 @@ pub fn take_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], default_value: F, ) -> T { @@ -135,7 +135,7 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { match child_info.child_type() { @@ -148,7 +148,7 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( - child_info: ChildInfo, + child_info: &ChildInfo, ) { match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( @@ -159,7 +159,7 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. pub fn kill( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { match child_info.child_type() { @@ -174,7 +174,7 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { match child_info.child_type() { @@ -187,7 +187,7 @@ pub fn get_raw( /// Put a raw byte slice into storage. pub fn put_raw( - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], value: &[u8], ) { @@ -202,7 +202,7 @@ pub fn put_raw( /// Calculate current child root value. 
pub fn root( - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::root( diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 2bdc6600f8a01..beb59745e8831 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,7 +47,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -59,7 +59,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -75,7 +75,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -84,7 +84,7 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -96,7 +96,7 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). fn set_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Vec, ) { @@ -111,7 +111,7 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). fn clear_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) { self.place_child_storage(child_info, key.to_vec(), None) @@ -125,7 +125,7 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. 
fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { self.child_storage(child_info, key).is_some() @@ -137,12 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. - fn kill_child_storage(&mut self, child_info: ChildInfo); + fn kill_child_storage(&mut self, child_info: &ChildInfo); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -150,7 +150,7 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ); @@ -160,7 +160,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. fn place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: Vec, value: Option>, ); @@ -182,7 +182,7 @@ pub trait Externalities: ExtensionStore { /// storage map will be removed. fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec; /// Get the change trie root of the current storage overlay at a block with given parent. 
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 9ee9b76ac265f..befc3434761d8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -152,8 +152,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage(child_info, key).map(|s| s.to_vec()) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key).map(|s| s.to_vec()) } /// Get `key` from child storage, placing the value into `value_out` and return the number @@ -170,8 +170,8 @@ pub trait DefaultChildStorage { value_out: &mut [u8], value_offset: u32, ) -> Option { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage(&child_info, key) .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -190,8 +190,8 @@ pub trait DefaultChildStorage { key: &[u8], value: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.set_child_storage(child_info, key.to_vec(), value.to_vec()); + let child_info = ChildInfo::new_default(storage_key); + self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } /// Clear the given child storage of the given `key` and its value. @@ -202,8 +202,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.clear_child_storage(child_info, key); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_storage(&child_info, key); } /// Clear an entire child storage. 
@@ -213,8 +213,8 @@ pub trait DefaultChildStorage { &mut self, storage_key: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.kill_child_storage(child_info); + let child_info = ChildInfo::new_default(storage_key); + self.kill_child_storage(&child_info); } /// Check whether the given `key` exists in storage. @@ -225,8 +225,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> bool { - let child_info = ChildInfo::default_unchecked(storage_key); - self.exists_child_storage(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.exists_child_storage(&child_info, key) } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. @@ -237,8 +237,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], prefix: &[u8], ) { - let child_info = ChildInfo::default_unchecked(storage_key); - self.clear_child_prefix(child_info, prefix); + let child_info = ChildInfo::new_default(storage_key); + self.clear_child_prefix(&child_info, prefix); } /// "Commit" all existing operations and compute the resulting child storage root. @@ -252,8 +252,8 @@ pub trait DefaultChildStorage { &mut self, storage_key: &[u8], ) -> Vec { - let child_info = ChildInfo::default_unchecked(storage_key); - self.child_storage_root(child_info) + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info) } /// Get the next key in storage after the given one in lexicographic order in child storage. 
@@ -262,8 +262,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { - let child_info = ChildInfo::default_unchecked(storage_key); - self.next_child_storage_key(child_info, key) + let child_info = ChildInfo::new_default(storage_key); + self.next_child_storage_key(&child_info, key) } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 517141a210e06..5049d7be1369d 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -136,7 +136,7 @@ impl BuildStorage for sp_core::storage::Storage { let k = k.clone(); if let Some(map) = storage.children.get_mut(&k) { map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { + if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 55b7e988dad07..aa089bab9e920 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,7 +20,7 @@ use log::warn; use hash_db::Hasher; use codec::Encode; -use sp_core::storage::{ChildInfo, OwnedChildInfo}; +use sp_core::storage::ChildInfo; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ @@ -54,14 +54,14 @@ pub trait Backend: std::fmt::Debug { /// Get keyed child storage or None if there is nothing associated. fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v))) @@ -75,7 +75,7 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. 
fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result { Ok(self.child_storage(child_info, key)?.is_some()) @@ -87,14 +87,14 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ); @@ -113,7 +113,7 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ); @@ -131,7 +131,7 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -151,7 +151,7 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { let mut all = Vec::new(); @@ -175,7 +175,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, - I2: IntoIterator, + I2: IntoIterator, H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -183,13 +183,13 @@ pub trait Backend: std::fmt::Debug { // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(child_info.as_ref(), child_delta); - let storage_key = child_info.storage_key(); + self.child_storage_root(&child_info, child_delta); + let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((storage_key, None)); + 
child_roots.push((prefixed_storage_key, None)); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key, Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( @@ -229,7 +229,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).child_storage(child_info, key) @@ -237,7 +237,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { (*self).for_keys_in_child_storage(child_info, f) @@ -249,7 +249,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { (*self).next_child_storage_key(child_info, key) @@ -261,7 +261,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -278,7 +278,7 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -314,7 +314,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( - Option, + Option, StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 4f7d7bfb43e16..61ec462491b50 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,7 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let storage_key = child_info.storage_key(); @@ -138,7 +138,7 @@ impl Externalities for BasicExternalities { fn 
child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) @@ -146,7 +146,7 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { self.child_storage_hash(child_info, key) @@ -154,7 +154,7 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { Externalities::child_storage(self, child_info, key) @@ -167,7 +167,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let storage_key = child_info.storage_key(); @@ -190,7 +190,7 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -209,7 +209,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { let storage_key = child_info.storage_key(); self.inner.children.remove(storage_key); @@ -237,7 +237,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let storage_key = child_info.storage_key(); @@ -258,19 +258,19 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.iter().map(|(k, v)| { - (k.to_vec(), v.child_info.clone()) + let keys: Vec<_> = self.inner.children.iter().map(|(_k, v)| { + (v.child_info.prefixed_storage_key(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. 
Using null storage key until multiple // type of child trie support. let empty_hash = default_child_trie_root::>(); - for (storage_key, child_info) in keys { - let child_root = self.child_storage_root(child_info.as_ref()); + for (prefixed_storage_key, child_info) in keys { + let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] { - top.remove(storage_key.as_slice()); + top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(storage_key, child_root); + top.insert(prefixed_storage_key, child_root); } } @@ -279,13 +279,13 @@ impl Externalities for BasicExternalities { fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { if let Some(child) = self.inner.children.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() - .child_storage_root(child.child_info.as_ref(), delta).0 + .child_storage_root(&child.child_info, delta).0 } else { default_child_trie_root::>() }.encode() @@ -311,10 +311,6 @@ mod tests { use sp_core::storage::well_known_keys::CODE; use hex_literal::hex; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:unique_id_1" - ); - #[test] fn commit_should_work() { let mut ext = BasicExternalities::default(); @@ -338,26 +334,28 @@ mod tests { #[test] fn children_works() { + let child_info = ChildInfo::new_default(b"storage_key"); + let child_info = &child_info; let mut ext = BasicExternalities::new(Storage { top: Default::default(), children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ] }); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); + assert_eq!(ext.child_storage(child_info, b"doe"), 
Some(b"reindeer".to_vec())); - ext.set_child_storage(CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); + ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!(ext.child_storage(child_info, b"dog"), Some(b"puppy".to_vec())); - ext.clear_child_storage(CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"dog"), None); + ext.clear_child_storage(child_info, b"dog"); + assert_eq!(ext.child_storage(child_info, b"dog"), None); - ext.kill_child_storage(CHILD_INFO_1); - assert_eq!(ext.child_storage(CHILD_INFO_1, b"doe"), None); + ext.kill_child_storage(child_info); + assert_eq!(ext.child_storage(child_info, b"doe"), None); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index d3dadebf8d977..3d5ca3d41ba21 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,7 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); + let child_info = changes.default_child_info(sk).cloned(); ( changes.committed.children.get(sk).map(|c| &c.0), changes.prospective.children.get(sk).map(|c| &c.0), @@ -158,7 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(child_info.as_ref(), k) + if !backend.exists_child_storage(&child_info, k) .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -351,15 +351,14 @@ mod test { use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked(b":child_storage:default:unique_id_2"); - fn prepare_for_build(zero: u64) -> ( InMemoryBackend, InMemoryStorage, OverlayedChanges, Configuration, ) { + let child_info_1 = ChildInfo::new_default(b"storage_key1"); + let child_info_2 = ChildInfo::new_default(b"storage_key2"); let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -368,8 +367,8 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = b"1".to_vec(); - let child_trie_key2 = b"2".to_vec(); + let child_trie_key1 = child_info_1.storage_key().to_vec(); + let child_trie_key2 = child_info_2.storage_key().to_vec(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -436,13 +435,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), + ].into_iter().collect(), child_info_2.to_owned())), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -465,7 +464,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), + ].into_iter().collect(), child_info_1.to_owned())), ].into_iter().collect(), }, collect_extrinsics: true, @@ -486,6 +485,8 @@ mod test { #[test] fn 
build_changes_trie_nodes_on_non_digest_block() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; let changes_trie_nodes = prepare_input( @@ -502,11 +503,11 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), ]), - (ChildIndex { block: zero + 5, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), ]), @@ -522,6 +523,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; let changes_trie_nodes = prepare_input( @@ -543,7 +546,7 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -552,7 +555,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] 
}, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -567,6 +570,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; let changes_trie_nodes = prepare_input( @@ -589,13 +594,13 @@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), ]), - (ChildIndex { block: zero + 16, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), ]), @@ -656,6 +661,8 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { fn test_with_zero(zero: u64) { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, storage, mut changes, config) = prepare_for_build(zero); // 110: missing from backend, set to None in overlay @@ -684,7 +691,7 
@@ mod test { ]); assert_eq!(changes_trie_nodes.1.into_iter() .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, + (ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), @@ -693,7 +700,7 @@ mod test { InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, + (ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), ]), @@ -708,6 +715,8 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; @@ -727,8 +736,8 @@ mod test { let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) .set_digest_input_blocks(vec![1, 2, 3]) .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) + .insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) + .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) .complete(4, &trie_root4); storage.cache_mut().perform(cached_data4); @@ -754,7 +763,10 @@ mod test { .map(|(k, i)| (k, i.collect::>())) .collect::>(); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 
16u64, storage_key: b"1".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { + block: 16u64, + storage_key: child_trie_key1.clone(), + }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), @@ -763,7 +775,7 @@ mod test { ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"2".to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.to_vec() }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index aa2a7d5fa2ea4..d5f12643d00d4 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -205,7 +205,7 @@ where fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -229,7 +229,7 @@ where fn child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -253,7 +253,7 @@ where fn original_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -273,7 +273,7 @@ where fn original_child_storage_hash( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -308,7 +308,7 @@ where fn exists_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -346,7 +346,7 @@ where fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Option { let next_backend_key = self.backend @@ -389,7 +389,7 @@ where fn 
place_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, value: Option, ) { @@ -407,7 +407,7 @@ where fn kill_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { trace!(target: "state-trace", "{:04x}: KillChild({})", self.id, @@ -442,7 +442,7 @@ where fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -480,13 +480,14 @@ where fn child_storage_root( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) -> Vec { let _guard = sp_panic_handler::AbortGuard::force_abort(); let storage_key = child_info.storage_key(); + let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { let root = self - .storage(storage_key) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( default_child_trie_root::>() @@ -499,7 +500,7 @@ where root.encode() } else { - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { let (root, is_empty, _) = { let delta = self.overlay.committed.children.get(storage_key) .into_iter() @@ -510,7 +511,7 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); - self.backend.child_storage_root(child_info.as_ref(), delta) + self.backend.child_storage_root(&child_info, delta) }; let root = root.encode(); @@ -520,9 +521,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. 
if is_empty { - self.overlay.set_storage(storage_key.into(), None); + self.overlay.set_storage(prefixed_storage_key, None); } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", @@ -534,7 +535,7 @@ where } else { // empty overlay let root = self - .storage(storage_key.as_ref()) + .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( default_child_trie_root::>() @@ -623,10 +624,6 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:Child1" - ); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -738,20 +735,23 @@ mod tests { #[test] fn next_child_storage_key_works() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; + let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![40] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); @@ -760,67 +760,68 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[5]), Some(vec![10])); + 
assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[10]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[10]), Some(vec![30])); // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[20]), Some(vec![30])); + assert_eq!(ext.next_child_storage_key(child_info, &[20]), Some(vec![30])); // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40])); drop(ext); - overlay.set_child_storage(CHILD_INFO_1, vec![50], Some(vec![50])); + overlay.set_child_storage(child_info, vec![50], Some(vec![50])); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(CHILD_INFO_1, &[40]), Some(vec![50])); + assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); } #[test] fn child_storage_works() { + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(CHILD_INFO_1, vec![30], Some(vec![31])); + overlay.set_child_storage(child_info, vec![20], None); + overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], children: map![ - CHILD_INFO_1.storage_key().to_vec() => StorageChild { + child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], vec![20] => vec![20], vec![30] => vec![40] ], - child_info: CHILD_INFO_1.to_owned(), + child_info: child_info.to_owned(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - 
assert_eq!(ext.child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[10]), Some(vec![10])); + assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); + assert_eq!(ext.original_child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[10]), + ext.child_storage_hash(child_info, &[10]), Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), ); - assert_eq!(ext.child_storage(CHILD_INFO_1, &[20]), None); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[20]), Some(vec![20])); + assert_eq!(ext.child_storage(child_info, &[20]), None); + assert_eq!(ext.original_child_storage(child_info, &[20]), Some(vec![20])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[20]), + ext.child_storage_hash(child_info, &[20]), None, ); - assert_eq!(ext.child_storage(CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(CHILD_INFO_1, &[30]), Some(vec![40])); + assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31])); + assert_eq!(ext.original_child_storage(child_info, &[30]), Some(vec![40])); assert_eq!( - ext.child_storage_hash(CHILD_INFO_1, &[30]), + ext.child_storage_hash(child_info, &[30]), Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); - } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 1a977e1d14076..f4cdb7315c756 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,7 +27,7 @@ use sp_trie::{ MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, ChildType, Storage}; +use sp_core::storage::{ChildInfo, ChildType, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. 
https://github.com/rust-lang/rust/issues/35121 @@ -47,7 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, + inner: HashMap, BTreeMap>, // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -88,7 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< - T: IntoIterator, StorageCollection)> + T: IntoIterator, StorageCollection)> >( &self, changes: T, @@ -107,10 +107,10 @@ impl InMemory { } } -impl From, BTreeMap>> +impl From, BTreeMap>> for InMemory { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from(inner: HashMap, BTreeMap>) -> Self { InMemory { inner, trie: None, @@ -121,7 +121,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> + let mut inner: HashMap, BTreeMap> = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { @@ -144,12 +144,12 @@ impl From> for InMemory { } } -impl From, StorageCollection)>> +impl From, StorageCollection)>> for InMemory { fn from( - inner: Vec<(Option, StorageCollection)>, + inner: Vec<(Option, StorageCollection)>, ) -> Self { - let mut expanded: HashMap, BTreeMap> + let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -164,18 +164,16 @@ impl From, StorageCollection)>> } impl InMemory { - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v| v.as_ref()) - ) + /// Child storage infos iterator. 
+ pub fn child_storage_infos(&self) -> impl Iterator { + self.inner.iter().filter_map(|item| item.0.as_ref()) } } impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( - Option, + Option, StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -186,7 +184,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { Ok(self.inner.get(&Some(child_info.to_owned())) @@ -209,7 +207,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, mut f: F, ) { self.inner.get(&Some(child_info.to_owned())) @@ -218,7 +216,7 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -249,7 +247,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -290,7 +288,7 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); @@ -316,7 +314,7 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec { self.inner.get(&Some(child_info.to_owned())) @@ -331,7 +329,7 @@ impl Backend for InMemory where H::Out: Codec { let mut root_map = None; for (child_info, map) in &self.inner { if let Some(child_info) = child_info.as_ref() { - let prefix_storage_key = child_info.as_ref().storage_key().to_vec(); + let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; 
new_child_roots.push((prefix_storage_key, ch.as_ref().into())); } else { @@ -361,7 +359,8 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"1".to_vec()); + let child_info = ChildInfo::new_default(b"1"); + let child_info = &child_info; let mut storage = storage.update( vec![( Some(child_info.clone()), @@ -369,10 +368,9 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(child_info.as_ref(), b"2").unwrap(), + assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); - let child_info = child_info.as_ref(); - let storage_key = child_info.storage_key(); - assert!(trie_backend.storage(storage_key).unwrap().is_some()); + let storage_key = child_info.prefixed_storage_key(); + assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8f63aa0da8e40..640a57b37f8d8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,7 +550,7 @@ where /// Generate child storage read proof. pub fn prove_child_read( mut backend: B, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -589,7 +589,7 @@ where /// Generate storage read proof on pre-created trie backend. 
pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result> where @@ -602,7 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend - .child_storage(child_info.clone(), key.as_ref()) + .child_storage(child_info, key.as_ref()) .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -633,7 +633,7 @@ where pub fn read_child_proof_check( root: H::Out, proof: StorageProof, - child_info: ChildInfo, + child_info: &ChildInfo, keys: I, ) -> Result, Option>>, Box> where @@ -670,7 +670,7 @@ where /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( proving_backend: &TrieBackend, H>, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Box> where @@ -699,10 +699,6 @@ mod tests { fallback_succeeds: bool, } - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); - impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -931,6 +927,8 @@ mod tests { #[test] fn set_child_storage_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -944,23 +942,23 @@ mod tests { ); ext.set_child_storage( - CHILD_INFO_1, + child_info, b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( - CHILD_INFO_1, + child_info, b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( - CHILD_INFO_1, + child_info, ); assert_eq!( ext.child_storage( - CHILD_INFO_1, + child_info, b"abc" ), None @@ -969,6 +967,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + let child_info = ChildInfo::new_default(b"sub1"); + let child_info = &child_info; // fetch read proof from 
'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -995,19 +995,19 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, - CHILD_INFO_1, + child_info, &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), - CHILD_INFO_1, + child_info, &[b"value3"], ).unwrap(); let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), - CHILD_INFO_1, + child_info, &[b"value2"], ).unwrap(); assert_eq!( @@ -1023,12 +1023,8 @@ mod tests { #[test] fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub_test1" - ); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub_test2" - ); + let child_info_1 = ChildInfo::new_default(b"sub_test1"); + let child_info_2 = ChildInfo::new_default(b"sub_test2"); use crate::trie_backend::tests::test_trie; let mut overlay = OverlayedChanges::default(); @@ -1043,8 +1039,8 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - ext.set_child_storage(CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); cache.transaction.unwrap() }; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 7dcbbdd2a0e40..71f5d66b4ba72 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -28,7 +28,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use 
sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; use std::{mem, ops}; use hash_db::Hasher; @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + pub children: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -247,7 +247,7 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: StorageKey, val: Option, ) { @@ -275,7 +275,7 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); @@ -349,7 +349,7 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) { let extrinsic_index = self.extrinsic_index(); @@ -430,7 +430,7 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. fn drain_committed(&mut self) -> ( impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, + impl Iterator)>, ChildInfo))>, ) { assert!(self.prospective.is_empty()); ( @@ -538,7 +538,7 @@ impl OverlayedChanges { .chain(self.committed.children.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( - self.child_info(storage_key).cloned() + self.default_child_info(storage_key).cloned() .expect("child info initialized in either committed or prospective"), self.committed.children.get(storage_key) .into_iter() @@ -594,7 +594,7 @@ impl OverlayedChanges { /// Get child info for a storage key. 
/// Take the latest value so prospective first. - pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children.get(storage_key) { return Some(&ci); } @@ -850,9 +850,8 @@ mod tests { #[test] fn next_child_storage_key_change_works() { - let child = b"Child1".to_vec(); - let child_info = OwnedChildInfo::new_default(child.clone()); - let child_info = child_info.as_ref(); + let child_info = ChildInfo::new_default(b"Child1"); + let child_info = &child_info; let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], Some(vec![20])); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ec0ef6a4692ee..8542bdbef732c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -143,7 +143,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. 
pub fn child_storage( &mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8] ) -> Result>, String> { let storage_key = child_info.storage_key(); @@ -276,7 +276,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.child_storage(child_info, key) @@ -284,7 +284,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.0.for_keys_in_child_storage(child_info, f) @@ -296,7 +296,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { self.0.next_child_storage_key(child_info, key) @@ -312,7 +312,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -329,7 +329,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { self.0.child_keys(child_info, prefix) @@ -343,7 +343,7 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -395,13 +395,6 @@ mod tests { use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub2" - ); - fn test_proving<'a>( trie_backend: &'a TrieBackend,Blake2Hasher>, ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { @@ -469,29 +462,33 @@ mod tests { #[test] fn 
proof_recorded_and_checked_with_child() { + let child_info_1 = ChildInfo::new_default(b"sub1"); + let child_info_2 = ChildInfo::new_default(b"sub2"); + let child_info_1 = &child_info_1; + let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(CHILD_INFO_1.to_owned()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some(CHILD_INFO_2.to_owned()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.to_owned(), Vec::new())) + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(CHILD_INFO_1, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(CHILD_INFO_2, &[i]).unwrap().unwrap(), + in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i] )); @@ -519,7 +516,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); - assert_eq!(proving.child_storage(CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); + assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -527,7 +524,7 @@ mod tests { proof ).unwrap(); assert_eq!( - proof_check.child_storage(CHILD_INFO_1, &[64]).unwrap().unwrap(), + proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs 
index 29a31be210c77..2c09c049b542d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -80,7 +80,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.essence.child_storage(child_info, key) @@ -92,7 +92,7 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { self.essence.next_child_storage_key(child_info, key) @@ -108,7 +108,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { self.essence.for_keys_in_child_storage(child_info, f) @@ -116,7 +116,7 @@ impl, H: Hasher> Backend for TrieBackend where fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], f: F, ) { @@ -190,7 +190,7 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, - child_info: ChildInfo, + child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) where @@ -202,8 +202,8 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); - let storage_key = child_info.storage_key(); - let mut root = match self.storage(storage_key) { + let prefixed_storage_key = child_info.prefixed_storage_key(); + let mut root = match self.storage(prefixed_storage_key.as_slice()) { Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -247,15 +247,14 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use super::*; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::default_unchecked( - b":child_storage:default:sub1" - ); + const CHILD_KEY_1: &[u8] = b"sub1"; fn test_db() -> (PrefixedMemoryDB, H256) { 
+ let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_INFO_1.keyspace()); + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -265,7 +264,8 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_INFO_1.storage_key(), &sub_root[..]).expect("insert failed"); + trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) + .expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); @@ -291,7 +291,7 @@ pub mod tests { fn read_from_child_storage_returns_some() { let test_trie = test_trie(); assert_eq!( - test_trie.child_storage(CHILD_INFO_1, b"value3").unwrap(), + test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 9a8ad14445c5f..763f57bd6b7d5 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -72,15 +72,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: ChildInfo) -> Result, String> { - self.storage(child_info.storage_key()) + fn child_root(&self, child_info: &ChildInfo) -> Result, String> { + self.storage(child_info.prefixed_storage_key().as_slice()) } /// Return the next key in the child trie i.e. 
the minimum key that is strictly superior to /// `key` in lexicographic order. pub fn next_child_storage_key( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { let child_root = match self.child_root(child_info)? { @@ -103,7 +103,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: fn next_storage_key_from_root( &self, root: &H::Out, - child_info: Option, + child_info: Option<&ChildInfo>, key: &[u8], ) -> Result, String> { let mut read_overlay = S::Overlay::default(); @@ -165,7 +165,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Get the value of child storage at given key. pub fn child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, key: &[u8], ) -> Result, String> { let root = self.child_root(child_info)? @@ -186,7 +186,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Retrieve all entries keys of child storage and call `f` for each of those keys. pub fn for_keys_in_child_storage( &self, - child_info: ChildInfo, + child_info: &ChildInfo, f: F, ) { let root = match self.child_root(child_info) { @@ -216,7 +216,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Execute given closure for all keys starting with prefix. 
pub fn for_child_keys_with_prefix( &self, - child_info: ChildInfo, + child_info: &ChildInfo, prefix: &[u8], mut f: F, ) { @@ -242,7 +242,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: root: &H::Out, prefix: &[u8], mut f: F, - child_info: Option, + child_info: Option<&ChildInfo>, ) { let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -436,9 +436,8 @@ mod test { #[test] fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::default_unchecked( - b":child_storage:default:MyChild" - ); + let child_info = ChildInfo::new_default(b"MyChild"); + let child_info = &child_info; // Contains values let mut root_1 = H256::default(); // Contains child trie @@ -462,7 +461,7 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(child_info.storage_key(), root_1.as_ref()) + trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); }; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index df0b9a932af10..30677f0f617c2 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -53,7 +53,7 @@ pub struct StorageChild { pub data: StorageMap, /// Associated child info for a child /// trie. - pub child_info: OwnedChildInfo, + pub child_info: ChildInfo, } #[cfg(feature = "std")] @@ -129,62 +129,60 @@ pub mod well_known_keys { } } -#[derive(Clone, Copy)] /// Information related to a child state. -pub enum ChildInfo<'a> { - ParentKeyId(ChildTrie<'a>), -} - -/// Owned version of `ChildInfo`. -/// To be use in persistence layers. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub enum OwnedChildInfo { - ParentKeyId(OwnedChildTrie), +pub enum ChildInfo { + ParentKeyId(ChildTrie), } -impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. 
- /// This is a rather unsafe method and requires to be - /// use from a valid payload such as: - /// ``` - /// use sp_storage::{ChildInfo, ChildType, OwnedChildInfo}; - /// - /// let info1 = ChildInfo::default_unchecked( - /// b":child_storage:default:stor_key", - /// ); - /// let info2 = OwnedChildInfo::new_default( - /// b"stor_key".to_vec(), - /// ); - /// - /// assert!(info1.info() == info2.as_ref().info()); - /// ``` - pub const fn default_unchecked(encoded: &'a[u8]) -> Self { +impl ChildInfo { + /// Instantiates info for a default child trie with a default unprefixed parent + /// storage key. + pub fn new_default(storage_key: &[u8]) -> Self { + let data = storage_key.to_vec(); + ChildInfo::ParentKeyId(ChildTrie { data }) + } + + /// Instantiates info for a default child trie with a default unprefixed parent + /// owned storage key. + pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrie { - data: encoded, + data: storage_key, }) } + /// Try to update with another instance, return false if both instance + /// are not compatible. + pub fn try_update(&mut self, other: &ChildInfo) -> bool { + match self { + ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), + } + } + /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, info: &'a [u8]) -> Option { - match child_type { - x if x == ChildType::ParentKeyId as u32 => { + pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { + match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => { debug_assert!( info.starts_with(ChildType::ParentKeyId.parent_prefix()) ); - Some(Self::default_unchecked(info)) + Some(Self::new_default(info)) }, - _ => None, + None => None, } } - /// Instantiates a owned version of this child info. - pub fn to_owned(&self) -> OwnedChildInfo { + /// Top trie defined as the unique crypto id trie with + /// 0 length unique id. 
+ pub fn top_trie() -> Self { + Self::new_default(&[]) + } + + /// Is this child info a the top trie. + pub fn is_top_trie(&self) -> bool { match self { - ChildInfo::ParentKeyId(ChildTrie { data }) - => OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data: data.to_vec(), - }), + ChildInfo::ParentKeyId(ChildTrie { data }) => data.len() == 0 } } @@ -198,20 +196,30 @@ impl<'a> ChildInfo<'a> { } } + /// Owned variant of `info`. + pub fn into_info(self) -> (Vec, u32) { + match self { + ChildInfo::ParentKeyId(ChildTrie { + data, + }) => (data, ChildType::ParentKeyId as u32), + } + } + /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(..) => self.unprefixed_storage_key(), + ChildInfo::ParentKeyId(..) => self.storage_key(), } } - /// Return a reference to the full location in the direct parent of + /// Return a reference to the location in the direct parent of /// this trie. /// If the trie got no parent this returns the empty slice, /// so by nature an empty slice is not a valid parent location. /// This does not include child type related prefix. + /// The static part of the storage key is omitted. pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { @@ -220,17 +228,25 @@ impl<'a> ChildInfo<'a> { } } - /// Return a reference to the location in the direct parent of + /// Return a the full location in the direct parent of /// this trie. - /// The static part of the storage key is omitted. - pub fn unprefixed_storage_key(&self) -> &[u8] { + pub fn prefixed_storage_key(&self) -> Vec { match self { ChildInfo::ParentKeyId(ChildTrie { data, - }) => if data.len() != 0 { - &data[ChildType::ParentKeyId.parent_prefix().len()..] 
- } else { - &[] + }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), + } + } + + /// Return a the full location in the direct parent of + /// this trie. + pub fn into_prefixed_storage_key(self) -> Vec { + match self { + ChildInfo::ParentKeyId(ChildTrie { + mut data, + }) => { + ChildType::ParentKeyId.do_prefix_key(&mut data); + data }, } } @@ -247,16 +263,34 @@ impl<'a> ChildInfo<'a> { /// It does not strictly define different child type, it can also /// be related to technical consideration or api variant. #[repr(u32)] +#[derive(Clone, Copy, PartialEq)] +#[cfg_attr(feature = "std", derive(Debug))] pub enum ChildType { /// If runtime module ensures that the child key is a unique id that will - /// only be used once, this parent key is used as a child trie unique id. + /// only be used once, its parent key is used as a child trie unique id. ParentKeyId = 1, } impl ChildType { + /// Try to get a child type from its `u32` representation. + fn new(repr: u32) -> Option { + Some(match repr { + r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId, + _ => return None, + }) + } + /// Change a key to get prefixed with the parent prefix. - /// TODO try to make this method non public - pub fn do_prefix_key(&self, key: &mut Vec) { + fn new_prefixed_key(&self, key: &[u8]) -> Vec { + let parent_prefix = self.parent_prefix(); + let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); + result.extend_from_slice(parent_prefix); + result.extend_from_slice(key); + result + } + + /// Change a key to get prefixed with the parent prefix. + fn do_prefix_key(&self, key: &mut Vec) { let parent_prefix = self.parent_prefix(); let key_len = key.len(); if parent_prefix.len() > 0 { @@ -275,79 +309,24 @@ impl ChildType { } } -impl OwnedChildInfo { - /// Instantiates info for a default child trie with a default parent. 
- pub fn new_default(mut storage_key: Vec) -> Self { - ChildType::ParentKeyId.do_prefix_key(&mut storage_key); - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data: storage_key, - }) - } - - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::ParentKeyId(owned_child_trie) => owned_child_trie.try_update(other), - } - } - - /// Owned variant of `info`. - pub fn owned_info(self) -> (Vec, u32) { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data, - }) => (data, ChildType::ParentKeyId as u32), - } - } - - /// Return a reference to the full location in the direct parent of - /// this trie. - pub fn storage_key(self) -> Vec { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { - data, - }) => data, - } - } - - /// Get `ChildInfo` reference to this owned child info. - pub fn as_ref(&self) -> ChildInfo { - match self { - OwnedChildInfo::ParentKeyId(OwnedChildTrie { data }) - => ChildInfo::ParentKeyId(ChildTrie { - data: data.as_slice(), - }), - - } - } -} - /// A child trie of default type. -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Clone, Copy)] -pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], -} - -/// Owned version of default child trie `ChildTrie`. +/// It uses the same default implementation as the top trie, +/// top trie being a child trie with no keyspace and no storage key. +/// Its keyspace is the variable (unprefixed) part of its storage key. +/// It shares its trie nodes backend storage with every other +/// child trie, so its storage key needs to be a unique id +/// that will be use only once. 
#[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. +pub struct ChildTrie { + /// Data is the full prefixed storage key. data: Vec, } -impl OwnedChildTrie { +impl ChildTrie { /// Try to update with another instance, return false if both instance /// are not compatible. - fn try_update(&mut self, other: ChildInfo) -> bool { + fn try_update(&mut self, other: &ChildInfo) -> bool { match other { ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..], } @@ -357,9 +336,9 @@ impl OwnedChildTrie { const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] -fn assert_default_trie_in_child_trie() { - let child_info = OwnedChildInfo::new_default(b"any key".to_vec()); - let child_info = child_info.as_ref(); +fn test_prefix_default_child_info() { + let child_info = ChildInfo::new_default(b"any key"); let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); + assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 9267989a40c53..e248986f67c62 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -129,17 +129,17 @@ impl TestClientBuilder, - child_key: impl AsRef<[u8]>, - child_info: ChildInfo, value: impl AsRef<[u8]>, ) -> Self { - let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) + let storage_key = child_info.storage_key(); + let entry = self.child_storage_extension.entry(storage_key.to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }); - entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); + entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec()); self } diff --git 
a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 2c6967ff2e0f5..c6e1d4752705b 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -127,9 +127,8 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); - let child_info = child_content.child_info.as_ref(); - let storage_key = child_info.storage_key().to_vec(); - (storage_key, state_root.encode()) + let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); + (prefixed_storage_key, state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() @@ -196,7 +195,7 @@ pub trait TestClientBuilderExt: Sized { /// Panics if the key is empty. fn add_extra_child_storage>, V: Into>>( mut self, - child_info: ChildInfo, + child_info: &ChildInfo, key: K, value: V, ) -> Self { @@ -208,7 +207,7 @@ pub trait TestClientBuilderExt: Sized { .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), - child_info: child_info.to_owned(), + child_info: child_info.clone(), }).data.insert(key, value.into()); self } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 0c3459bbb7f18..5d3c7ecfcfea1 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -872,7 +872,7 @@ fn test_read_storage() { } fn test_read_child_storage() { - const STORAGE_KEY: &[u8] = b":child_storage:default:unique_id_1"; + const STORAGE_KEY: &[u8] = b"unique_id_1"; const KEY: &[u8] = b":read_child_storage"; sp_io::default_child_storage::set( STORAGE_KEY, From b3ccc93a54db9b9c597f90b207da0d42ef8afcbb Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 13:01:56 +0100 Subject: [PATCH 46/85] bump version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 97424950fc569..9ff6d46a3b971 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 219, + spec_version: 220, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From afe85ae775511bd143c83c525cde0150045cb5e6 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 Feb 2020 13:37:36 +0100 Subject: [PATCH 47/85] fix tabs and doc. --- .../src/protocol/light_client_handler.rs | 2 +- primitives/io/src/lib.rs | 14 ------ primitives/state-machine/src/basic.rs | 16 +++---- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/src/lib.rs | 48 +++++++------------ 5 files changed, 24 insertions(+), 58 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index a9accd7f158d1..f90a19ec036b4 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -511,7 +511,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let child_info = ChildInfo::new_default(&request.storage_key); - let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { + let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index befc3434761d8..fd7f247ecdc27 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -161,8 +161,6 @@ pub trait DefaultChildStorage { /// doesn't exist at all. 
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes /// are copied into `value_out`. - /// - /// See `child_get` for common child api parameters. fn read( &self, storage_key: &[u8], @@ -182,8 +180,6 @@ pub trait DefaultChildStorage { } /// Set `key` to `value` in the child storage denoted by `storage_key`. - /// - /// See `child_get` for common child api parameters. fn set( &mut self, storage_key: &[u8], @@ -195,8 +191,6 @@ pub trait DefaultChildStorage { } /// Clear the given child storage of the given `key` and its value. - /// - /// See `child_get` for common child api parameters. fn clear ( &mut self, storage_key: &[u8], @@ -207,8 +201,6 @@ pub trait DefaultChildStorage { } /// Clear an entire child storage. - /// - /// See `child_get` for common child api parameters. fn storage_kill( &mut self, storage_key: &[u8], @@ -218,8 +210,6 @@ pub trait DefaultChildStorage { } /// Check whether the given `key` exists in storage. - /// - /// See `child_get` for common child api parameters. fn exists( &self, storage_key: &[u8], @@ -230,8 +220,6 @@ pub trait DefaultChildStorage { } /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. - /// - /// See `child_get` for common child api parameters. fn clear_prefix( &mut self, storage_key: &[u8], @@ -246,8 +234,6 @@ pub trait DefaultChildStorage { /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. - /// - /// See `child_get` for common child api parameters. 
fn root( &mut self, storage_key: &[u8], diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 61ec462491b50..e1c10a83023b0 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -132,8 +132,8 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - let storage_key = child_info.storage_key(); - self.inner.children.get(storage_key).and_then(|child| child.data.get(key)).cloned() + self.inner.children.get(child_info.storage_key()) + .and_then(|child| child.data.get(key)).cloned() } fn child_storage_hash( @@ -170,9 +170,8 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - let storage_key = child_info.storage_key(); let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key) + self.inner.children.get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) } @@ -194,8 +193,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let storage_key = child_info.storage_key().to_vec(); - let child_map = self.inner.children.entry(storage_key) + let child_map = self.inner.children.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -211,8 +209,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) { - let storage_key = child_info.storage_key(); - self.inner.children.remove(storage_key); + self.inner.children.remove(child_info.storage_key()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -240,8 +237,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, prefix: &[u8], ) { - let storage_key = child_info.storage_key(); - if let Some(child) = self.inner.children.get_mut(storage_key) { + if let Some(child) = 
self.inner.children.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f4cdb7315c756..04a6a2f6b5cb3 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -166,7 +166,7 @@ impl From, StorageCollection)>> impl InMemory { /// Child storage infos iterator. pub fn child_storage_infos(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| item.0.as_ref()) + self.inner.iter().filter_map(|item| item.0.as_ref()) } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 30677f0f617c2..e960bc6435f36 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -63,9 +63,9 @@ pub struct Storage { /// Top trie storage data. pub top: StorageMap, /// Children trie storage data. - /// Note that the key is not including child prefix, this will - /// not be possible if a different kind of trie than `default` - /// get in use. + /// The key does not including prefix, for the `default` + /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` + /// tries. pub children: std::collections::HashMap, StorageChild>, } @@ -137,15 +137,15 @@ pub enum ChildInfo { } impl ChildInfo { - /// Instantiates info for a default child trie with a default unprefixed parent + /// Instantiates child information for a default child trie + /// of kind `ChildType::ParentKeyId`, using an unprefixed parent /// storage key. pub fn new_default(storage_key: &[u8]) -> Self { let data = storage_key.to_vec(); ChildInfo::ParentKeyId(ChildTrie { data }) } - /// Instantiates info for a default child trie with a default unprefixed parent - /// owned storage key. + /// Same as `new_default` but with `Vec` as input. 
pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrie { data: storage_key, @@ -173,20 +173,7 @@ impl ChildInfo { } } - /// Top trie defined as the unique crypto id trie with - /// 0 length unique id. - pub fn top_trie() -> Self { - Self::new_default(&[]) - } - - /// Is this child info a the top trie. - pub fn is_top_trie(&self) -> bool { - match self { - ChildInfo::ParentKeyId(ChildTrie { data }) => data.len() == 0 - } - } - - /// Return a single byte vector containing packed child info content and its child info type. + /// Returns a single byte vector containing packed child info content and its child info type. /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { @@ -205,7 +192,7 @@ impl ChildInfo { } } - /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. + /// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys. /// This is a unique id of the child trie. The collision resistance of this value /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. pub fn keyspace(&self) -> &[u8] { @@ -214,12 +201,9 @@ impl ChildInfo { } } - /// Return a reference to the location in the direct parent of - /// this trie. - /// If the trie got no parent this returns the empty slice, - /// so by nature an empty slice is not a valid parent location. - /// This does not include child type related prefix. - /// The static part of the storage key is omitted. + /// Returns a reference to the location in the direct parent of + /// this trie but without the common prefix for this kind of + /// child trie. pub fn storage_key(&self) -> &[u8] { match self { ChildInfo::ParentKeyId(ChildTrie { @@ -238,7 +222,7 @@ impl ChildInfo { } } - /// Return a the full location in the direct parent of + /// Returns a the full location in the direct parent of /// this trie. 
pub fn into_prefixed_storage_key(self) -> Vec { match self { @@ -251,7 +235,7 @@ impl ChildInfo { } } - /// Return the type for this child info. + /// Returns the type for this child info. pub fn child_type(&self) -> ChildType { match self { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, @@ -280,7 +264,7 @@ impl ChildType { }) } - /// Change a key to get prefixed with the parent prefix. + /// Produce a prefixed key for a given child type. fn new_prefixed_key(&self, key: &[u8]) -> Vec { let parent_prefix = self.parent_prefix(); let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); @@ -289,7 +273,7 @@ impl ChildType { result } - /// Change a key to get prefixed with the parent prefix. + /// Prefixes a vec with the prefix for this child type. fn do_prefix_key(&self, key: &mut Vec) { let parent_prefix = self.parent_prefix(); let key_len = key.len(); @@ -300,7 +284,7 @@ impl ChildType { } } - /// Return the location reserved for this child trie in their parent trie if there + /// Returns the location reserved for this child trie in their parent trie if there /// is one. fn parent_prefix(&self) -> &'static [u8] { match self { From ed480fa4661ef1611a499a2047476473abe2d4e8 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 Feb 2020 11:02:18 +0100 Subject: [PATCH 48/85] Apply more consistant naming 'storage_key' instead of 'child_storage_key' in rpc crate. 
--- client/rpc/src/state/mod.rs | 26 +++++++++++++------------- client/rpc/src/state/state_light.rs | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 57a4b6cab897e..856369164db13 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -107,7 +107,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_keys( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, prefix: StorageKey, ) -> FutureResult>; @@ -115,7 +115,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult>; @@ -123,7 +123,7 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_hash( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult>; @@ -131,10 +131,10 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_size( &self, block: Option, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, key) + Box::new(self.child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -294,38 +294,38 @@ impl StateApi for State fn child_storage( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, key) + self.backend.child_storage(block, storage_key, key) } fn child_storage_keys( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, key_prefix) + self.backend.child_storage_keys(block, storage_key, key_prefix) } fn child_storage_hash( &self, - child_storage_key: StorageKey, + 
storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, key) + self.backend.child_storage_hash(block, storage_key, key) } fn child_storage_size( &self, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, key) + self.backend.child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 485950de97c00..c65f86c9f2ba5 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -249,7 +249,7 @@ impl StateBackend for LightState, - _child_storage_key: StorageKey, + _storage_key: StorageKey, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -258,7 +258,7 @@ impl StateBackend for LightState, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -268,7 +268,7 @@ impl StateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, - storage_key: child_storage_key.0, + storage_key: storage_key.0, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -288,11 +288,11 @@ impl StateBackend for LightState, - child_storage_key: StorageKey, + storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, child_storage_key, key) + .child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) From e103c2a3f2ea0bb24cef6d8178e96dcf4548a2f6 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 08:10:56 +0100 Subject: [PATCH 49/85] Update primitives/storage/src/lib.rs 
Co-Authored-By: thiolliere --- primitives/storage/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index e960bc6435f36..8034bb2acccd5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -133,6 +133,7 @@ pub mod well_known_keys { #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum ChildInfo { + /// This is the one used by default. ParentKeyId(ChildTrie), } From 958b6268cdcd3217e160a69b6e94571c9d4ca9aa Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 08:55:50 +0100 Subject: [PATCH 50/85] use prefixed storage key in change trie --- .../state-machine/src/changes_trie/build.rs | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 3d5ca3d41ba21..53bf2c585a7f3 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -105,13 +105,13 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_keys = BTreeSet::::new(); + let mut children_prefixed_keys = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (storage_key, _) in changes.prospective.children.iter() + for (_storage_key, (_map, child_info)) in changes.prospective.children.iter() .chain(changes.committed.children.iter()) { - children_keys.insert(storage_key.clone()); + children_prefixed_keys.insert(child_info.prefixed_storage_key()); } - for storage_key in children_keys { + for storage_key in children_prefixed_keys { let child_index = ChildIndex:: { block: block.clone(), storage_key: storage_key.clone(), @@ -367,8 +367,8 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = child_info_1.storage_key().to_vec(); - let 
child_trie_key2 = child_info_2.storage_key().to_vec(); + let child_trie_key1 = child_info_1.prefixed_storage_key(); + let child_trie_key2 = child_info_2.prefixed_storage_key(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -485,8 +485,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_non_digest_block() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; let changes_trie_nodes = prepare_input( @@ -523,8 +523,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l1() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; let changes_trie_nodes = prepare_input( @@ -570,8 +570,8 @@ mod test { #[test] fn build_changes_trie_nodes_on_digest_block_l2() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = 
ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, changes, config) = prepare_for_build(zero); let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; let changes_trie_nodes = prepare_input( @@ -661,8 +661,8 @@ mod test { #[test] fn build_changes_trie_nodes_ignores_temporary_storage_values() { fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, storage, mut changes, config) = prepare_for_build(zero); // 110: missing from backend, set to None in overlay @@ -715,8 +715,8 @@ mod test { #[test] fn cache_is_used_when_changes_trie_is_built() { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").storage_key().to_vec(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").storage_key().to_vec(); + let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); + let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); let (backend, mut storage, changes, config) = prepare_for_build(0); let parent = AnchorBlockId { hash: Default::default(), number: 15 }; From f471d56b7f22077970573ff1dc03de4c3e3028e7 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 09:00:27 +0100 Subject: [PATCH 51/85] renaming 'default_child_trie_root' to 'empty_child_trie_root' --- primitives/state-machine/src/basic.rs | 6 +++--- primitives/state-machine/src/ext.rs | 6 +++--- primitives/state-machine/src/in_memory_backend.rs | 4 ++-- primitives/state-machine/src/proving_backend.rs | 4 ++-- primitives/state-machine/src/trie_backend.rs | 4 ++-- 
primitives/state-machine/src/trie_backend_essence.rs | 8 ++++---- primitives/trie/src/lib.rs | 4 ++-- 7 files changed, 18 insertions(+), 18 deletions(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index e1c10a83023b0..3dbc2c1e0bb4e 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -21,7 +21,7 @@ use std::{ }; use crate::{Backend, InMemoryBackend, StorageKey, StorageValue}; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, default_child_trie_root}; +use sp_trie::{TrieConfiguration, empty_child_trie_root}; use sp_trie::trie_types::Layout; use sp_core::{ storage::{ @@ -260,7 +260,7 @@ impl Externalities for BasicExternalities { // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = default_child_trie_root::>(); + let empty_hash = empty_child_trie_root::>(); for (prefixed_storage_key, child_info) in keys { let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] 
{ @@ -283,7 +283,7 @@ impl Externalities for BasicExternalities { InMemoryBackend::::default() .child_storage_root(&child.child_info, delta).0 } else { - default_child_trie_root::>() + empty_child_trie_root::>() }.encode() } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d5f12643d00d4..2c1c3bd01cf51 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -27,7 +27,7 @@ use sp_core::{ storage::{well_known_keys::is_child_storage_key, ChildInfo}, traits::Externalities, hexdisplay::HexDisplay, }; -use sp_trie::{trie_types::Layout, default_child_trie_root}; +use sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::Extensions; use codec::{Decode, Encode}; @@ -490,7 +490,7 @@ where .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>() + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -538,7 +538,7 @@ where .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) .unwrap_or( - default_child_trie_root::>() + empty_child_trie_root::>() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", self.id, diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 04a6a2f6b5cb3..5a7f2ced5952a 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,7 +24,7 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, ChildType, Storage}; @@ -272,7 +272,7 
@@ impl Backend for InMemory where H::Out: Codec { let full_transaction = transaction.into_iter().collect(); let is_default = match child_type { - ChildType::ParentKeyId => root == default_child_trie_root::>(), + ChildType::ParentKeyId => root == empty_child_trie_root::>(), }; (root, is_default, vec![(child_info, full_transaction)]) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 67f90f92e9f98..0ba7b91e52348 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -22,7 +22,7 @@ use codec::{Decode, Encode, Codec}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys }; pub use sp_trie::Recorder; @@ -149,7 +149,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? 
.and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>()); + .unwrap_or(empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2c09c049b542d..4762192ece61f 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -18,7 +18,7 @@ use log::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; @@ -198,7 +198,7 @@ impl, H: Hasher> Backend for TrieBackend where H::Out: Ord, { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => default_child_trie_root::>() + ChildType::ParentKeyId => empty_child_trie_root::>() }; let mut write_overlay = S::Overlay::default(); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 763f57bd6b7d5..28d1c68ca2e40 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use log::{debug, warn}; use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, + empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; @@ -169,7 +169,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: key: &[u8], ) -> Result, String> { let root = 
self.child_root(child_info)? - .unwrap_or(default_child_trie_root::>().encode()); + .unwrap_or(empty_child_trie_root::>().encode()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral { @@ -190,7 +190,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: f: F, ) { let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; @@ -221,7 +221,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: mut f: F, ) { let root_vec = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(default_child_trie_root::>().encode()), + Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), Err(e) => { debug!(target: "trie", "Error while iterating child storage: {}", e); return; diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 01466f3ed48fc..a7edf01a0473a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -209,8 +209,8 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } -/// Determine the default child trie root. -pub fn default_child_trie_root( +/// Determine the empty child trie root. 
+pub fn empty_child_trie_root( ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } From 02ff227286c3c4b43add22f6afdd506f22b894da Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 11:09:20 +0100 Subject: [PATCH 52/85] apply some renaming, rpc change are really ugly, will revert them --- bin/node/executor/tests/basic.rs | 12 +- bin/node/executor/tests/fees.rs | 2 +- client/api/src/light.rs | 10 +- client/chain-spec/src/chain_spec.rs | 44 ++--- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 6 +- client/executor/src/integration_tests/mod.rs | 4 +- client/network/src/on_demand_layer.rs | 6 +- client/network/src/protocol.rs | 19 ++- .../src/protocol/light_client_handler.rs | 36 ++-- client/network/src/protocol/light_dispatch.rs | 49 +++--- client/network/src/protocol/message.rs | 8 +- .../src/protocol/schema/light.v1.proto | 6 +- client/rpc-api/src/state/mod.rs | 16 +- client/rpc/src/state/mod.rs | 29 ++-- client/rpc/src/state/state_full.rs | 6 +- client/rpc/src/state/state_light.rs | 12 +- client/rpc/src/state/tests.rs | 8 +- client/src/in_mem.rs | 7 +- client/src/light/backend.rs | 4 +- client/src/light/fetcher.rs | 10 +- frame/support/test/tests/instance.rs | 2 +- frame/system/src/lib.rs | 2 +- primitives/io/src/lib.rs | 154 +++++++++++++++++- primitives/runtime/src/lib.rs | 6 +- primitives/state-machine/src/basic.rs | 26 +-- .../state-machine/src/changes_trie/build.rs | 12 +- primitives/state-machine/src/ext.rs | 10 +- .../state-machine/src/in_memory_backend.rs | 2 +- .../state-machine/src/overlayed_changes.rs | 44 ++--- primitives/state-machine/src/testing.rs | 6 +- primitives/storage/src/lib.rs | 22 +-- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 11 +- test-utils/runtime/src/genesismap.rs | 4 +- test-utils/runtime/src/system.rs | 2 +- 36 files changed, 369 insertions(+), 232 deletions(-) diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 100bdf3fe60ee..79512527d19ab 
100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -173,7 +173,7 @@ fn panic_execution_with_foreign_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -209,7 +209,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { vec![0u8; 32] } ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -243,7 +243,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -285,7 +285,7 @@ fn successful_execution_with_foreign_code_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -706,7 +706,7 @@ fn panic_execution_gives_error() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( @@ -740,7 +740,7 @@ fn successful_execution_gives_ok() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let r = executor_call:: _>( diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index ba303a6feb6ff..46c8fe332a9e2 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -145,7 +145,7 @@ fn transaction_fee_is_correct_ultimate() { }, >::hashed_key_for(0) => vec![0u8; 32] ], - children: map![], + children_default: map![], }); let tip = 1_000_000; diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 2911d77f18209..61f56628d5866 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -75,7 +75,7 @@ pub struct RemoteReadRequest { /// Remote storage read child request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadChildRequest { +pub struct RemoteReadDefaultChildRequest { /// Read at state of given block. 
pub block: Header::Hash, /// Header of block at which read is performed. @@ -175,7 +175,7 @@ pub trait Fetcher: Send + Sync { /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadDefaultChildRequest ) -> Self::RemoteReadResult; /// Fetch remote call result. fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; @@ -205,9 +205,9 @@ pub trait FetchChecker: Send + Sync { remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote storage read proof. - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote method execution proof. @@ -330,7 +330,7 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadChildRequest
) -> Self::RemoteReadResult { + fn remote_read_child(&self, _request: RemoteReadDefaultChildRequest
) -> Self::RemoteReadResult { not_implemented_in_tests() } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index ae53559aa9f43..ea6dae7724713 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -74,17 +74,14 @@ impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(storage_key, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); + children_default: children_map.into_iter().map(|(storage_key, child_content)| { + let child_info = ChildInfo::new_default(storage_key.0.as_slice()); ( storage_key.0, StorageChild { - data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(), child_info, }, ) @@ -103,22 +100,13 @@ impl BuildStorage for ChainSpec { type GenesisStorage = HashMap; -#[derive(Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -struct ChildRawStorage { - data: GenesisStorage, - child_info: Vec, - child_type: u32, -} - #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] /// Storage content for genesis block. 
struct RawGenesis { pub top: GenesisStorage, - pub children: HashMap, + pub children_default: HashMap, } #[derive(Serialize, Deserialize)] @@ -285,22 +273,16 @@ impl ChainSpec { let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); - let children = storage.children.into_iter() - .map(|(sk, child)| { - let (info, ci_type) = child.child_info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) + let children_default = storage.children_default.into_iter() + .map(|(sk, child)| ( + StorageKey(sk), + child.data.into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + )) .collect(); - Genesis::Raw(RawGenesis { top, children }) + Genesis::Raw(RawGenesis { top, children_default }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index fce759590e531..ec8b975aa24fd 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -77,7 +77,7 @@ impl BenchmarkingState { }; state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(_storage_key, child_content)| ( + let child_delta = genesis.children_default.into_iter().map(|(_storage_key, child_content)| ( child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 598b372b440c0..f108b1d737710 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -581,7 +581,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc return Err(sp_blockchain::Error::GenesisInvalid.into()); } - let child_delta = storage.children.into_iter().map(|(_storage_key, child_content)|( + let child_delta = storage.children_default.into_iter().map(|(_storage_key, child_content)|( child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); @@ -1782,7 +1782,7 
@@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); op.set_block_data( header.clone(), @@ -1867,7 +1867,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.iter().cloned().collect(), - children: Default::default(), + children_default: Default::default(), }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index c0516d3ac7dfa..ca5e72aedf7a9 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -180,7 +180,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(ext, expected); } @@ -214,7 +214,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { b"aab".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"5".to_vec() ], - children: map![], + children_default: map![], }); assert_eq!(expected, ext); } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index d672ed0b7f569..330daf590d7a7 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -23,7 +23,7 @@ use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sc_client_api::{ Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, - RemoteChangesRequest, RemoteReadChildRequest, RemoteBodyRequest, + RemoteChangesRequest, RemoteReadDefaultChildRequest, RemoteBodyRequest, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -103,10 +103,10 @@ impl Fetcher for OnDemand where fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadDefaultChildRequest ) -> Self::RemoteReadResult { let 
(sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteReadChild(request, sender)); + let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); RemoteResponse { receiver } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 33fad6b4c5fe6..3622b96685649 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -245,7 +245,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { self.behaviour.send_packet(who, message.encode()) } - fn send_read_child_request( + fn send_read_default_child_request( &mut self, who: &PeerId, id: RequestId, @@ -253,12 +253,13 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { storage_key: Vec, keys: Vec>, ) { - let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { - id, - block, - storage_key, - keys, - }); + let message: Message = message::generic::Message::RemoteReadDefaultChildRequest( + message::RemoteReadDefaultChildRequest { + id, + block, + storage_key, + keys, + }); self.behaviour.send_packet(who, message.encode()) } @@ -639,7 +640,7 @@ impl, H: ExHashT> Protocol { self.on_finality_proof_request(who, request), GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, response), - GenericMessage::RemoteReadChildRequest(request) => + GenericMessage::RemoteReadDefaultChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => return if self.registered_notif_protocols.contains(&msg.engine_id) { @@ -1547,7 +1548,7 @@ impl, H: ExHashT> Protocol { fn on_remote_read_child_request( &mut self, who: PeerId, - request: message::RemoteReadChildRequest, + request: message::RemoteReadDefaultChildRequest, ) { if request.keys.is_empty() { debug!(target: "sync", "Invalid remote child read request sent by {}", who); diff --git 
a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 568e2caa8a1bd..1c49c20b0c4b0 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -169,8 +169,8 @@ pub enum Request { request: fetcher::RemoteReadRequest, sender: oneshot::Sender, Option>>, ClientError>> }, - ReadChild { - request: fetcher::RemoteReadChildRequest, + ReadDefaultChild { + request: fetcher::RemoteReadDefaultChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, Call { @@ -367,9 +367,9 @@ where let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } - Request::ReadChild { request, .. } => { + Request::ReadDefaultChild { request, .. } => { let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; + let reply = self.checker.check_read_default_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } _ => Err(Error::UnexpectedResponse) @@ -496,7 +496,7 @@ where ( &mut self , peer: &PeerId , request_id: u64 - , request: &api::v1::light::RemoteReadChildRequest + , request: &api::v1::light::RemoteReadDefaultChildRequest ) -> Result { if request.keys.is_empty() { @@ -692,7 +692,7 @@ where self.on_remote_read_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => self.on_remote_header_request(&peer, request.id, r), - Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => + Some(api::v1::light::request::Request::RemoteReadDefaultChildRequest(r)) => self.on_remote_read_child_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => self.on_remote_changes_request(&peer, request.id, r), @@ -888,7 +888,7 @@ fn required_block(request: &Request) -> NumberFor { match request { Request::Header { request, .. } => request.block, Request::Read { request, .. 
} => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), + Request::ReadDefaultChild { request, .. } => *request.header.number(), Request::Call { request, .. } => *request.header.number(), Request::Changes { request, .. } => request.max_block.0, } @@ -898,7 +898,7 @@ fn retries(request: &Request) -> usize { let rc = match request { Request::Header { request, .. } => request.retry_count, Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, + Request::ReadDefaultChild { request, .. } => request.retry_count, Request::Call { request, .. } => request.retry_count, Request::Changes { request, .. } => request.retry_count, }; @@ -918,13 +918,13 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: }; api::v1::light::request::Request::RemoteReadRequest(r) } - Request::ReadChild { request, .. } => { - let r = api::v1::light::RemoteReadChildRequest { + Request::ReadDefaultChild { request, .. } => { + let r = api::v1::light::RemoteReadDefaultChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), keys: request.keys.clone(), }; - api::v1::light::request::Request::RemoteReadChildRequest(r) + api::v1::light::request::Request::RemoteReadDefaultChildRequest(r) } Request::Call { request, .. 
} => { let r = api::v1::light::RemoteCallRequest { @@ -965,7 +965,7 @@ fn send_reply(result: Result, ClientError>, request: Request< Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), } - Request::ReadChild { request, sender } => match result { + Request::ReadDefaultChild { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), @@ -1545,7 +1545,7 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } - Request::ReadChild{..} => { + Request::ReadDefaultChild{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { id: 1, @@ -1620,14 +1620,14 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { + let request = fetcher::RemoteReadDefaultChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::ReadChild { request, sender: chan.0 }); + issue_request(Request::ReadDefaultChild { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1720,16 +1720,16 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { + let request = fetcher::RemoteReadDefaultChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; - send_receive(Request::ReadChild { request, sender: chan.0 }); + send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), 
task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_child_proof` + // ^--- from `DummyFetchChecker::check_read_default_child_proof` } #[test] diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 6654895971001..e2b4ff7874095 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -29,7 +29,7 @@ use linked_hash_map::{Entry, LinkedHashMap}; use sp_blockchain::Error as ClientError; use sc_client_api::{FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, - RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; + RemoteReadDefaultChildRequest, RemoteBodyRequest, StorageProof}; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use crate::config::Roles; @@ -64,7 +64,7 @@ pub trait LightDispatchNetwork { ); /// Send to `who` a child read request. 
- fn send_read_child_request( + fn send_read_default_child_request( &mut self, who: &PeerId, id: RequestId, @@ -147,8 +147,8 @@ pub(crate) enum RequestData { RemoteReadRequest, OneShotSender, Option>>, ClientError>>, ), - RemoteReadChild( - RemoteReadChildRequest, + RemoteReadDefaultChild( + RemoteReadDefaultChildRequest, OneShotSender, Option>>, ClientError>> ), RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), @@ -189,9 +189,9 @@ impl FetchChecker for AlwaysBadChecker { Err(ClientError::Msg("AlwaysBadChecker".into())) } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - _request: &RemoteReadChildRequest, + _request: &RemoteReadDefaultChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { Err(ClientError::Msg("AlwaysBadChecker".into())) @@ -403,8 +403,8 @@ impl LightDispatch where RequestData::RemoteRead(request, sender) ), }}, - RequestData::RemoteReadChild(request, sender) => { - match checker.check_read_child_proof(&request, response.proof) { + RequestData::RemoteReadDefaultChild(request, sender) => { + match checker.check_read_default_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already let _ = sender.send(Ok(response)); @@ -412,7 +412,7 @@ impl LightDispatch where }, Err(error) => Accept::CheckFailed( error, - RequestData::RemoteReadChild(request, sender) + RequestData::RemoteReadDefaultChild(request, sender) ), }}, data => Accept::Unexpected(data), @@ -595,7 +595,7 @@ impl Request { match self.data { RequestData::RemoteHeader(ref data, _) => data.block, RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteReadChild(ref data, _) => *data.header.number(), + RequestData::RemoteReadDefaultChild(ref data, _) => *data.header.number(), RequestData::RemoteCall(ref data, _) => *data.header.number(), RequestData::RemoteChanges(ref data, _) => data.max_block.0, RequestData::RemoteBody(ref data, _) => 
*data.header.number(), @@ -617,8 +617,8 @@ impl Request { data.block, data.keys.clone(), ), - RequestData::RemoteReadChild(ref data, _) => - out.send_read_child_request( + RequestData::RemoteReadDefaultChild(ref data, _) => + out.send_read_default_child_request( peer, self.id, data.block, @@ -665,7 +665,7 @@ impl RequestData { RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteReadChild(_, sender) => { let _ = sender.send(Err(error)); }, + RequestData::RemoteReadDefaultChild(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteBody(_, sender) => { let _ = sender.send(Err(error)); }, } @@ -682,7 +682,7 @@ pub mod tests { use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, ChangesProof, RemoteCallRequest, RemoteReadRequest, - RemoteReadChildRequest, RemoteChangesRequest, RemoteBodyRequest}; + RemoteReadDefaultChildRequest, RemoteChangesRequest, RemoteBodyRequest}; use crate::config::Roles; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; @@ -729,9 +729,9 @@ pub mod tests { } } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, _: StorageProof, ) -> ClientResult, Option>>> { match self.ok { @@ -817,7 +817,7 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + fn send_read_default_child_request(&mut self, _: &PeerId, _: RequestId, _: 
::Hash, _: Vec, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, @@ -1040,13 +1040,14 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: b"sub".to_vec(), - keys: vec![b":key".to_vec()], - retry_count: None, - }, tx)); + light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( + RemoteReadDefaultChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: b"sub".to_vec(), + keys: vec![b":key".to_vec()], + retry_count: None, + }, tx)); light_dispatch.on_remote_read_response(&mut network_interface, peer0.clone(), message::RemoteReadResponse { diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index be5a4f5acc871..ed9cd811006de 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -24,7 +24,7 @@ pub use self::generic::{ RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadChildRequest, + FromBlock, RemoteReadDefaultChildRequest, }; use sc_client_api::StorageProof; @@ -212,7 +212,7 @@ pub mod generic { /// Remote changes response. RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. - RemoteReadChildRequest(RemoteReadChildRequest), + RemoteReadDefaultChildRequest(RemoteReadDefaultChildRequest), /// Finality proof request. FinalityProofRequest(FinalityProofRequest), /// Finality proof response. 
@@ -242,7 +242,7 @@ pub mod generic { Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", Message::RemoteChangesRequest(_) => "RemoteChangesRequest", Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", + Message::RemoteReadDefaultChildRequest(_) => "RemoteReadDefaultChildRequest", Message::FinalityProofRequest(_) => "FinalityProofRequest", Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", @@ -417,7 +417,7 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. - pub struct RemoteReadChildRequest { + pub struct RemoteReadDefaultChildRequest { /// Unique request id. pub id: RequestId, /// Block at which to perform call. diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index 930d229b0bf7c..1895f6275fe48 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -20,7 +20,7 @@ message Request { RemoteCallRequest remote_call_request = 2; RemoteReadRequest remote_read_request = 3; RemoteHeaderRequest remote_header_request = 4; - RemoteReadChildRequest remote_read_child_request = 5; + RemoteReadDefaultChildRequest remote_read_default_child_request = 5; RemoteChangesRequest remote_changes_request = 6; } } @@ -68,13 +68,13 @@ message RemoteReadResponse { } // Remote storage read child request. -message RemoteReadChildRequest { +message RemoteReadDefaultChildRequest { // Block at which to perform call. bytes block = 2; // Child Storage key. bytes storage_key = 3; // Storage keys. - repeated bytes keys = 4; + repeated bytes keys = 6; } // Remote header request. 
diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 48d363bb8921c..540eb67d5e7ea 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,8 +73,8 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( + #[rpc(name = "state_getDefaultChildKeys")] + fn default_child_storage_keys( &self, child_storage_key: StorageKey, prefix: StorageKey, @@ -82,8 +82,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getChildStorage")] - fn child_storage( + #[rpc(name = "state_getDefaultChildStorage")] + fn default_child_storage( &self, child_storage_key: StorageKey, key: StorageKey, @@ -91,8 +91,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( + #[rpc(name = "state_getDefaultChildStorageHash")] + fn default_child_storage_hash( &self, child_storage_key: StorageKey, key: StorageKey, @@ -100,8 +100,8 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( + #[rpc(name = "state_getDefaultChildStorageSize")] + fn default_child_storage_size( &self, child_storage_key: StorageKey, key: StorageKey, diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 856369164db13..1d0c322f9803f 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -103,8 +103,9 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( + /// Returns the keys with prefix from a default child storage, + /// leave empty to get all the keys + fn default_child_storage_keys( &self, block: Option, storage_key: StorageKey, @@ -112,7 +113,7 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - fn child_storage( + fn default_child_storage( &self, block: Option, storage_key: StorageKey, @@ -120,7 +121,7 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - fn child_storage_hash( + fn default_child_storage_hash( &self, block: Option, storage_key: StorageKey, @@ -128,13 +129,13 @@ pub trait StateBackend: Send + Sync + 'static ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- fn child_storage_size( + fn default_child_storage_size( &self, block: Option, storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) + Box::new(self.default_child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -292,40 +293,40 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn child_storage( + fn default_child_storage( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, key) + self.backend.default_child_storage(block, storage_key, key) } - fn child_storage_keys( + fn default_child_storage_keys( &self, storage_key: StorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, key_prefix) + self.backend.default_child_storage_keys(block, storage_key, key_prefix) } - fn child_storage_hash( + fn default_child_storage_hash( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, key) + self.backend.default_child_storage_hash(block, storage_key, key) } - fn child_storage_size( + fn default_child_storage_size( &self, storage_key: StorageKey, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, storage_key, key) + self.backend.default_child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index a949cee862845..ca237dbfa230f 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -305,7 +305,7 @@ impl StateBackend for FullState, storage_key: StorageKey, @@ -324,7 +324,7 @@ impl StateBackend for FullState, storage_key: StorageKey, @@ -343,7 +343,7 @@ impl StateBackend for FullState, storage_key: StorageKey, diff --git 
a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index c65f86c9f2ba5..d9f56d9fb584f 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -45,7 +45,7 @@ use sc_client::{ BlockchainEvents, Client, CallExecutor, light::{ blockchain::{future_header, RemoteBlockchain}, - fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest}, + fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadDefaultChildRequest}, }, }; use sp_core::{ @@ -246,7 +246,7 @@ impl StateBackend for LightState, _storage_key: StorageKey, @@ -255,7 +255,7 @@ impl StateBackend for LightState, storage_key: StorageKey, @@ -265,7 +265,7 @@ impl StateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadDefaultChildRequest { block, header, storage_key: storage_key.0, @@ -285,14 +285,14 @@ impl StateBackend for LightState, storage_key: StorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self - .child_storage(block, storage_key, key) + .default_child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 22fd142347077..b579003e6c01c 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -65,7 +65,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, key, Some(genesis_hash).into()) + client.default_child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -87,7 +87,7 @@ fn should_return_child_storage() { assert_matches!( - client.child_storage( + client.default_child_storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -95,7 +95,7 @@ fn should_return_child_storage() { 
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.child_storage_hash( + client.default_child_storage_hash( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -103,7 +103,7 @@ fn should_return_child_storage() { Ok(true) ); assert_matches!( - client.child_storage_size( + client.default_child_storage_size( child_key.clone(), key.clone(), None, diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 3986c70116c01..991cc9fb74d6f 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -515,7 +515,7 @@ impl backend::BlockImportOperation for BlockImportOperatio fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let child_delta = storage.children.into_iter() + let child_delta = storage.children_default.into_iter() .map(|(_storage_key, child_content)| (child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))))); @@ -724,8 +724,9 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { return Err(sp_blockchain::Error::GenesisInvalid.into()); } - if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + if storage.children_default.keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { + return Err(sp_blockchain::Error::GenesisInvalid.into()); } Ok(()) diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 4fba83b882c68..067feb316c8ea 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -316,12 +316,12 @@ impl BlockImportOperation for ImportOperation storage.insert(None, input.top); // create a list of children keys to re-compute roots for - let child_delta = input.children.iter() + let child_delta = input.children_default.iter() .map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None)) .collect::>(); // make sure to 
persist the child storage - for (_child_key, storage_child) in input.children { + for (_child_key, storage_child) in input.children_default { storage.insert(Some(storage_child.child_info), storage_child.data); } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index f37c06bea247d..4aafbfc630fe3 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -39,7 +39,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use crate::cht; pub use sc_client_api::{ light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, + RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, Storage as BlockchainStorage, }, @@ -236,9 +236,9 @@ impl FetchChecker for LightDataChecker ).map_err(Into::into) } - fn check_read_child_proof( + fn check_read_default_child_proof( &self, - request: &RemoteReadChildRequest, + request: &RemoteReadDefaultChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { let child_trie = ChildInfo::new_default(&request.storage_key); @@ -502,8 +502,8 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::
{ + assert_eq!((&local_checker as &dyn FetchChecker).check_read_default_child_proof( + &RemoteReadDefaultChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 6fa2806dd3483..9fdd695b86a3e 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -303,7 +303,7 @@ fn new_test_ext() -> sp_io::TestExternalities { fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children: std::collections::HashMap::new() + children_default: std::collections::HashMap::new() }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 57be7b157cb33..db1dd51e10e0d 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -815,7 +815,7 @@ impl Module { >::hashed_key().to_vec() => T::BlockNumber::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], - children: map![], + children_default: map![], }) } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 68f15aaee6958..d7bdc3ee27d57 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -68,6 +68,14 @@ pub enum EcdsaVerifyError { BadSignature, } +/// Deprecated function, ensure that this is a default prefixed key. +#[cfg(feature = "std")] +fn child_storage_key_or_panic(storage_key: &[u8]) { + if !storage_key.starts_with(&ChildInfo::new_default(&[]).prefixed_storage_key()[..]) { + panic!("child storage key is invalid") + } +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -136,6 +144,146 @@ pub trait Storage { self.next_storage_key(&key) } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_get( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(&child_info, key).map(|s| s.to_vec()) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_read( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value_out: &mut [u8], + value_offset: u32, + ) -> Option { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(&child_info, key) + .map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + value.len() as u32 + }) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_set( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); + } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_clear( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(&child_info, key); + } + + /// Deprecated, please use dedicated runtime apis. + fn child_storage_kill( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(&child_info); + } + + /// Deprecated, please use dedicated runtime apis. + fn child_exists( + &self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> bool { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(&child_info, key) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_clear_prefix( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + prefix: &[u8], + ) { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(&child_info, prefix); + } + + /// Deprecated, please use dedicated runtime apis. 
+ fn child_root( + &mut self, + storage_key: &[u8], + ) -> Vec { + child_storage_key_or_panic(storage_key); + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info) + } + + /// Deprecated, please use dedicated runtime apis. + fn child_next_key( + &mut self, + storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + child_storage_key_or_panic(storage_key); + if child_type != 1 { panic!("Invalid child definition"); } + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(&child_info, key) + } + } @@ -911,7 +1059,7 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -924,7 +1072,7 @@ mod tests { fn read_storage_works() { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], - children: map![], + children_default: map![], }); t.execute_with(|| { @@ -946,7 +1094,7 @@ mod tests { b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], - children: map![], + children_default: map![], }); t.execute_with(|| { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 60f5da9cb389b..0409cb085256a 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -132,15 +132,15 @@ impl BuildStorage for sp_core::storage::Storage { storage: &mut sp_core::storage::Storage, )-> Result<(), String> { storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.children.iter() { + for (k, other_map) in self.children_default.iter() { let k = k.clone(); - if let Some(map) = storage.children.get_mut(&k) { + if let Some(map) = storage.children_default.get_mut(&k) { 
map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); if !map.child_info.try_update(&other_map.child_info) { return Err("Incompatible child info update".to_string()); } } else { - storage.children.insert(k, other_map.clone()); + storage.children_default.insert(k, other_map.clone()); } } Ok(()) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3dbc2c1e0bb4e..8c34f0e041ff4 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -64,7 +64,7 @@ impl BasicExternalities { ) -> R { let mut ext = Self { inner: Storage { top: std::mem::replace(&mut storage.top, Default::default()), - children: std::mem::replace(&mut storage.children, Default::default()), + children_default: std::mem::replace(&mut storage.children_default, Default::default()), }}; let r = ext.execute_with(f); @@ -85,7 +85,7 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { self.inner.top.eq(&other.inner.top) - && self.inner.children.eq(&other.inner.children) + && self.inner.children_default.eq(&other.inner.children_default) } } @@ -105,7 +105,7 @@ impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { BasicExternalities { inner: Storage { top: hashmap, - children: Default::default(), + children_default: Default::default(), }} } } @@ -132,7 +132,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, key: &[u8], ) -> Option { - self.inner.children.get(child_info.storage_key()) + self.inner.children_default.get(child_info.storage_key()) .and_then(|child| child.data.get(key)).cloned() } @@ -171,7 +171,7 @@ impl Externalities for BasicExternalities { key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(child_info.storage_key()) + self.inner.children_default.get(child_info.storage_key()) .and_then(|child| child.data.range::<[u8], 
_>(range).next().map(|(k, _)| k).cloned()) } @@ -193,7 +193,7 @@ impl Externalities for BasicExternalities { key: StorageKey, value: Option, ) { - let child_map = self.inner.children.entry(child_info.storage_key().to_vec()) + let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec()) .or_insert_with(|| StorageChild { data: Default::default(), child_info: child_info.to_owned(), @@ -209,7 +209,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) { - self.inner.children.remove(child_info.storage_key()); + self.inner.children_default.remove(child_info.storage_key()); } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -237,7 +237,7 @@ impl Externalities for BasicExternalities { child_info: &ChildInfo, prefix: &[u8], ) { - if let Some(child) = self.inner.children.get_mut(child_info.storage_key()) { + if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) .map(|(k, _)| k) .take_while(|k| k.starts_with(prefix)) @@ -254,14 +254,14 @@ impl Externalities for BasicExternalities { fn storage_root(&mut self) -> Vec { let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.iter().map(|(_k, v)| { + let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| { (v.child_info.prefixed_storage_key(), v.child_info.clone()) }).collect(); // Single child trie implementation currently allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. let empty_hash = empty_child_trie_root::>(); - for (prefixed_storage_key, child_info) in keys { + for (prefixed_storage_key, child_info) in prefixed_keys { let child_root = self.child_storage_root(&child_info); if &empty_hash[..] == &child_root[..] 
{ top.remove(prefixed_storage_key.as_slice()); @@ -277,7 +277,7 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, ) -> Vec { - if let Some(child) = self.inner.children.get(child_info.storage_key()) { + if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() @@ -334,7 +334,7 @@ mod tests { let child_info = &child_info; let mut ext = BasicExternalities::new(Storage { top: Default::default(), - children: map![ + children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: child_info.to_owned(), @@ -359,6 +359,6 @@ mod tests { // Make sure no values are set by default in `BasicExternalities`. let storage = BasicExternalities::new(Default::default()).into_storages(); assert!(storage.top.is_empty()); - assert!(storage.children.is_empty()); + assert!(storage.children_default.is_empty()); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 53bf2c585a7f3..c206090fa4e18 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -107,8 +107,8 @@ fn prepare_extrinsics_input<'a, B, H, Number>( let mut children_prefixed_keys = BTreeSet::::new(); let mut children_result = BTreeMap::new(); - for (_storage_key, (_map, child_info)) in changes.prospective.children.iter() - .chain(changes.committed.children.iter()) { + for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() + .chain(changes.committed.children_default.iter()) { children_prefixed_keys.insert(child_info.prefixed_storage_key()); } for storage_key in children_prefixed_keys { @@ -140,8 +140,8 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( let (committed, prospective, child_info) = if let Some(sk) = 
storage_key.as_ref() { let child_info = changes.default_child_info(sk).cloned(); ( - changes.committed.children.get(sk).map(|c| &c.0), - changes.prospective.children.get(sk).map(|c| &c.0), + changes.committed.children_default.get(sk).map(|c| &c.0), + changes.prospective.children_default.get(sk).map(|c| &c.0), child_info, ) } else { @@ -429,7 +429,7 @@ mod test { extrinsics: Some(vec![0, 1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1.clone(), (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), @@ -458,7 +458,7 @@ mod test { extrinsics: Some(vec![1].into_iter().collect()) }), ].into_iter().collect(), - children: vec![ + children_default: vec![ (child_trie_key1, (vec![ (vec![100], OverlayedValue { value: Some(vec![202]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2c1c3bd01cf51..77ae9a0820fb7 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -502,11 +502,11 @@ where if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { let (root, is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) + let delta = self.overlay.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( - self.overlay.prospective.children.get(storage_key) + self.overlay.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); @@ -708,7 +708,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children: map![] + children_default: map![] }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -744,7 +744,7 @@ mod tests { overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ + children_default: map![ 
child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], @@ -789,7 +789,7 @@ mod tests { overlay.set_child_storage(child_info, vec![30], Some(vec![31])); let backend = Storage { top: map![], - children: map![ + children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ vec![10] => vec![10], diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 5a7f2ced5952a..6c8aecf775d8a 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -122,7 +122,7 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); inner.insert(None, inners.top); InMemory { inner, diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index a361884fe786e..b9e25fc547013 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -77,7 +77,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. - pub children: HashMap, ChildInfo)>, + pub children_default: HashMap, ChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -171,7 +171,7 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { fn from_iter>(iter: T) -> Self { Self { top: iter.into_iter().collect(), - children: Default::default(), + children_default: Default::default(), } } } @@ -179,13 +179,13 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { impl OverlayedChangeSet { /// Whether the change set is empty. 
pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children.is_empty() + self.top.is_empty() && self.children_default.is_empty() } /// Clear the change set. pub fn clear(&mut self) { self.top.clear(); - self.children.clear(); + self.children_default.clear(); } } @@ -213,13 +213,13 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children.get(storage_key) { + if let Some(map) = self.prospective.children_default.get(storage_key) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } - if let Some(map) = self.committed.children.get(storage_key) { + if let Some(map) = self.committed.children_default.get(storage_key) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } @@ -253,7 +253,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key().to_vec(); - let map_entry = self.prospective.children.entry(storage_key) + let map_entry = self.prospective.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -279,7 +279,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -293,7 +293,7 @@ impl OverlayedChanges { e.value = None; }); - if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { + if let 
Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) { for (key, value) in committed_map.iter() { if !map_entry.0.contains_key(key) { map_entry.0.insert(key.clone(), OverlayedValue { @@ -354,7 +354,7 @@ impl OverlayedChanges { ) { let extrinsic_index = self.extrinsic_index(); let storage_key = child_info.storage_key(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) + let map_entry = self.prospective.children_default.entry(storage_key.to_vec()) .or_insert_with(|| (Default::default(), child_info.to_owned())); let updatable = map_entry.1.try_update(child_info); debug_assert!(updatable); @@ -370,7 +370,7 @@ impl OverlayedChanges { } } - if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { + if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) { // Then do the same with keys from committed changes. // NOTE that we are making changes in the prospective change set. for key in child_committed.keys() { @@ -407,8 +407,8 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - for (storage_key, (map, child_info)) in self.prospective.children.drain() { - let child_content = self.committed.children.entry(storage_key) + for (storage_key, (map, child_info)) in self.prospective.children_default.drain() { + let child_content = self.committed.children_default.entry(storage_key) .or_insert_with(|| (Default::default(), child_info)); // No update to child info at this point (will be needed for deletion). 
for (key, val) in map.into_iter() { @@ -437,7 +437,7 @@ impl OverlayedChanges { std::mem::replace(&mut self.committed.top, Default::default()) .into_iter() .map(|(k, v)| (k, v.value)), - std::mem::replace(&mut self.committed.children, Default::default()) + std::mem::replace(&mut self.committed.children_default, Default::default()) .into_iter() .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), ) @@ -534,17 +534,17 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let child_storage_keys = self.prospective.children.keys() - .chain(self.committed.children.keys()); + let child_storage_keys = self.prospective.children_default.keys() + .chain(self.committed.children_default.keys()); let child_delta_iter = child_storage_keys.map(|storage_key| ( self.default_child_info(storage_key).cloned() .expect("child info initialized in either committed or prospective"), - self.committed.children.get(storage_key) + self.committed.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) .chain( - self.prospective.children.get(storage_key) + self.prospective.children_default.get(storage_key) .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), @@ -595,10 +595,10 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { + if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { return Some(&ci); } - if let Some((_, ci)) = self.committed.children.get(storage_key) { + if let Some((_, ci)) = self.committed.children_default.get(storage_key) { return Some(&ci); } None @@ -638,10 +638,10 @@ impl OverlayedChanges { ) -> Option<(&[u8], &OverlayedValue)> { let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_prospective_key = self.prospective.children.get(storage_key) + let next_prospective_key = self.prospective.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - let next_committed_key = self.committed.children.get(storage_key) + let next_committed_key = self.committed.children_default.get(storage_key) .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); match (next_committed_key, next_prospective_key) { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 6ff6d42aba3f8..9cf773f79b906 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -89,7 +89,7 @@ impl TestExternalities overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children.keys().all(|key| is_child_storage_key(key))); + assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); storage.top.insert(CODE.to_vec(), code.to_vec()); @@ -126,8 +126,8 @@ impl TestExternalities .map(|(k, v)| (k, v.value)).collect(); let mut transaction = vec![(None, top)]; - self.overlay.committed.children.clone().into_iter() - .chain(self.overlay.prospective.children.clone().into_iter()) + 
self.overlay.committed.children_default.clone().into_iter() + .chain(self.overlay.prospective.children_default.clone().into_iter()) .for_each(|(_storage_key, (map, child_info))| { transaction.push(( Some(child_info), diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 8034bb2acccd5..924fd1e67849a 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -66,7 +66,7 @@ pub struct Storage { /// The key does not including prefix, for the `default` /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. - pub children: std::collections::HashMap, StorageChild>, + pub children_default: std::collections::HashMap, StorageChild>, } /// Storage change set @@ -134,7 +134,7 @@ pub mod well_known_keys { #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum ChildInfo { /// This is the one used by default. - ParentKeyId(ChildTrie), + ParentKeyId(ChildTrieParentKeyId), } impl ChildInfo { @@ -143,12 +143,12 @@ impl ChildInfo { /// storage key. pub fn new_default(storage_key: &[u8]) -> Self { let data = storage_key.to_vec(); - ChildInfo::ParentKeyId(ChildTrie { data }) + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) } /// Same as `new_default` but with `Vec` as input. pub fn new_default_from_vec(storage_key: Vec) -> Self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key, }) } @@ -178,7 +178,7 @@ impl ChildInfo { /// This can be use as input for `resolve_child_info`. pub fn info(&self) -> (&[u8], u32) { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => (data, ChildType::ParentKeyId as u32), } @@ -187,7 +187,7 @@ impl ChildInfo { /// Owned variant of `info`. 
pub fn into_info(self) -> (Vec, u32) { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => (data, ChildType::ParentKeyId as u32), } @@ -207,7 +207,7 @@ impl ChildInfo { /// child trie. pub fn storage_key(&self) -> &[u8] { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => &data[..], } @@ -217,7 +217,7 @@ impl ChildInfo { /// this trie. pub fn prefixed_storage_key(&self) -> Vec { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, }) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()), } @@ -227,7 +227,7 @@ impl ChildInfo { /// this trie. pub fn into_prefixed_storage_key(self) -> Vec { match self { - ChildInfo::ParentKeyId(ChildTrie { + ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data, }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); @@ -303,12 +303,12 @@ impl ChildType { /// that will be use only once. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] -pub struct ChildTrie { +pub struct ChildTrieParentKeyId { /// Data is the full prefixed storage key. data: Vec, } -impl ChildTrie { +impl ChildTrieParentKeyId { /// Try to update with another instance, return false if both instance /// are not compatible. 
fn try_update(&mut self, other: &ChildInfo) -> bool { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 0fb8e6371f29f..646238726d859 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -189,7 +189,7 @@ impl TestClientBuilder::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() ); @@ -203,7 +203,7 @@ pub trait TestClientBuilderExt: Sized { let key = key.into(); assert!(!storage_key.is_empty()); assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children + self.genesis_init_mut().extra_storage.children_default .entry(storage_key) .or_insert_with(|| StorageChild { data: Default::default(), @@ -311,7 +311,10 @@ impl Fetcher for LightFetcher { unimplemented!() } - fn remote_read_child(&self, _: RemoteReadChildRequest) -> Self::RemoteReadResult { + fn remote_read_child( + &self, + _: RemoteReadDefaultChildRequest, + ) -> Self::RemoteReadResult { unimplemented!() } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 25d9a807ccee1..b9de3ab3f4cb0 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -73,7 +73,7 @@ impl GenesisConfig { map.extend(self.extra_storage.top.clone().into_iter()); // Assimilate the system genesis config. 
- let mut storage = Storage { top: map, children: self.extra_storage.children.clone()}; + let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()}; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); @@ -85,7 +85,7 @@ impl GenesisConfig { pub fn insert_genesis_block( storage: &mut Storage, ) -> sp_core::hash::H256 { - let child_roots = storage.children.iter().map(|(sk, child_content)| { + let child_roots = storage.children_default.iter().map(|(sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect(), ); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index d0a38c7c77882..20f980cf97273 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -368,7 +368,7 @@ mod tests { vec![111u8, 0, 0, 0, 0, 0, 0, 0] } ], - children: map![], + children_default: map![], }, ) } From 8fcc11206648a93881cdd9d69b0e34b2a55fb8cb Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 21 Feb 2020 12:06:32 +0100 Subject: [PATCH 53/85] Switch back to using child_type for rpc (and light client). 
--- client/api/src/light.rs | 12 ++++--- client/network/src/on_demand_layer.rs | 4 +-- client/network/src/protocol.rs | 12 ++++--- .../src/protocol/light_client_handler.rs | 21 +++++++----- client/network/src/protocol/light_dispatch.rs | 29 +++++++++------- client/network/src/protocol/message.rs | 10 +++--- .../src/protocol/schema/light.v1.proto | 10 ++++-- client/rpc-api/src/state/mod.rs | 20 ++++++----- client/rpc/src/state/mod.rs | 34 ++++++++++++------- client/rpc/src/state/state_full.rs | 27 +++++++++++---- client/rpc/src/state/state_light.rs | 17 ++++++---- client/rpc/src/state/tests.rs | 11 +++--- client/src/light/fetcher.rs | 20 ++++++----- primitives/storage/src/lib.rs | 2 +- test-utils/runtime/client/src/lib.rs | 4 +-- 15 files changed, 142 insertions(+), 91 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 61f56628d5866..67376947d3913 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -75,13 +75,15 @@ pub struct RemoteReadRequest { /// Remote storage read child request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadDefaultChildRequest { +pub struct RemoteReadChildRequest { /// Read at state of given block. pub block: Header::Hash, /// Header of block at which read is performed. pub header: Header, /// Storage key for child. pub storage_key: Vec, + /// Child type. + pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. @@ -175,7 +177,7 @@ pub trait Fetcher: Send + Sync { /// Fetch remote storage child value. fn remote_read_child( &self, - request: RemoteReadDefaultChildRequest + request: RemoteReadChildRequest ) -> Self::RemoteReadResult; /// Fetch remote call result. 
fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; @@ -205,9 +207,9 @@ pub trait FetchChecker: Send + Sync { remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote storage read proof. - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>>; /// Check remote method execution proof. @@ -330,7 +332,7 @@ pub mod tests { not_implemented_in_tests() } - fn remote_read_child(&self, _request: RemoteReadDefaultChildRequest
) -> Self::RemoteReadResult { + fn remote_read_child(&self, _request: RemoteReadChildRequest
) -> Self::RemoteReadResult { not_implemented_in_tests() } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 330daf590d7a7..3a20cb9548a76 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -23,7 +23,7 @@ use parking_lot::Mutex; use sp_blockchain::Error as ClientError; use sc_client_api::{ Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, - RemoteChangesRequest, RemoteReadDefaultChildRequest, RemoteBodyRequest, + RemoteChangesRequest, RemoteReadChildRequest, RemoteBodyRequest, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; @@ -103,7 +103,7 @@ impl Fetcher for OnDemand where fn remote_read_child( &self, - request: RemoteReadDefaultChildRequest + request: RemoteReadChildRequest ) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 3622b96685649..b3514a9dc7670 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -245,19 +245,21 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { self.behaviour.send_packet(who, message.encode()) } - fn send_read_default_child_request( + fn send_read_child_request( &mut self, who: &PeerId, id: RequestId, block: ::Hash, storage_key: Vec, + child_type: u32, keys: Vec>, ) { - let message: Message = message::generic::Message::RemoteReadDefaultChildRequest( - message::RemoteReadDefaultChildRequest { + let message: Message = message::generic::Message::RemoteReadChildRequest( + message::RemoteReadChildRequest { id, block, storage_key, + child_type, keys, }); @@ -640,7 +642,7 @@ impl, H: ExHashT> Protocol { self.on_finality_proof_request(who, request), GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, 
response), - GenericMessage::RemoteReadDefaultChildRequest(request) => + GenericMessage::RemoteReadChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => return if self.registered_notif_protocols.contains(&msg.engine_id) { @@ -1548,7 +1550,7 @@ impl, H: ExHashT> Protocol { fn on_remote_read_child_request( &mut self, who: PeerId, - request: message::RemoteReadDefaultChildRequest, + request: message::RemoteReadChildRequest, ) { if request.keys.is_empty() { debug!(target: "sync", "Invalid remote child read request sent by {}", who); diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 1c49c20b0c4b0..30c9e0d9597b9 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -170,7 +170,7 @@ pub enum Request { sender: oneshot::Sender, Option>>, ClientError>> }, ReadDefaultChild { - request: fetcher::RemoteReadDefaultChildRequest, + request: fetcher::RemoteReadChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, Call { @@ -369,7 +369,7 @@ where } Request::ReadDefaultChild { request, .. 
} => { let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_default_child_proof(&request, proof)?; + let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } _ => Err(Error::UnexpectedResponse) @@ -496,7 +496,7 @@ where ( &mut self , peer: &PeerId , request_id: u64 - , request: &api::v1::light::RemoteReadDefaultChildRequest + , request: &api::v1::light::RemoteReadChildRequest ) -> Result { if request.keys.is_empty() { @@ -692,7 +692,7 @@ where self.on_remote_read_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => self.on_remote_header_request(&peer, request.id, r), - Some(api::v1::light::request::Request::RemoteReadDefaultChildRequest(r)) => + Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, request.id, r), Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => self.on_remote_changes_request(&peer, request.id, r), @@ -919,12 +919,13 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: api::v1::light::request::Request::RemoteReadRequest(r) } Request::ReadDefaultChild { request, .. } => { - let r = api::v1::light::RemoteReadDefaultChildRequest { + let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), + child_type: request.child_type, keys: request.keys.clone(), }; - api::v1::light::request::Request::RemoteReadDefaultChildRequest(r) + api::v1::light::request::Request::RemoteReadChildRequest(r) } Request::Call { request, .. 
} => { let r = api::v1::light::RemoteCallRequest { @@ -1620,10 +1621,11 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadDefaultChildRequest { + let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b":child_storage:sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1720,16 +1722,17 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); - let request = fetcher::RemoteReadDefaultChildRequest { + let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }; send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_default_child_proof` + // ^--- from `DummyFetchChecker::check_read_child_proof` } #[test] diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index e2b4ff7874095..15b5b42fd3ba2 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -29,7 +29,7 @@ use linked_hash_map::{Entry, LinkedHashMap}; use sp_blockchain::Error as ClientError; use sc_client_api::{FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, - RemoteReadDefaultChildRequest, RemoteBodyRequest, StorageProof}; + RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use crate::config::Roles; @@ -64,12 +64,13 @@ pub trait LightDispatchNetwork { ); /// Send to `who` a child read request. 
- fn send_read_default_child_request( + fn send_read_child_request( &mut self, who: &PeerId, id: RequestId, block: ::Hash, storage_key: Vec, + child_type: u32, keys: Vec>, ); @@ -148,7 +149,7 @@ pub(crate) enum RequestData { OneShotSender, Option>>, ClientError>>, ), RemoteReadDefaultChild( - RemoteReadDefaultChildRequest, + RemoteReadChildRequest, OneShotSender, Option>>, ClientError>> ), RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), @@ -189,9 +190,9 @@ impl FetchChecker for AlwaysBadChecker { Err(ClientError::Msg("AlwaysBadChecker".into())) } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - _request: &RemoteReadDefaultChildRequest, + _request: &RemoteReadChildRequest, _remote_proof: StorageProof, ) -> Result, Option>>, ClientError> { Err(ClientError::Msg("AlwaysBadChecker".into())) @@ -404,7 +405,7 @@ impl LightDispatch where ), }}, RequestData::RemoteReadDefaultChild(request, sender) => { - match checker.check_read_default_child_proof(&request, response.proof) { + match checker.check_read_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already let _ = sender.send(Ok(response)); @@ -618,11 +619,12 @@ impl Request { data.keys.clone(), ), RequestData::RemoteReadDefaultChild(ref data, _) => - out.send_read_default_child_request( + out.send_read_child_request( peer, self.id, data.block, data.storage_key.clone(), + data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -682,7 +684,7 @@ pub mod tests { use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, ChangesProof, RemoteCallRequest, RemoteReadRequest, - RemoteReadDefaultChildRequest, RemoteChangesRequest, RemoteBodyRequest}; + RemoteReadChildRequest, RemoteChangesRequest, RemoteBodyRequest}; use crate::config::Roles; use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; @@ -729,9 
+731,9 @@ pub mod tests { } } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, _: StorageProof, ) -> ClientResult, Option>>> { match self.ok { @@ -817,8 +819,8 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_default_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: Vec>) {} + fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + _: u32, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} @@ -1041,10 +1043,11 @@ pub mod tests { let (tx, response) = oneshot::channel(); light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( - RemoteReadDefaultChildRequest { + RemoteReadChildRequest { header: dummy_header(), block: Default::default(), storage_key: b"sub".to_vec(), + child_type: 1, keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ed9cd811006de..d44e13b06eab1 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -24,7 +24,7 @@ pub use self::generic::{ RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadDefaultChildRequest, + FromBlock, RemoteReadChildRequest, }; use sc_client_api::StorageProof; @@ -212,7 +212,7 @@ pub mod generic { /// Remote changes response. RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. 
- RemoteReadDefaultChildRequest(RemoteReadDefaultChildRequest), + RemoteReadChildRequest(RemoteReadChildRequest), /// Finality proof request. FinalityProofRequest(FinalityProofRequest), /// Finality proof response. @@ -242,7 +242,7 @@ pub mod generic { Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", Message::RemoteChangesRequest(_) => "RemoteChangesRequest", Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadDefaultChildRequest(_) => "RemoteReadDefaultChildRequest", + Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", Message::FinalityProofRequest(_) => "FinalityProofRequest", Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", @@ -417,13 +417,15 @@ pub mod generic { #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. - pub struct RemoteReadDefaultChildRequest { + pub struct RemoteReadChildRequest { /// Unique request id. pub id: RequestId, /// Block at which to perform call. pub block: H, /// Child Storage key. pub storage_key: Vec, + /// Child type. + pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index 1895f6275fe48..c4aff40c9626d 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -20,7 +20,7 @@ message Request { RemoteCallRequest remote_call_request = 2; RemoteReadRequest remote_read_request = 3; RemoteHeaderRequest remote_header_request = 4; - RemoteReadDefaultChildRequest remote_read_default_child_request = 5; + RemoteReadChildRequest remote_read_child_request = 5; RemoteChangesRequest remote_changes_request = 6; } } @@ -68,11 +68,15 @@ message RemoteReadResponse { } // Remote storage read child request. -message RemoteReadDefaultChildRequest { +message RemoteReadChildRequest { // Block at which to perform call. 
bytes block = 2; - // Child Storage key. + // Child Storage key, this is relative + // to the child type storage location. bytes storage_key = 3; + // Child type, it's required to resolve + // the child storage's final location. + uint32 child_type = 5; // Storage keys. repeated bytes keys = 6; } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 540eb67d5e7ea..41690134009b8 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,37 +73,41 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getDefaultChildKeys")] - fn default_child_storage_keys( + #[rpc(name = "state_getChildKeys")] + fn child_storage_keys( &self, child_storage_key: StorageKey, + child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getDefaultChildStorage")] - fn default_child_storage( + #[rpc(name = "state_getChildStorage")] + fn child_storage( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getDefaultChildStorageHash")] - fn default_child_storage_hash( + #[rpc(name = "state_getChildStorageHash")] + fn child_storage_hash( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getDefaultChildStorageSize")] - fn default_child_storage_size( + #[rpc(name = "state_getChildStorageSize")] + fn child_storage_size( &self, child_storage_key: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 1d0c322f9803f..a25828a869b00 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -105,37 +105,41 @@ pub trait StateBackend: Send + Sync + 'static /// Returns the keys with prefix from a defaultchild storage, /// leave empty to get all the keys - fn default_child_storage_keys( + fn child_storage_keys( &self, block: Option, storage_key: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. - fn default_child_storage( + fn child_storage( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. - fn default_child_storage_hash( + fn child_storage_hash( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- fn default_child_storage_size( + fn child_storage_size( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.default_child_storage(block, storage_key, key) + Box::new(self.child_storage(block, storage_key, child_type, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -293,40 +297,44 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn default_child_storage( + fn child_storage( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage(block, storage_key, key) + self.backend.child_storage(block, storage_key, child_type, key) } - fn default_child_storage_keys( + fn child_storage_keys( &self, storage_key: StorageKey, + child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_keys(block, storage_key, key_prefix) + self.backend.child_storage_keys(block, storage_key, child_type, key_prefix) } - fn default_child_storage_hash( + fn child_storage_hash( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_hash(block, storage_key, key) + self.backend.child_storage_hash(block, storage_key, child_type, key) } - fn default_child_storage_size( + fn child_storage_size( &self, storage_key: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.default_child_storage_size(block, storage_key, key) + self.backend.child_storage_size(block, storage_key, child_type, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index ca237dbfa230f..508ff8c74417d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,8 @@ use sc_client::{ Client, CallExecutor, BlockchainEvents }; use sp_core::{ - Bytes, 
storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, + ChildInfo, ChildType}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -305,16 +306,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage_keys( &BlockId::Hash(block), &child_info, @@ -324,16 +329,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage( &BlockId::Hash(block), &child_info, @@ -343,16 +352,20 @@ impl StateBackend for FullState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = ChildInfo::new_default_from_vec(storage_key.0); + let child_info = match ChildType::new(child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + None => return Err("Invalid child type".into()), + }; self.client.child_storage_hash( &BlockId::Hash(block), &child_info, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index d9f56d9fb584f..80d43f8ccee82 100644 --- a/client/rpc/src/state/state_light.rs +++ 
b/client/rpc/src/state/state_light.rs @@ -45,7 +45,7 @@ use sc_client::{ BlockchainEvents, Client, CallExecutor, light::{ blockchain::{future_header, RemoteBlockchain}, - fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadDefaultChildRequest}, + fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest}, }, }; use sp_core::{ @@ -246,29 +246,32 @@ impl StateBackend for LightState, _storage_key: StorageKey, + _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) } - fn default_child_storage( + fn child_storage( &self, block: Option, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); let fetcher = self.fetcher.clone(); let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadDefaultChildRequest { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, storage_key: storage_key.0, + child_type, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -285,14 +288,14 @@ impl StateBackend for LightState, storage_key: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self - .default_child_storage(block, storage_key, key) + Box::new(self.child_storage(block, storage_key, child_type, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index b579003e6c01c..417912e4b9585 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -65,7 +65,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.default_child_storage(storage_key, key, Some(genesis_hash).into()) + client.child_storage(storage_key, 
1, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -87,24 +87,27 @@ fn should_return_child_storage() { assert_matches!( - client.default_child_storage( + client.child_storage( child_key.clone(), + 1, key.clone(), Some(genesis_hash).into(), ).wait(), Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.default_child_storage_hash( + client.child_storage_hash( child_key.clone(), + 1, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), Ok(true) ); assert_matches!( - client.default_child_storage_size( + client.child_storage_size( child_key.clone(), + 1, key.clone(), None, ).wait(), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 4aafbfc630fe3..7a7ef6e0a91df 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -23,7 +23,7 @@ use std::marker::PhantomData; use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; use sp_core::{convert_hash, traits::CodeExecutor}; -use sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType}; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, AtLeast32Bit, CheckedConversion, @@ -39,7 +39,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use crate::cht; pub use sc_client_api::{ light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, + RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, Storage as BlockchainStorage, }, @@ -236,16 +236,19 @@ impl FetchChecker for LightDataChecker ).map_err(Into::into) } - fn check_read_default_child_proof( + fn check_read_child_proof( &self, - request: &RemoteReadDefaultChildRequest, + request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_trie = 
ChildInfo::new_default(&request.storage_key); + let child_info = match ChildType::new(request.child_type) { + Some(ChildType::ParentKeyId) => ChildInfo::new_default(&request.storage_key[..]), + None => return Err("Invalid child type".into()), + }; read_child_proof_check::( convert_hash(request.header.state_root()), remote_proof, - &child_trie, + &child_info, request.keys.iter(), ).map_err(Into::into) } @@ -502,11 +505,12 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_default_child_proof( - &RemoteReadDefaultChildRequest::
{ + assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( + &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, storage_key: b"child1".to_vec(), + child_type: 1, keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 924fd1e67849a..b89a4c43450c5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -258,7 +258,7 @@ pub enum ChildType { impl ChildType { /// Try to get a child type from its `u32` representation. - fn new(repr: u32) -> Option { + pub fn new(repr: u32) -> Option { Some(match repr { r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId, _ => return None, diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index a16a3596cadef..7685157d96a13 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -37,7 +37,7 @@ use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Numb use sc_client::{ light::fetcher::{ Fetcher, - RemoteHeaderRequest, RemoteReadRequest, RemoteReadDefaultChildRequest, + RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, }, }; @@ -313,7 +313,7 @@ impl Fetcher for LightFetcher { fn remote_read_child( &self, - _: RemoteReadDefaultChildRequest, + _: RemoteReadChildRequest, ) -> Self::RemoteReadResult { unimplemented!() } From 74aa3f8a34a72f9895ecd4e8cc559f2fdc762322 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 26 Feb 2020 15:45:49 +0100 Subject: [PATCH 54/85] bump runtime version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 730a983a43818..aaffe520e8013 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -81,7 +81,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 226, + spec_version: 227, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From 4c40ea7c750245ab4b286ad5cc433853140dc71a Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Wed, 26 Feb 2020 19:15:58 +0100 Subject: [PATCH 55/85] Resolve merging change of api from #4857 --- bin/node/runtime/src/lib.rs | 5 - client/chain-spec/src/chain_spec.rs | 24 -- client/db/src/bench.rs | 30 +-- client/db/src/lib.rs | 49 +---- client/db/src/storage_cache.rs | 28 --- client/network/src/chain.rs | 8 - client/network/src/protocol.rs | 22 -- .../src/protocol/light_client_handler.rs | 34 --- client/rpc/src/state/state_full.rs | 30 --- client/rpc/src/state/tests.rs | 26 --- client/src/client.rs | 16 -- client/src/light/backend.rs | 24 -- client/src/light/fetcher.rs | 27 --- frame/contracts/src/account_db.rs | 44 +--- frame/contracts/src/exec.rs | 10 +- frame/contracts/src/lib.rs | 18 +- frame/contracts/src/rent.rs | 8 - frame/contracts/src/tests.rs | 4 +- frame/support/src/storage/child.rs | 56 ----- primitives/externalities/src/lib.rs | 44 ---- primitives/io/src/lib.rs | 63 ------ primitives/state-machine/src/backend.rs | 87 +------- primitives/state-machine/src/basic.rs | 67 +----- .../state-machine/src/changes_trie/build.rs | 32 +-- primitives/state-machine/src/ext.rs | 205 ++---------------- .../state-machine/src/in_memory_backend.rs | 86 +------- primitives/state-machine/src/lib.rs | 95 -------- .../state-machine/src/overlayed_changes.rs | 49 +---- .../state-machine/src/proving_backend.rs | 84 +------ primitives/state-machine/src/trie_backend.rs | 83 +------ .../state-machine/src/trie_backend_essence.rs | 157 +------------- primitives/storage/src/lib.rs | 130 +---------- primitives/trie/src/lib.rs | 204 +---------------- test-utils/client/src/lib.rs | 5 - test-utils/runtime/client/src/lib.rs | 4 - 35 files changed, 77 insertions(+), 1781 
deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e9cf74b528bec..aaffe520e8013 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -81,13 +81,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. -<<<<<<< HEAD - spec_version: 216, - impl_version: 3, -======= spec_version: 227, impl_version: 0, ->>>>>>> child_trie_w3_change apis: RUNTIME_API_VERSIONS, }; diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fab0b587a75bd..e67deab30f952 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -76,16 +76,8 @@ impl BuildStorage for ChainSpec { Genesis::Runtime(gc) => gc.build_storage(), Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), -<<<<<<< HEAD - children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content"); -======= children_default: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::new_default(storage_key.0.as_slice()); ->>>>>>> child_trie_w3_change ( storage_key.0, StorageChild { @@ -281,21 +273,6 @@ impl ChainSpec { let top = storage.top.into_iter() .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(); -<<<<<<< HEAD - let children = storage.children.into_iter() - .map(|(sk, child)| { - let (info, ci_type) = child.child_info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) -======= let children_default = 
storage.children_default.into_iter() .map(|(sk, child)| ( StorageKey(sk), @@ -303,7 +280,6 @@ impl ChainSpec { .map(|(k, v)| (StorageKey(k), StorageData(v))) .collect(), )) ->>>>>>> child_trie_w3_change .collect(); Genesis::Raw(RawGenesis { top, children_default }) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 372fb8e1c90f6..55561e5e50ad7 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -146,10 +146,6 @@ impl StateBackend> for BenchmarkingState { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -162,10 +158,6 @@ impl StateBackend> for BenchmarkingState { fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -178,10 +170,6 @@ impl StateBackend> for BenchmarkingState { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -202,10 +190,6 @@ impl StateBackend> for BenchmarkingState { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -216,10 +200,6 @@ impl StateBackend> for BenchmarkingState { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -237,10 +217,6 @@ impl StateBackend> for BenchmarkingState { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) where @@ -259,10 +235,6 @@ impl StateBackend> for BenchmarkingState { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) 
-> Vec> { @@ -283,7 +255,7 @@ impl StateBackend> for BenchmarkingState { let mut keyspace = crate::Keyspaced::new(&[]); for (info, mut updates) in transaction.into_iter() { // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::CryptoUniqueId { + if info.child_type() != ChildType::ParentKeyId { // Unhandled child kind unimplemented!( "Data for {:?} without a backend implementation", diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index a1fbe41f89348..f6d066ca98c1f 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -152,10 +152,6 @@ impl StateBackend> for RefTrackingState { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -168,10 +164,6 @@ impl StateBackend> for RefTrackingState { fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -184,10 +176,6 @@ impl StateBackend> for RefTrackingState { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -204,10 +192,6 @@ impl StateBackend> for RefTrackingState { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -216,10 +200,6 @@ impl StateBackend> for RefTrackingState { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -236,10 +216,6 @@ impl StateBackend> for RefTrackingState { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) @@ -259,10 +235,6 @@ impl StateBackend> 
for RefTrackingState { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { @@ -1149,7 +1121,7 @@ impl Backend { let mut keyspace = Keyspaced::new(&[]); for (info, mut updates) in operation.db_updates.into_iter() { // child info with strong unique id are using the same state-db with prefixed key - if info.child_type() != ChildType::CryptoUniqueId { + if info.child_type() != ChildType::ParentKeyId { // Unhandled child kind return Err(ClientError::Backend(format!( "Data for {:?} without a backend implementation", @@ -1869,8 +1841,7 @@ pub(crate) mod tests { fn set_state_data() { let db = Backend::::new_test(2, 0); - let child_info = sp_core::storage::ChildInfo::new_default(b"unique_id"); - let storage_key = b":child_storage:default:key1"; + let child_info = sp_core::storage::ChildInfo::new_default(b"key1"); let hash = { let mut op = db.begin_operation().unwrap(); @@ -1897,23 +1868,19 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - vec![(storage_key.to_vec(), child_storage.clone(), child_info.clone())], + vec![(child_info.clone(), child_storage.clone())], false, ).0.into(); let hash = header.hash(); - let mut children = HashMap::default(); - children.insert(storage_key.to_vec(), sp_core::storage::StorageChild { + let mut children_default = HashMap::default(); + children_default.insert(child_info.storage_key().to_vec(), sp_core::storage::StorageChild { child_info: child_info.clone(), data: child_storage.iter().map(|(k, v)| (k.clone(), v.clone().unwrap())).collect(), }); op.reset_storage(Storage { top: storage.iter().cloned().collect(), -<<<<<<< HEAD - children, -======= - children_default: Default::default(), ->>>>>>> child_trie_w3_change + children_default, }).unwrap(); op.set_block_data( header.clone(), @@ -1930,7 +1897,7 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 
5]).unwrap(), None); assert_eq!( - state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), Some(vec![4, 4, 6]), ); @@ -1972,7 +1939,7 @@ pub(crate) mod tests { assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); assert_eq!( - state.child_storage(&storage_key[..], &child_info, &[2, 3, 5]).unwrap(), + state.child_storage(&child_info, &[2, 3, 5]).unwrap(), Some(vec![4, 4, 6]), ); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index db4aaf40409fe..07766288541f4 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -539,10 +539,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -579,10 +575,6 @@ impl>, B: BlockT> StateBackend> for Ca fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -591,10 +583,6 @@ impl>, B: BlockT> StateBackend> for Ca fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -607,10 +595,6 @@ impl>, B: BlockT> StateBackend> for Ca fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -627,10 +611,6 @@ impl>, B: BlockT> StateBackend> for Ca fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -647,10 +627,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change 
child_info: &ChildInfo, delta: I, ) -> (B::Hash, bool, Self::Transaction) @@ -670,10 +646,6 @@ impl>, B: BlockT> StateBackend> for Ca fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 3371b8fee4b49..442334cb4f015 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -56,10 +56,6 @@ pub trait Client: Send + Sync { fn read_child_proof( &self, block: &Block::Hash, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: &[Vec], ) -> Result; @@ -141,10 +137,6 @@ impl Client for SubstrateClient where fn read_child_proof( &self, block: &Block::Hash, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: &[Vec], ) -> Result { diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 19102831f72a8..00984dcf3cbb6 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1517,27 +1517,6 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); -<<<<<<< HEAD - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &request.block, - &request.storage_key, - &child_info, - &request.keys, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } -======= let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.context_data.chain.read_child_proof( &request.block, @@ -1555,7 +1534,6 
@@ impl Protocol { error ); StorageProof::empty() ->>>>>>> child_trie_w3_change } }; self.send_message( diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index b88651e5c10f7..a7d3bf4dbbfe3 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,29 +514,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; -<<<<<<< HEAD - let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof(&block, &request.storage_key, &info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { -======= let child_info = ChildInfo::new_default(&request.storage_key); let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { ->>>>>>> child_trie_w3_change log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", request_id, peer, @@ -1150,11 +1131,6 @@ mod tests { use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; use void::Void; -<<<<<<< HEAD - const CHILD_UUID: &[u8] = b"foobarbaz"; - -======= ->>>>>>> child_trie_w3_change type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; type Handler = LightClientHandler; type Swarm = libp2p::swarm::Swarm; @@ -1645,11 +1621,6 @@ mod tests { #[test] fn receives_remote_read_child_response() { -<<<<<<< HEAD - let child_info = ChildInfo::new_default(CHILD_UUID); - let info = child_info.info(); -======= ->>>>>>> child_trie_w3_change let mut chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { 
header: dummy_header(), @@ -1751,11 +1722,6 @@ mod tests { #[test] fn send_receive_read_child() { -<<<<<<< HEAD - let child_info = ChildInfo::new_default(CHILD_UUID); - let info = child_info.info(); -======= ->>>>>>> child_trie_w3_change let chan = oneshot::channel(); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 687667daab542..508ff8c74417d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -315,15 +315,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &child_storage_key, - &ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -335,7 +326,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } @@ -348,15 +338,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &child_storage_key, - &ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -368,7 +349,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } @@ -381,15 +361,6 @@ impl StateBackend for FullState FutureResult> { Box::new(result( self.block_or_best(block) -<<<<<<< HEAD - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &child_storage_key, - 
&ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) -======= .and_then(|block| { let child_info = match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), @@ -401,7 +372,6 @@ impl StateBackend for FullState>>>>>> child_trie_w3_change .map_err(client_err))) } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index f913837539171..df7c83e1dfe87 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,41 +30,24 @@ use substrate_test_runtime_client::{ runtime, }; -<<<<<<< HEAD -const CHILD_UID: &'static [u8] = b"unique_id"; -======= const STORAGE_KEY: &[u8] = b"child"; ->>>>>>> child_trie_w3_change #[test] fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_VALUE: &[u8] = b"hello world !"; -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UID); - let mut core = tokio::runtime::Runtime::new().unwrap(); - let client = TestClientBuilder::new() - .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), &child_info1, KEY.to_vec(), CHILD_VALUE.to_vec()) -======= let child_info = ChildInfo::new_default(STORAGE_KEY); let mut core = tokio::runtime::Runtime::new().unwrap(); let client = TestClientBuilder::new() .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) ->>>>>>> child_trie_w3_change .build(); let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(STORAGE_KEY.to_vec()); -<<<<<<< HEAD - let (child_info, child_type) = child_info1.info(); - let child_info = StorageKey(child_info.to_vec()); -======= ->>>>>>> child_trie_w3_change assert_eq!( client.storage(key.clone(), 
Some(genesis_hash).into()).wait() @@ -92,19 +75,10 @@ fn should_return_storage() { #[test] fn should_return_child_storage() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UID); - let (child_info, child_type) = child_info1.info(); - let child_info = StorageKey(child_info.to_vec()); - let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", &child_info1, vec![42_u8]) -======= let child_info = ChildInfo::new_default(STORAGE_KEY); let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() .add_child_storage(&child_info, "key", vec![42_u8]) ->>>>>>> child_trie_w3_change .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); diff --git a/client/src/client.rs b/client/src/client.rs index 3fc91090376fb..1131ab78de223 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -334,10 +334,6 @@ impl Client where pub fn child_storage_keys( &self, id: &BlockId, -<<<<<<< HEAD - child_storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key_prefix: &StorageKey ) -> sp_blockchain::Result> { @@ -353,10 +349,6 @@ impl Client where pub fn child_storage( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { @@ -370,10 +362,6 @@ impl Client where pub fn child_storage_hash( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &StorageKey ) -> sp_blockchain::Result> { @@ -415,10 +403,6 @@ impl Client where pub fn read_child_proof( &self, id: &BlockId, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> 
sp_blockchain::Result where diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 94b37916d57b9..a032f5e5e19fe 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -312,11 +312,7 @@ impl BlockImportOperation for ImportOperation self.changes_trie_config_update = Some(changes_trie_config); // this is only called when genesis block is imported => shouldn't be performance bottleneck -<<<<<<< HEAD - let mut storage: HashMap, ChildInfo)>, _> = HashMap::new(); -======= let mut storage: HashMap, _> = HashMap::new(); ->>>>>>> child_trie_w3_change storage.insert(None, input.top); // create a list of children keys to re-compute roots for @@ -390,10 +386,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> ClientResult>> { @@ -414,10 +406,6 @@ impl StateBackend for GenesisOrUnavailableState fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -446,10 +434,6 @@ impl StateBackend for GenesisOrUnavailableState fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, action: A, ) { @@ -462,10 +446,6 @@ impl StateBackend for GenesisOrUnavailableState fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], action: A, @@ -490,10 +470,6 @@ impl StateBackend for GenesisOrUnavailableState fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index bae2c7f7bfbce..8d6c68d6f7d37 100644 --- a/client/src/light/fetcher.rs 
+++ b/client/src/light/fetcher.rs @@ -356,11 +356,6 @@ pub mod tests { use sp_state_machine::Backend; use super::*; -<<<<<<< HEAD - const CHILD_UID_1: &'static [u8] = b"unique_id_1"; - -======= ->>>>>>> child_trie_w3_change type TestChecker = LightDataChecker< NativeExecutor, Blake2Hasher, @@ -407,7 +402,6 @@ pub mod tests { } fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - let child_info1 = ChildInfo::new_default(CHILD_UID_1); use substrate_test_runtime_client::DefaultTestClientBuilderExt; use substrate_test_runtime_client::TestClientBuilderExt; let child_info = ChildInfo::new_default(b"child1"); @@ -415,12 +409,7 @@ pub mod tests { // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( -<<<<<<< HEAD - b":child_storage:default:child1".to_vec(), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change b"key1".to_vec(), b"value1".to_vec(), ).build(); @@ -433,23 +422,13 @@ pub mod tests { // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( &remote_block_id, -<<<<<<< HEAD - &StorageKey(b":child_storage:default:child1".to_vec()), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &StorageKey(b"key1".to_vec()), ).unwrap().unwrap().0; assert_eq!(b"value1"[..], child_value[..]); let remote_read_proof = remote_client.read_child_proof( &remote_block_id, -<<<<<<< HEAD - b":child_storage:default:child1", - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &[b"key1"], ).unwrap(); @@ -527,12 +506,6 @@ pub mod tests { remote_read_proof, result, ) = prepare_for_read_child_proof_check(); -<<<<<<< HEAD - - let child_info = ChildInfo::new_default(CHILD_UID_1); - let child_infos = child_info.info(); -======= ->>>>>>> child_trie_w3_change assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( &RemoteReadChildRequest::
{ block: remote_block_header.hash(), diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 92e69c44eaef0..bff5061a0b875 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -17,7 +17,7 @@ //! Auxiliaries to help with managing partial changes to accounts state. use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieIdGenerator, }; use crate::exec::StorageKey; @@ -26,13 +26,8 @@ use sp_std::collections::btree_map::{BTreeMap, Entry}; use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_runtime::traits::{Bounded, Zero}; -<<<<<<< HEAD -use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance, UpdateBalanceOutcome}; -use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; -======= use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance}; -use frame_support::{storage::child, StorageMap}; ->>>>>>> child_trie_w3_change +use frame_support::{storage::child, StorageMap, storage::child::ChildInfo}; use frame_system; // Note: we don't provide Option because we can't create @@ -116,7 +111,7 @@ pub trait AccountDb { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option>; /// If account has an alive contract then return the code hash associated. 
@@ -135,14 +130,10 @@ impl AccountDb for DirectAccountDb { fn get_storage( &self, _account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { -<<<<<<< HEAD - trie_id.and_then(|(id, child_info)| child::get_raw(id, child_info, &blake2_256(location))) -======= - trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) ->>>>>>> child_trie_w3_change + trie_id.and_then(|child_info| child::get_raw(child_info, &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -189,21 +180,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. (true, Some(info), None) => { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change >::remove(&address); continue; } // Existing contract is being replaced by a new one. 
(true, Some(info), Some(code_hash)) => { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -243,27 +226,16 @@ impl AccountDb for DirectAccountDb { let child_info = &new_info.child_trie_unique_id(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( -<<<<<<< HEAD - &new_info.trie_id[..], child_info, -======= - &new_info.child_trie_unique_id(), ->>>>>>> child_trie_w3_change &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; -<<<<<<< HEAD - child::put_raw(&new_info.trie_id[..], child_info, &blake2_256(&k), &value[..]); - } else { - child::kill(&new_info.trie_id[..], child_info, &blake2_256(&k)); -======= - child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(child_info, &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); ->>>>>>> child_trie_w3_change + child::kill(child_info, &blake2_256(&k)); } } @@ -368,7 +340,7 @@ impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { fn get_storage( &self, account: &T::AccountId, - trie_id: Option<(&TrieId, &ChildInfo)>, + trie_id: Option<&ChildInfo>, location: &StorageKey ) -> Option> { self.local diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 77cf6f7f7de90..905aef1957479 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -277,7 +277,7 @@ pub enum DeferredAction { pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, - pub self_trie_info: Option<(TrieId, ChildInfo)>, + pub self_trie_info: Option, pub overlay: OverlayAccountDb<'a, T>, pub depth: usize, pub deferred: Vec>, @@ 
-314,7 +314,7 @@ where } } - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option<(TrieId, ChildInfo)>) + fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_info: Option) -> ExecutionContext<'b, T, V, L> { ExecutionContext { @@ -531,8 +531,7 @@ where { let (output, change_set, deferred) = { let mut nested = self.nested(dest, trie_id.map(|trie_id| { - let child_info = crate::trie_unique_id(&trie_id); - (trie_id, child_info) + crate::trie_unique_id(&trie_id) })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) @@ -684,8 +683,7 @@ where type T = T; fn get_storage(&self, key: &StorageKey) -> Option> { - let trie_id = self.ctx.self_trie_info.as_ref() - .map(|info| ((&info.0, &info.1))); + let trie_id = self.ctx.self_trie_info.as_ref(); self.ctx.overlay.get_storage( &self.ctx.self_account, trie_id, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 5c423fa5796df..b92c57b431021 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -689,7 +689,7 @@ impl Module { let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, - Some((&contract_info.trie_id, &child_info)), + Some(&child_info), &key, ); Ok(maybe_value) @@ -805,18 +805,10 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), ).map(|value| { child::kill( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), ); @@ -838,10 +830,6 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( -<<<<<<< HEAD - &origin_contract.trie_id, -======= ->>>>>>> child_trie_w3_change &origin_contract.child_trie_unique_id(), &blake2_256(key), &value, @@ -946,11 +934,7 @@ decl_storage! 
{ impl OnKilledAccount for Module { fn on_killed_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { -<<<<<<< HEAD - child::kill_storage(&info.trie_id, &info.child_trie_unique_id()); -======= child::kill_storage(&info.child_trie_unique_id()); ->>>>>>> child_trie_w3_change } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 6d26519dd63fb..dfcbc997c5b22 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,10 +223,6 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( -<<<<<<< HEAD - &alive_contract_info.trie_id, -======= ->>>>>>> child_trie_w3_change &alive_contract_info.child_trie_unique_id(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); @@ -250,10 +246,6 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( -<<<<<<< HEAD - &alive_contract_info.trie_id, -======= ->>>>>>> child_trie_w3_change &alive_contract_info.child_trie_unique_id(), ); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 49472fd3e0053..04574351fc289 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -311,8 +311,8 @@ fn account_removal_removes_storage() { let trie_id2 = ::TrieIdGenerator::trie_id(&2); let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); - let child_info1 = Some((&trie_id1, &child_info1)); - let child_info2 = Some((&trie_id2, &child_info2)); + let child_info1 = Some(&child_info1); + let child_info2 = Some(&child_info2); let key1 = &[1; 32]; let key2 = &[2; 32]; diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index e6050d8d43500..658908d258a2f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -26,10 +26,6 @@ pub use sp_core::storage::{ChildInfo, ChildType}; /// Return the value of the item in storage under `key`, or 
`None` if there is no explicit entry. pub fn get( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -53,10 +49,6 @@ pub fn get( /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> T { @@ -66,10 +58,6 @@ pub fn get_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: T, @@ -80,10 +68,6 @@ pub fn get_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: F, @@ -93,10 +77,6 @@ pub fn get_or_else T>( /// Put `value` in storage under `key`. pub fn put( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], value: &T, @@ -114,10 +94,6 @@ pub fn put( /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -131,10 +107,6 @@ pub fn take( /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. 
pub fn take_or_default( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> T { @@ -144,10 +116,6 @@ pub fn take_or_default( /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: T, @@ -158,10 +126,6 @@ pub fn take_or( /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], default_value: F, @@ -171,10 +135,6 @@ pub fn take_or_else T>( /// Check to see if `key` has an explicit entry in storage. pub fn exists( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { @@ -188,10 +148,6 @@ pub fn exists( /// Remove all `storage_key` key/values pub fn kill_storage( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { match child_info.child_type() { @@ -203,10 +159,6 @@ pub fn kill_storage( /// Ensure `key` has no explicit entry in storage. pub fn kill( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) { @@ -222,10 +174,6 @@ pub fn kill( /// Get a Vec of bytes from storage. pub fn get_raw( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -239,10 +187,6 @@ pub fn get_raw( /// Put a raw byte slice into storage. 
pub fn put_raw( -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], value: &[u8], diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 188cb3e351eab..4d2d61998637f 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -47,10 +47,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -63,10 +59,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -83,10 +75,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -96,10 +84,6 @@ pub trait Externalities: ExtensionStore { /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; @@ -112,10 +96,6 @@ pub trait Externalities: ExtensionStore { /// Set child storage entry `key` of current contract being called (effective immediately). fn set_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: Vec, value: Vec, @@ -131,10 +111,6 @@ pub trait Externalities: ExtensionStore { /// Clear a child storage entry (`key`) of current contract being called (effective immediately). 
fn clear_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) { @@ -149,10 +125,6 @@ pub trait Externalities: ExtensionStore { /// Whether a child storage entry exists. fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { @@ -165,20 +137,12 @@ pub trait Externalities: ExtensionStore { /// Returns the key immediately following the given key, if it exists, in child storage. fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option>; /// Clear an entire child storage. -<<<<<<< HEAD - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: &ChildInfo); -======= fn kill_child_storage(&mut self, child_info: &ChildInfo); ->>>>>>> child_trie_w3_change /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); @@ -186,10 +150,6 @@ pub trait Externalities: ExtensionStore { /// Clear child storage entries which keys are start with the given prefix. fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ); @@ -200,10 +160,6 @@ pub trait Externalities: ExtensionStore { /// Set or clear a child storage entry. Return whether the operation succeeds. 
fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: Vec, value: Option>, diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index fb997c47f0b7b..ef18e3f3dd496 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -84,33 +84,6 @@ pub trait Storage { self.storage(key).map(|s| s.to_vec()) } -<<<<<<< HEAD - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. - /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. - fn child_get( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, &child_info, key).map(|s| s.to_vec()) - } - -======= ->>>>>>> child_trie_w3_change /// Get `key` from storage, placing the value into `value_out` and return the number of /// bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. 
@@ -201,11 +174,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.child_storage(storage_key, &child_info, key) -======= self.child_storage(&child_info, key) ->>>>>>> child_trie_w3_change .map(|value| { let value_offset = value_offset as usize; let data = &value[value_offset.min(value.len())..]; @@ -228,16 +197,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.set_child_storage(storage_key, &child_info, key.to_vec(), value.to_vec()); - } - - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) -======= self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -252,11 +212,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.clear_child_storage(storage_key, &child_info, key); -======= self.clear_child_storage(&child_info, key); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -270,11 +226,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.kill_child_storage(storage_key, &child_info); -======= self.kill_child_storage(&child_info); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. 
@@ -289,11 +241,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.exists_child_storage(storage_key, &child_info, key) -======= self.exists_child_storage(&child_info, key) ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -308,11 +256,7 @@ pub trait Storage { if child_type != 1 { panic!("Invalid child definition"); } let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); -<<<<<<< HEAD - self.clear_child_prefix(storage_key, &child_info, prefix); -======= self.clear_child_prefix(&child_info, prefix); ->>>>>>> child_trie_w3_change } /// Deprecated, please use dedicated runtime apis. @@ -452,15 +396,8 @@ pub trait DefaultChildStorage { storage_key: &[u8], key: &[u8], ) -> Option> { -<<<<<<< HEAD - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, &child_info, key) -======= let child_info = ChildInfo::new_default(storage_key); self.next_child_storage_key(&child_info, key) ->>>>>>> child_trie_w3_change } } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 5507dba6fc272..d26a8baf0bf51 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -20,11 +20,7 @@ use log::warn; use sp_core::{Hasher, InnerHasher}; use codec::Encode; -<<<<<<< HEAD use sp_core::storage::{ChildInfo, ChildrenMap}; -======= -use sp_core::storage::ChildInfo; ->>>>>>> child_trie_w3_change use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, @@ -57,10 +53,6 @@ pub trait Backend: std::fmt::Debug { /// Get keyed child storage or None if 
there is nothing associated. fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error>; @@ -68,10 +60,6 @@ pub trait Backend: std::fmt::Debug { /// Get child keyed storage value hash or None if there is nothing associated. fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -86,10 +74,6 @@ pub trait Backend: std::fmt::Debug { /// true if a key exists in child storage. fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result { @@ -102,10 +86,6 @@ pub trait Backend: std::fmt::Debug { /// Return the next key in child storage in lexicographic order or `None` if there is no value. fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8] ) -> Result, Self::Error>; @@ -113,10 +93,6 @@ pub trait Backend: std::fmt::Debug { /// Retrieve all entries keys of child storage and call `f` for each of those keys. fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ); @@ -136,10 +112,6 @@ pub trait Backend: std::fmt::Debug { /// call `f` for each of those keys. fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -158,10 +130,6 @@ pub trait Backend: std::fmt::Debug { /// is true if child storage root equals default storage root. 
fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -182,10 +150,6 @@ pub trait Backend: std::fmt::Debug { /// Get all keys of child storage with given prefix fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec { @@ -211,11 +175,7 @@ pub trait Backend: std::fmt::Debug { where I1: IntoIterator)>, I2i: IntoIterator)>, -<<<<<<< HEAD - I2: IntoIterator, -======= I2: IntoIterator, ->>>>>>> child_trie_w3_change H::Out: Ord + Encode, { let mut txs: Self::Transaction = Default::default(); @@ -224,30 +184,21 @@ pub trait Backend: std::fmt::Debug { // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = -<<<<<<< HEAD - self.child_storage_root(&storage_key[..], &child_info, child_delta); + self.child_storage_root(&child_info, child_delta); + let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { if return_child_roots { - result_child_roots.push((storage_key.clone(), None)); + result_child_roots.push((prefixed_storage_key.clone(), None)); } - child_roots.push((storage_key, None)); + child_roots.push((prefixed_storage_key, None)); } else { if return_child_roots { - child_roots.push((storage_key.clone(), Some(child_root.encode()))); - result_child_roots.push((storage_key, Some(child_root))); + child_roots.push((prefixed_storage_key.clone(), Some(child_root.encode()))); + result_child_roots.push((prefixed_storage_key, Some(child_root))); } else { - child_roots.push((storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key, Some(child_root.encode()))); } -======= - self.child_storage_root(&child_info, child_delta); - let prefixed_storage_key = child_info.prefixed_storage_key(); - txs.consolidate(child_txs); - if empty { - 
child_roots.push((prefixed_storage_key, None)); - } else { - child_roots.push((prefixed_storage_key, Some(child_root.encode()))); ->>>>>>> child_trie_w3_change } } let (root, parent_txs) = self.storage_root( @@ -287,10 +238,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -299,10 +246,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -315,10 +258,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -331,10 +270,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -352,10 +287,6 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -392,11 +323,7 @@ impl Consolidate for () { } impl Consolidate for Vec<( -<<<<<<< HEAD - Option<(StorageKey, ChildInfo)>, -======= Option, ->>>>>>> child_trie_w3_change StorageCollection, )> { fn consolidate(&mut self, mut other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index a92619a449783..9499713484d80 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,12 +129,7 @@ impl Externalities for BasicExternalities { fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - 
_child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change key: &[u8], ) -> Option { self.inner.children_default.get(child_info.storage_key()) @@ -143,10 +138,6 @@ impl Externalities for BasicExternalities { fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -155,10 +146,6 @@ impl Externalities for BasicExternalities { fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { @@ -167,10 +154,6 @@ impl Externalities for BasicExternalities { fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { @@ -184,12 +167,7 @@ impl Externalities for BasicExternalities { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change key: &[u8], ) -> Option { let range = (Bound::Excluded(key), Bound::Unbounded); @@ -211,10 +189,6 @@ impl Externalities for BasicExternalities { fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, value: Option, @@ -233,12 +207,7 @@ impl Externalities for BasicExternalities { fn kill_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change ) { self.inner.children_default.remove(child_info.storage_key()); } @@ -265,12 +234,7 @@ impl Externalities for BasicExternalities { fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, - _child_info: &ChildInfo, -======= child_info: &ChildInfo, ->>>>>>> child_trie_w3_change prefix: &[u8], ) { if let 
Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) { @@ -317,11 +281,7 @@ impl Externalities for BasicExternalities { let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); InMemoryBackend::::default() -<<<<<<< HEAD - .child_storage_root(storage_key.as_ref(), &child.child_info, delta).0 -======= .child_storage_root(&child.child_info, delta).0 ->>>>>>> child_trie_w3_change } else { empty_child_trie_root::>() }.encode() @@ -374,42 +334,18 @@ mod tests { #[test] fn children_works() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_storage = b":child_storage:default:test".to_vec(); - -======= let child_info = ChildInfo::new_default(b"storage_key"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut ext = BasicExternalities::new(Storage { top: Default::default(), children_default: map![ child_info.storage_key().to_vec() => StorageChild { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], -<<<<<<< HEAD - child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ] }); -<<<<<<< HEAD - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - - assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), Some(b"reindeer".to_vec())); - - ext.set_child_storage(child(), &child_info1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), Some(b"puppy".to_vec())); - - ext.clear_child_storage(child(), &child_info1, b"dog"); - assert_eq!(ext.child_storage(child(), &child_info1, b"dog"), None); - - ext.kill_child_storage(child(), &child_info1); - assert_eq!(ext.child_storage(child(), &child_info1, b"doe"), None); -======= assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec()); @@ -420,7 +356,6 @@ mod tests { ext.kill_child_storage(child_info); 
assert_eq!(ext.child_storage(child_info, b"doe"), None); ->>>>>>> child_trie_w3_change } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 6ea9edd10029b..d9b80987d368d 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -138,11 +138,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Number: BlockNumber, { let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { -<<<<<<< HEAD - let child_info = changes.child_info(sk).clone(); -======= let child_info = changes.default_child_info(sk).cloned(); ->>>>>>> child_trie_w3_change ( changes.committed.children_default.get(sk).map(|c| &c.0), changes.prospective.children_default.get(sk).map(|c| &c.0), @@ -162,11 +158,7 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( if let Some(sk) = storage_key.as_ref() { if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { if let Some(child_info) = child_info.as_ref() { -<<<<<<< HEAD - if !backend.exists_child_storage(sk, child_info, k) -======= if !backend.exists_child_storage(&child_info, k) ->>>>>>> child_trie_w3_change .map_err(|e| format!("{}", e))? 
{ return Ok(map); } @@ -368,14 +360,8 @@ mod test { OverlayedChanges, Configuration, ) { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_info2 = ChildInfo::new_default(b"unique_id_2"); -======= let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); ->>>>>>> child_trie_w3_change let backend: InMemoryBackend<_> = vec![ (vec![100], vec![255]), (vec![101], vec![255]), @@ -452,21 +438,13 @@ mod test { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info1.clone())), -======= - ].into_iter().collect(), child_info_1.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_1.clone())), (child_trie_key2, (vec![ (vec![100], OverlayedValue { value: Some(vec![200]), extrinsics: Some(vec![0, 2].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info2)), -======= - ].into_iter().collect(), child_info_2.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_2)), ].into_iter().collect() }, committed: OverlayedChangeSet { top: vec![ @@ -489,11 +467,7 @@ mod test { value: Some(vec![202]), extrinsics: Some(vec![3].into_iter().collect()) }) -<<<<<<< HEAD - ].into_iter().collect(), child_info1)), -======= - ].into_iter().collect(), child_info_1.to_owned())), ->>>>>>> child_trie_w3_change + ].into_iter().collect(), child_info_1)), ].into_iter().collect(), }, collect_extrinsics: true, diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3e44dfd6204cb..f2ce9738bb2a2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -23,12 +23,8 @@ use crate::{ }; use sp_core::{ -<<<<<<< HEAD Hasher, - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, -======= storage::{well_known_keys::is_child_storage_key, ChildInfo}, ->>>>>>> 
child_trie_w3_change traits::Externalities, hexdisplay::HexDisplay, }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; @@ -209,19 +205,11 @@ where fn child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.storage(key); - } else { - return None; - } + return self.storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -244,19 +232,11 @@ where fn child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.storage_hash(key); - } else { - return None; - } + return self.storage_hash(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay @@ -279,19 +259,11 @@ where fn original_child_storage( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.original_storage(key); - } else { - return None; - } + return self.original_storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -310,19 +282,11 @@ where fn original_child_storage_hash( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option> { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.original_storage_hash(key); - } else { - return None; - } + return self.original_storage_hash(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.backend @@ -356,19 +320,11 @@ where fn exists_child_storage( &self, -<<<<<<< HEAD - storage_key: 
ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> bool { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.exists_storage(key); - } else { - return false; - } + return self.exists_storage(key); } let _guard = sp_panic_handler::AbortGuard::force_abort(); @@ -405,19 +361,11 @@ where fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Option { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.next_storage_key(key); - } else { - return None; - } + return self.next_storage_key(key); } let next_backend_key = self.backend .next_child_storage_key(child_info, key) @@ -459,21 +407,12 @@ where fn place_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, value: Option, ) { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.place_storage(key, value); - } else { - trace!(target: "state-trace", "Ignoring place_child_storage on top trie"); - return; - } + return self.place_storage(key, value); } trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", self.id, @@ -489,10 +428,6 @@ where fn kill_child_storage( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { if child_info.is_top_trie() { @@ -532,20 +467,11 @@ where fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: ChildStorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) { if child_info.is_top_trie() { - if storage_key.is_empty() { - return self.clear_prefix(prefix); - } else { - trace!(target: "state-trace", "Ignoring clear_child_prefix on top trie"); - return; - } + return self.clear_prefix(prefix); } trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", @@ -589,18 
+515,11 @@ where let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); if self.storage_transaction_cache.transaction_storage_root.is_some() { -<<<<<<< HEAD - let root = self.storage_transaction_cache.transaction_child_storage_root.get(storage_key.as_ref()) + let root = self.storage_transaction_cache.transaction_child_storage_root + .get(&prefixed_storage_key) .map(|root| root.encode()) .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()).encode() -======= - let root = self - .storage(prefixed_storage_key.as_slice()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or( - empty_child_trie_root::>() ->>>>>>> child_trie_w3_change + empty_child_trie_root::>().encode() ); trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", self.id, @@ -610,15 +529,9 @@ where root } else { -<<<<<<< HEAD - if let Some(child_info) = self.overlay.child_info(storage_key).clone() { - let (root, _is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) -======= if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() { - let (root, is_empty, _) = { + let (root, _is_empty, _) = { let delta = self.overlay.committed.children_default.get(storage_key) ->>>>>>> child_trie_w3_change .into_iter() .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) .chain( @@ -627,27 +540,10 @@ where .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) ); -<<<<<<< HEAD - self.backend.child_storage_root(storage_key, child_info, delta) - }; - - let root = root.encode(); -======= self.backend.child_storage_root(&child_info, delta) }; let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. 
- // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. - if is_empty { - self.overlay.set_storage(prefixed_storage_key, None); - } else { - self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); - } ->>>>>>> child_trie_w3_change trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", self.id, @@ -747,11 +643,6 @@ mod tests { type TestBackend = InMemoryBackend; type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; -<<<<<<< HEAD - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - const CHILD_UUID_1: &[u8] = b"unique_id_1"; -======= ->>>>>>> child_trie_w3_change fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { @@ -863,23 +754,14 @@ mod tests { #[test] fn next_child_storage_key_works() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); -======= let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); -======= overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); ->>>>>>> child_trie_w3_change let backend = Storage { top: map![], children_default: map![ @@ -889,11 +771,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], -<<<<<<< HEAD - child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ], }.into(); @@ -902,25 +780,6 @@ mod tests { let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); // next_backend < next_overlay -<<<<<<< HEAD - assert_eq!(ext.next_child_storage_key(child(), 
&child_info1, &[5]), Some(vec![10])); - - // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[10]), Some(vec![30])); - - // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[20]), Some(vec![30])); - - // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[30]), Some(vec![40])); - - drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), &child_info1, &[40]), Some(vec![50])); -======= assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); // next_backend == next_overlay but next_overlay is a delete @@ -938,29 +797,18 @@ mod tests { // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); ->>>>>>> child_trie_w3_change } #[test] fn child_storage_works() { -<<<<<<< HEAD use sp_core::InnerHasher; - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); - -======= let child_info = ChildInfo::new_default(b"Child1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), &child_info1, vec![30], Some(vec![31])); -======= overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); ->>>>>>> child_trie_w3_change let backend = Storage { top: map![], children_default: map![ @@ -970,37 +818,13 @@ mod tests { vec![20] => vec![20], vec![30] => vec![40] ], -<<<<<<< HEAD - 
child_info: child_info1.clone(), -======= - child_info: child_info.to_owned(), ->>>>>>> child_trie_w3_change + child_info: child_info.clone(), } ], }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); -<<<<<<< HEAD - assert_eq!(ext.child_storage(child(), &child_info1, &[10]), Some(vec![10])); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[10]), Some(vec![10])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[10]), - Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), - ); - - assert_eq!(ext.child_storage(child(), &child_info1, &[20]), None); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[20]), Some(vec![20])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[20]), - None, - ); - - assert_eq!(ext.child_storage(child(), &child_info1, &[30]), Some(vec![31])); - assert_eq!(ext.original_child_storage(child(), &child_info1, &[30]), Some(vec![40])); - assert_eq!( - ext.child_storage_hash(child(), &child_info1, &[30]), -======= assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!(ext.original_child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1019,7 +843,6 @@ mod tests { assert_eq!(ext.original_child_storage(child_info, &[30]), Some(vec![40])); assert_eq!( ext.child_storage_hash(child_info, &[30]), ->>>>>>> child_trie_w3_change Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), ); } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 101ce276acca8..4e170c3b75955 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -24,17 +24,10 @@ use crate::{ use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ -<<<<<<< HEAD - MemoryDB, default_child_trie_root, TrieConfiguration, trie_types::Layout, -}; -use codec::Codec; -use 
sp_core::storage::{ChildInfo, Storage}; -======= - MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout, + MemoryDB, empty_child_trie_root, TrieConfiguration, trie_types::Layout, }; use codec::Codec; use sp_core::storage::{ChildInfo, ChildType, Storage}; ->>>>>>> child_trie_w3_change /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -54,11 +47,7 @@ impl error::Error for Void { /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { -<<<<<<< HEAD - inner: HashMap, BTreeMap>, -======= inner: HashMap, BTreeMap>, ->>>>>>> child_trie_w3_change // This field is only needed for returning reference in `as_trie_backend`. trie: Option, H>>, _hasher: PhantomData, @@ -99,11 +88,7 @@ impl PartialEq for InMemory { impl InMemory { /// Copy the state, with applied updates pub fn update< -<<<<<<< HEAD - T: IntoIterator, StorageCollection)> -======= T: IntoIterator, StorageCollection)> ->>>>>>> child_trie_w3_change >( &self, changes: T, @@ -122,17 +107,10 @@ impl InMemory { } } -<<<<<<< HEAD -impl From, BTreeMap>> - for InMemory -{ - fn from(inner: HashMap, BTreeMap>) -> Self { -======= impl From, BTreeMap>> for InMemory { fn from(inner: HashMap, BTreeMap>) -> Self { ->>>>>>> child_trie_w3_change InMemory { inner, trie: None, @@ -143,13 +121,8 @@ impl From, BTreeMap From for InMemory { fn from(inners: Storage) -> Self { -<<<<<<< HEAD - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); -======= let mut inner: HashMap, BTreeMap> = inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); ->>>>>>> child_trie_w3_change inner.insert(None, inners.top); InMemory { inner, @@ -171,21 +144,12 @@ impl From> for InMemory { } } -<<<<<<< HEAD -impl From, StorageCollection)>> - for InMemory { - fn from( 
- inner: Vec<(Option<(StorageKey, ChildInfo)>, StorageCollection)>, - ) -> Self { - let mut expanded: HashMap, BTreeMap> -======= impl From, StorageCollection)>> for InMemory { fn from( inner: Vec<(Option, StorageCollection)>, ) -> Self { let mut expanded: HashMap, BTreeMap> ->>>>>>> child_trie_w3_change = HashMap::new(); for (child_info, key_values) in inner { let entry = expanded.entry(child_info).or_default(); @@ -200,28 +164,16 @@ impl From, StorageCollection)>> } impl InMemory { -<<<<<<< HEAD - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], &v.1)) - ) -======= /// Child storage infos iterator. pub fn child_storage_infos(&self) -> impl Iterator { self.inner.iter().filter_map(|item| item.0.as_ref()) ->>>>>>> child_trie_w3_change } } impl Backend for InMemory where H::Out: Codec { type Error = Void; type Transaction = Vec<( -<<<<<<< HEAD - Option<(StorageKey, ChildInfo)>, -======= Option, ->>>>>>> child_trie_w3_change StorageCollection, )>; type TrieBackendStorage = MemoryDB; @@ -232,10 +184,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -259,10 +207,6 @@ impl Backend for InMemory where H::Out: Codec { fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, mut f: F, ) { @@ -272,10 +216,6 @@ impl Backend for InMemory where H::Out: Codec { fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -307,10 +247,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> 
(H::Out, bool, Self::Transaction) @@ -326,11 +262,7 @@ impl Backend for InMemory where H::Out: Codec { .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); -<<<<<<< HEAD let root = Layout::::trie_root( -======= - let root = child_trie_root::, _, _, _>( ->>>>>>> child_trie_w3_change existing_pairs.chain(transaction.iter().cloned()) .collect::>() .into_iter() @@ -356,10 +288,6 @@ impl Backend for InMemory where H::Out: Codec { fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { @@ -386,10 +314,6 @@ impl Backend for InMemory where H::Out: Codec { fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec { @@ -435,12 +359,8 @@ mod tests { #[test] fn in_memory_with_child_trie_only() { let storage = InMemory::::default(); -<<<<<<< HEAD - let child_info = ChildInfo::new_default(b"unique_id_1"); -======= let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut storage = storage.update( vec![( Some(child_info.clone()), @@ -448,11 +368,7 @@ mod tests { )] ); let trie_backend = storage.as_trie_backend().unwrap(); -<<<<<<< HEAD - assert_eq!(trie_backend.child_storage(b"1", &child_info, b"2").unwrap(), -======= assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), ->>>>>>> child_trie_w3_change Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some()); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 1bdf52e36a82b..1a735943c7ea0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -550,10 +550,6 @@ where /// Generate child storage read proof. 
pub fn prove_child_read( mut backend: B, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> Result> @@ -593,10 +589,6 @@ where /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( trie_backend: &TrieBackend, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, keys: I, ) -> Result> @@ -610,11 +602,7 @@ where let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); for key in keys.into_iter() { proving_backend -<<<<<<< HEAD - .child_storage(storage_key, child_info, key.as_ref()) -======= .child_storage(child_info, key.as_ref()) ->>>>>>> child_trie_w3_change .map_err(|e| Box::new(e) as Box)?; } Ok(proving_backend.extract_proof()) @@ -689,12 +677,7 @@ where H: Hasher, H::Out: Ord + Codec, { -<<<<<<< HEAD - // Not a prefixed memory db, using empty unique id and include root resolution. - proving_backend.child_storage(storage_key, &ChildInfo::top_trie(), key) -======= proving_backend.child_storage(child_info, key) ->>>>>>> child_trie_w3_change .map_err(|e| Box::new(e) as Box) } @@ -716,11 +699,6 @@ mod tests { fallback_succeeds: bool, } -<<<<<<< HEAD - const CHILD_UID_1: &'static [u8] = b"unique_id_1"; - -======= ->>>>>>> child_trie_w3_change impl CodeExecutor for DummyCodeExecutor { type Error = u8; @@ -949,13 +927,8 @@ mod tests { #[test] fn set_child_storage_works() { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(CHILD_UID_1); -======= let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -969,43 +942,23 @@ mod tests { ); ext.set_child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ->>>>>>> 
child_trie_w3_change b"abc".to_vec(), b"def".to_vec() ); assert_eq!( ext.child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change b"abc" ), Some(b"def".to_vec()) ); ext.kill_child_storage( -<<<<<<< HEAD - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, - ); - assert_eq!( - ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - &child_info1, -======= child_info, ); assert_eq!( ext.child_storage( child_info, ->>>>>>> child_trie_w3_change b"abc" ), None @@ -1014,13 +967,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { -<<<<<<< HEAD - - let child_info1 = ChildInfo::new_default(CHILD_UID_1); -======= let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; ->>>>>>> child_trie_w3_change // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; @@ -1047,12 +995,7 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, -<<<<<<< HEAD - b":child_storage:default:sub1", - &child_info1, -======= child_info, ->>>>>>> child_trie_w3_change &[b"value3"], ).unwrap(); let local_result1 = read_child_proof_check::( @@ -1076,42 +1019,4 @@ mod tests { vec![(b"value2".to_vec(), None)], ); } -<<<<<<< HEAD -======= - - #[test] - fn child_storage_uuid() { - - let child_info_1 = ChildInfo::new_default(b"sub_test1"); - let child_info_2 = ChildInfo::new_default(b"sub_test2"); - - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - 
changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - assert!(!duplicate); - } ->>>>>>> child_trie_w3_change } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 4fa7f89d1991b..517073ea92853 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -36,6 +36,9 @@ use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; +/// Storage key. +pub type PrefixedStorageKey = Vec; + /// Storage value. pub type StorageValue = Vec; @@ -77,11 +80,7 @@ pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, /// Child storage changes. -<<<<<<< HEAD - pub children: HashMap, ChildInfo)>, -======= pub children_default: HashMap, ChildInfo)>, ->>>>>>> child_trie_w3_change } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -135,7 +134,7 @@ pub struct StorageTransactionCache { /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, /// The storage child roots after applying the transaction. - pub(crate) transaction_child_storage_root: BTreeMap>, + pub(crate) transaction_child_storage_root: BTreeMap>, /// Contains the changes trie transaction. pub(crate) changes_trie_transaction: Option>>, /// The storage root after applying the changes trie transaction. @@ -254,10 +253,6 @@ impl OverlayedChanges { /// `None` can be used to delete a value specified by the given key. 
pub(crate) fn set_child_storage( &mut self, -<<<<<<< HEAD - storage_key: StorageKey, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: StorageKey, val: Option, @@ -286,10 +281,6 @@ impl OverlayedChanges { /// [`discard_prospective`]: #method.discard_prospective pub(crate) fn clear_child_storage( &mut self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, ) { let extrinsic_index = self.extrinsic_index(); @@ -364,10 +355,6 @@ impl OverlayedChanges { pub(crate) fn clear_child_prefix( &mut self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) { @@ -450,11 +437,7 @@ impl OverlayedChanges { fn drain_committed(&mut self) -> ( impl Iterator)>, impl Iterator)>, ChildInfo))>, -<<<<<<< HEAD - ){ -======= ) { ->>>>>>> child_trie_w3_change assert!(self.prospective.is_empty()); ( std::mem::replace(&mut self.committed.top, Default::default()) @@ -572,12 +555,6 @@ impl OverlayedChanges { .into_iter() .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) ), -<<<<<<< HEAD - self.child_info(storage_key) - .expect("child info initialized in either committed or prospective") - .clone(), -======= ->>>>>>> child_trie_w3_change ) ); @@ -625,13 +602,8 @@ impl OverlayedChanges { /// Get child info for a storage key. /// Take the latest value so prospective first. 
-<<<<<<< HEAD - pub fn child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { -======= pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> { if let Some((_, ci)) = self.prospective.children_default.get(storage_key) { ->>>>>>> child_trie_w3_change return Some(&ci); } if let Some((_, ci)) = self.committed.children_default.get(storage_key) { @@ -890,21 +862,12 @@ mod tests { let child_info = &child_info; let child = child_info.storage_key(); let mut overlay = OverlayedChanges::default(); -<<<<<<< HEAD - overlay.set_child_storage(child.clone(), &child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), &child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), &child_info, vec![40], Some(vec![40])); - overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), &child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), &child_info, vec![30], None); -======= overlay.set_child_storage(child_info, vec![20], Some(vec![20])); overlay.set_child_storage(child_info, vec![30], Some(vec![30])); overlay.set_child_storage(child_info, vec![40], Some(vec![40])); overlay.commit_prospective(); overlay.set_child_storage(child_info, vec![10], Some(vec![10])); overlay.set_child_storage(child_info, vec![30], None); ->>>>>>> child_trie_w3_change // next_prospective < next_committed let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap(); @@ -926,11 +889,7 @@ mod tests { assert_eq!(next_to_30.0.to_vec(), vec![40]); assert_eq!(next_to_30.1.value, Some(vec![40])); -<<<<<<< HEAD - overlay.set_child_storage(child.clone(), &child_info, vec![50], Some(vec![50])); -======= overlay.set_child_storage(child_info, vec![50], Some(vec![50])); ->>>>>>> child_trie_w3_change // next_prospective, no next_committed let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap(); 
assert_eq!(next_to_40.0.to_vec(), vec![50]); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 486463a83a094..92abe89a19f03 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -23,13 +23,8 @@ use log::debug; use hash_db::{HashDB, EMPTY_PREFIX, Prefix}; use sp_core::{Hasher, InnerHasher}; use sp_trie::{ -<<<<<<< HEAD - MemoryDB, default_child_trie_root, read_trie_value_with, - record_all_keys, -======= - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, + MemoryDB, empty_child_trie_root, read_trie_value_with, record_all_keys ->>>>>>> child_trie_w3_change }; pub use sp_trie::Recorder; pub use sp_trie::trie_types::{Layout, TrieError}; @@ -150,14 +145,8 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> /// Produce proof for a child key query. pub fn child_storage( &mut self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], -======= - child_info: &ChildInfo, - key: &[u8] ->>>>>>> child_trie_w3_change ) -> Result>, String> { let storage_key = child_info.storage_key(); let root = self.storage(storage_key)? 
@@ -171,12 +160,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); -<<<<<<< HEAD read_trie_value_with::, _, _>( -======= - read_child_trie_value_with::, _, _>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &eph, &root, key, @@ -299,10 +283,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -311,10 +291,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, f: F, ) { @@ -327,10 +303,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: &[u8], ) -> Result>, Self::Error> { @@ -347,10 +319,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, @@ -368,10 +336,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_keys( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], ) -> Vec> { @@ -387,10 +351,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -398,12 +358,8 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> I: IntoIterator, Option>)>, H::Out: Ord { -<<<<<<< HEAD - let (root, is_empty, mut tx) = self.0.child_storage_root(storage_key, child_info, delta); + let (root, is_empty, mut tx) = self.0.child_storage_root(child_info, delta); 
(root, is_empty, tx.remove(child_info)) -======= - self.0.child_storage_root(child_info, delta) ->>>>>>> child_trie_w3_change } } @@ -516,19 +472,6 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { -<<<<<<< HEAD - let child_info1 = ChildInfo::new_default(b"unique_id_1"); - let child_info2 = ChildInfo::new_default(b"unique_id_2"); - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), child_info1.clone())), - (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), child_info2.clone())), -======= let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -538,38 +481,25 @@ mod tests { (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), ->>>>>>> child_trie_w3_change (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), -<<<<<<< HEAD - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())), + in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())), false, -======= - in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new())) ->>>>>>> child_trie_w3_change ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), vec![i] )); (28..65).for_each(|i| assert_eq!( -<<<<<<< HEAD - in_memory.child_storage(&own1[..], &child_info1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], &child_info2, 
&[i]).unwrap().unwrap(), -======= in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i] )); (10..15).for_each(|i| assert_eq!( in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), ->>>>>>> child_trie_w3_change vec![i] )); @@ -597,11 +527,7 @@ mod tests { assert_eq!(proof_check.storage(&[64]).unwrap(), None); let proving = ProvingBackend::new(trie); -<<<<<<< HEAD - assert_eq!(proving.child_storage(&own1[..], &child_info1, &[64]), Ok(Some(vec![64]))); -======= assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64]))); ->>>>>>> child_trie_w3_change let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::( @@ -609,11 +535,7 @@ mod tests { proof ).unwrap(); assert_eq!( -<<<<<<< HEAD - proof_check.child_storage(&own1[..], &child_info1, &[64]).unwrap().unwrap(), -======= proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(), ->>>>>>> child_trie_w3_change vec![64] ); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 2b6333bae8540..d60e8e637b39d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -16,17 +16,10 @@ //! Trie-based state machine backend. 
use log::{warn, debug}; -<<<<<<< HEAD use sp_core::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildrenMap}; -======= -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::{ChildInfo, ChildType}; ->>>>>>> child_trie_w3_change +use sp_core::storage::{ChildInfo, ChildType, ChildrenMap}; use codec::{Codec, Decode}; use crate::{ StorageKey, StorageValue, Backend, @@ -91,22 +84,14 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(child_info)? { essence.storage(child_info, key) } else { Ok(None) } -======= - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.child_storage(child_info, key) ->>>>>>> child_trie_w3_change } fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { @@ -115,22 +100,14 @@ impl, H: Hasher> Backend for TrieBackend where fn next_child_storage_key( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, key: &[u8], ) -> Result, Self::Error> { - if let Some(essence) = self.child_essence(storage_key)? { + if let Some(essence) = self.child_essence(child_info)? 
{ essence.next_storage_key(child_info, key) } else { Ok(None) } -======= - child_info: &ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.next_child_storage_key(child_info, key) ->>>>>>> child_trie_w3_change } fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { @@ -143,39 +120,23 @@ impl, H: Hasher> Backend for TrieBackend where fn for_keys_in_child_storage( &self, -<<<<<<< HEAD - storage_key: &[u8], child_info: &ChildInfo, f: F, ) { - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(child_info) { essence.for_keys(child_info, f) } -======= - child_info: &ChildInfo, - f: F, - ) { - self.essence.for_keys_in_child_storage(child_info, f) ->>>>>>> child_trie_w3_change } fn for_child_keys_with_prefix( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, prefix: &[u8], f: F, ) { -<<<<<<< HEAD - if let Ok(Some(essence)) = self.child_essence(storage_key) { + if let Ok(Some(essence)) = self.child_essence(child_info) { essence.for_keys_with_prefix(child_info, prefix, f) } -======= - self.essence.for_child_keys_with_prefix(child_info, prefix, f) ->>>>>>> child_trie_w3_change } fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { @@ -245,10 +206,6 @@ impl, H: Hasher> Backend for TrieBackend where fn child_storage_root( &self, -<<<<<<< HEAD - storage_key: &[u8], -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, delta: I, ) -> (H::Out, bool, Self::Transaction) @@ -261,12 +218,8 @@ impl, H: Hasher> Backend for TrieBackend where }; let mut write_overlay = S::Overlay::default(); -<<<<<<< HEAD - let mut root: H::Out = match self.storage(storage_key) { -======= let prefixed_storage_key = child_info.prefixed_storage_key(); let mut root = match self.storage(prefixed_storage_key.as_slice()) { ->>>>>>> child_trie_w3_change Ok(value) => value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()), Err(e) => { @@ -284,12 
+237,7 @@ impl, H: Hasher> Backend for TrieBackend where &mut write_overlay, ); -<<<<<<< HEAD match delta_trie_root::, _, _, _, _>( -======= - match child_delta_trie_root::, _, _, _, _, _>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &mut eph, root, delta @@ -316,9 +264,9 @@ impl, H: Hasher> TrieBackend where { fn child_essence<'a>( &'a self, - storage_key: &[u8], + child_info: &ChildInfo, ) -> Result>, >::Error> { - let root: Option = self.storage(storage_key)? + let root: Option = self.storage(&child_info.prefixed_storage_key()[..])? .and_then(|encoded_root| Decode::decode(&mut &encoded_root[..]).ok()); Ok(if let Some(root) = root { Some(TrieBackendEssence::new(self.essence.backend_storage(), root)) @@ -336,23 +284,13 @@ pub mod tests { use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut}; use super::*; -<<<<<<< HEAD - const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; -======= const CHILD_KEY_1: &[u8] = b"sub1"; ->>>>>>> child_trie_w3_change fn test_db() -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { -<<<<<<< HEAD -======= - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); ->>>>>>> child_trie_w3_change let mut trie = TrieDBMut::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); @@ -387,14 +325,9 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { - let child_info1 = ChildInfo::new_default(CHILD_UUID_1); let test_trie = test_trie(); assert_eq!( -<<<<<<< HEAD - test_trie.child_storage(CHILD_KEY_1, &child_info1, b"value3").unwrap(), -======= test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), ->>>>>>> child_trie_w3_change Some(vec![142u8]), ); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs 
b/primitives/state-machine/src/trie_backend_essence.rs index f46fc96ce685e..0dc7174c205f7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -24,13 +24,8 @@ use log::{debug, warn}; use sp_core::Hasher; use hash_db::{self, EMPTY_PREFIX, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, -<<<<<<< HEAD - read_trie_value, check_if_empty_root, - for_keys_in_trie, TrieDBIterator}; -======= - empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; ->>>>>>> child_trie_w3_change + check_if_empty_root, read_trie_value, + TrieDBIterator, for_keys_in_trie}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -79,67 +74,10 @@ impl, H: Hasher> TrieBackendEssence where H::O /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in /// lexicographic order. -<<<<<<< HEAD pub fn next_storage_key(&self, child_info: &ChildInfo, key: &[u8]) -> Result, String> { let eph = BackendStorageDBRef::new(&self.storage, child_info); let trie = TrieDB::::new(&eph, &self.root) -======= - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Access the root of the child storage in its parent trie - fn child_root(&self, child_info: &ChildInfo) -> Result, String> { - self.storage(child_info.prefixed_storage_key().as_slice()) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. - pub fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.child_root(child_info)? 
{ - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key())); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. - fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option<&ChildInfo>, - key: &[u8], - ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } - - let trie = TrieDB::::new(dyn_eph, root) ->>>>>>> child_trie_w3_change .map_err(|e| format!("TrieDB creation error: {}", e))?; let mut iter = trie.iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; @@ -181,53 +119,11 @@ impl, H: Hasher> TrieBackendEssence where H::O pub fn for_keys( &self, child_info: &ChildInfo, -<<<<<<< HEAD f: F, ) { let eph = BackendStorageDBRef::new(&self.storage, child_info); if let Err(e) = for_keys_in_trie::, _, BackendStorageDBRef>( -======= - key: &[u8], - ) -> Result, String> { - let root = self.child_root(child_info)? 
- .unwrap_or(empty_child_trie_root::>().encode()); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( - &self, - child_info: &ChildInfo, - f: F, - ) { - let root = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - child_info.keyspace(), ->>>>>>> child_trie_w3_change &eph, &self.root, f, @@ -237,32 +133,8 @@ impl, H: Hasher> TrieBackendEssence where H::O } /// Execute given closure for all keys starting with prefix. -<<<<<<< HEAD pub fn for_keys_with_prefix(&self, child_info: &ChildInfo, prefix: &[u8], mut f: F) { self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), child_info) -======= - pub fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.child_root(child_info) { - Ok(v) => v.unwrap_or(empty_child_trie_root::>().encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - - /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) ->>>>>>> child_trie_w3_change } fn keys_values_with_prefix_inner( @@ -270,11 +142,7 @@ impl, H: Hasher> TrieBackendEssence where H::O root: &H::Out, prefix: &[u8], mut f: F, -<<<<<<< HEAD child_info: &ChildInfo, -======= - child_info: Option<&ChildInfo>, ->>>>>>> child_trie_w3_change ) { let eph = BackendStorageDBRef::new(&self.storage, child_info); @@ -615,13 +483,8 @@ mod test { } { let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); -<<<<<<< HEAD - // using top trie as child trie (both with same content) - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); -======= trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref()) .expect("insert failed"); ->>>>>>> child_trie_w3_change }; let essence_1 = TrieBackend::new(mdb, root_1); @@ -636,21 +499,6 @@ mod test { let essence_2 = TrieBackend::new(mdb, root_2); assert_eq!( -<<<<<<< HEAD - essence_2.next_child_storage_key(b"MyChild", &child_info, b"2"), Ok(Some(b"3".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"3"), Ok(Some(b"4".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"4"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"5"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", &child_info, b"6"), Ok(None) -======= essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec())) ); assert_eq!( @@ -664,7 +512,6 @@ mod test { ); assert_eq!( essence_2.next_child_storage_key(child_info, b"6"), Ok(None) ->>>>>>> child_trie_w3_change ); } } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 95ebc33331851..12d7f124030f5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -136,76 +136,6 @@ pub mod 
well_known_keys { } } -<<<<<<< HEAD -/// A wrapper around a child storage key. -/// -/// This wrapper ensures that the child storage key is correct and properly used. It is -/// impossible to create an instance of this struct without providing a correct `storage_key`. -pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, -} - -impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. - fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. - /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } - - /// Return true if the variable part of the key is empty. - pub fn is_empty(&self) -> bool { - well_known_keys::is_child_trie_key_empty(&*self.storage_key) - } - -} - - -/// Information related to a child state. -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub enum ChildInfo { - Default(ChildTrie), -} - -impl ChildInfo { - /// Create a new child trie information for default - /// child type. 
- pub fn new_default(unique_id: &[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id.to_vec(), -======= /// Information related to a child state. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] @@ -227,7 +157,6 @@ impl ChildInfo { pub fn new_default_from_vec(storage_key: Vec) -> Self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { data: storage_key, ->>>>>>> child_trie_w3_change }) } @@ -235,20 +164,7 @@ impl ChildInfo { /// are not compatible. pub fn try_update(&mut self, other: &ChildInfo) -> bool { match self { -<<<<<<< HEAD - ChildInfo::Default(child_trie) => child_trie.try_update(other), -======= ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other), ->>>>>>> child_trie_w3_change - } - } - - /// Create child info from a linear byte packed value and a given type. -<<<<<<< HEAD - pub fn resolve_child_info(child_type: u32, data: &[u8]) -> Option { - match ChildType::new(child_type) { - Some(ChildType::CryptoUniqueId) => Some(ChildInfo::new_default(data)), - None => None, } } @@ -262,8 +178,11 @@ impl ChildInfo { /// 0 length unique id. pub fn is_top_trie(&self) -> bool { match self { - ChildInfo::Default(ChildTrie { data }) => data.len() == 0 -======= + ChildInfo::ParentKeyId(ChildTrieParentKeyId { data }) => data.len() == 0, + } + } + + /// Create child info from a linear byte packed value and a given type. pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => { @@ -273,7 +192,6 @@ impl ChildInfo { Some(Self::new_default(info)) }, None => None, ->>>>>>> child_trie_w3_change } } @@ -316,12 +234,6 @@ impl ChildInfo { } } -<<<<<<< HEAD - /// Return type for child trie. - pub fn child_type(&self) -> ChildType { - match self { - ChildInfo::Default(..) => ChildType::CryptoUniqueId, -======= /// Return a the full location in the direct parent of /// this trie. 
pub fn prefixed_storage_key(&self) -> Vec { @@ -349,7 +261,6 @@ impl ChildInfo { pub fn child_type(&self) -> ChildType { match self { ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId, ->>>>>>> child_trie_w3_change } } } @@ -367,14 +278,6 @@ pub enum ChildType { } impl ChildType { -<<<<<<< HEAD - fn new(repr: u32) -> Option { - Some(match repr { - r if r == ChildType::CryptoUniqueId as u32 => ChildType::CryptoUniqueId, - _ => return None, - }) - } -======= /// Try to get a child type from its `u32` representation. pub fn new(repr: u32) -> Option { Some(match repr { @@ -410,24 +313,9 @@ impl ChildType { &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } } ->>>>>>> child_trie_w3_change -} -/// A child trie of default type. -<<<<<<< HEAD -/// Default is the same implementation as the top trie. -/// It share its trie node storage with any kind of key, -/// and its unique id needs to be collision free (eg strong -/// crypto hash). -#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] -pub struct ChildTrie { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: Vec, } -impl ChildTrie { -======= +/// A child trie of default type. /// It uses the same default implementation as the top trie, /// top trie being a child trie with no keyspace and no storage key. /// Its keyspace is the variable (unprefixed) part of its storage key. @@ -442,7 +330,6 @@ pub struct ChildTrieParentKeyId { } impl ChildTrieParentKeyId { ->>>>>>> child_trie_w3_change /// Try to update with another instance, return false if both instance /// are not compatible. fn try_update(&mut self, other: &ChildInfo) -> bool { @@ -452,7 +339,6 @@ impl ChildTrieParentKeyId { } } -<<<<<<< HEAD #[cfg(feature = "std")] #[derive(Clone, PartialEq, Eq, Debug)] /// Type for storing a map of child trie related information. 
@@ -542,7 +428,8 @@ impl IntoIterator for ChildrenMap { fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } -======= +} + const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:"; #[test] @@ -551,5 +438,4 @@ fn test_prefix_default_child_info() { let prefix = child_info.child_type().parent_prefix(); assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX)); assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX)); ->>>>>>> child_trie_w3_change } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fe71fc88b7009..07c118666b846 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -209,10 +209,8 @@ pub fn read_trie_value_with< Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } -<<<<<<< HEAD -/// Determine the default child trie root. -pub fn default_child_trie_root( - _storage_key: &[u8], +/// Determine the empty child trie root. +pub fn empty_child_trie_root( ) -> ::Out { L::trie_root::<_, Vec, Vec>(core::iter::empty()) } @@ -226,65 +224,6 @@ pub fn check_if_empty_root ( /// Call `f` for all keys in a child trie. pub fn for_keys_in_trie( -======= -/// Determine the empty child trie root. -pub fn empty_child_trie_root( -) -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) -} - -/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - input: I, -) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, -{ - L::trie_root(input) -} - -/// Determine a child trie root given a hash DB and delta values. H is the default hasher, -/// but a generic implementation may ignore this type parameter and use other hashers. 
-pub fn child_delta_trie_root( - keyspace: &[u8], - db: &mut DB, - root_data: RD, - delta: I, -) -> Result<::Out, Box>> - where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_data.as_ref()); - - { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) -} - -/// Call `f` for all keys in a child trie. -pub fn for_keys_in_child_trie( - keyspace: &[u8], ->>>>>>> child_trie_w3_change db: &DB, root: &TrieHash, mut f: F @@ -304,7 +243,6 @@ pub fn for_keys_in_child_trie( Ok(()) } - /// Record all keys for a given root. pub fn record_all_keys( db: &DB, @@ -328,144 +266,6 @@ pub fn record_all_keys( Ok(()) } -<<<<<<< HEAD -======= -/// Read a value from the child trie. -pub fn read_child_trie_value( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) -} - -/// Read a value from the child trie with given query. 
-pub fn read_child_trie_value_with, DB>( - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q -) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, -{ - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) -} - -/// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -pub struct KeySpacedDB<'a, DB, H>(&'a DB, &'a [u8], PhantomData); - -/// `HashDBMut` implementation that append a encoded prefix (unique id bytes) in addition to the -/// prefix of every key value. -/// -/// Mutable variant of `KeySpacedDB`, see [`KeySpacedDB`]. -pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); - -/// Utility function used to merge some byte data (keyspace) and `prefix` data -/// before calling key value database primitives. 
-fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) -} - -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } -} - -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, -{ - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } -} - -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: Hasher, - T: From<&'static [u8]>, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let 
derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } -} - -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, -{ - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } -} - ->>>>>>> child_trie_w3_change /// Constants used into trie simplification codec. mod trie_constants { pub const EMPTY_TRIE: u8 = 0; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index e56a434c221dc..646238726d859 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -131,11 +131,6 @@ impl TestClientBuilder, -<<<<<<< HEAD - child_key: impl AsRef<[u8]>, - child_info: &ChildInfo, -======= ->>>>>>> child_trie_w3_change value: impl AsRef<[u8]>, ) -> Self { let storage_key = child_info.storage_key(); diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 4bd3b261c60b6..7685157d96a13 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -195,10 +195,6 @@ pub trait TestClientBuilderExt: Sized { /// Panics if the key is empty. fn add_extra_child_storage>, V: Into>>( mut self, -<<<<<<< HEAD - storage_key: SK, -======= ->>>>>>> child_trie_w3_change child_info: &ChildInfo, key: K, value: V, From b6b70f325d9d651d8e8641e6d8acc3d409870f6a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 15:31:46 +0100 Subject: [PATCH 56/85] revert rpc related default renaming. fix sp io deprecated. 
--- client/network/src/on_demand_layer.rs | 2 +- client/network/src/protocol.rs | 8 ++- .../src/protocol/light_client_handler.rs | 55 +++++++++++-------- client/network/src/protocol/light_dispatch.rs | 14 ++--- client/rpc/src/state/mod.rs | 4 +- primitives/io/src/lib.rs | 45 +++++++-------- primitives/storage/src/lib.rs | 11 ++-- 7 files changed, 74 insertions(+), 65 deletions(-) diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index 3a20cb9548a76..d672ed0b7f569 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -106,7 +106,7 @@ impl Fetcher for OnDemand where request: RemoteReadChildRequest ) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteReadDefaultChild(request, sender)); + let _ = self.requests_send.unbounded_send(RequestData::RemoteReadChild(request, sender)); RemoteResponse { receiver } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 00984dcf3cbb6..dcb75e2a228c7 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::storage::{StorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -1517,7 +1517,11 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = ChildInfo::new_default(&request.storage_key); + let child_info = if let Some(ChildType::ParentKeyId) = ChildType::new(request.child_type) { + 
ChildInfo::new_default(&request.storage_key) + } else { + return; + }; let proof = match self.context_data.chain.read_child_proof( &request.block, &child_info, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index a7d3bf4dbbfe3..68adb8600fcc2 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, StorageKey}; +use sp_core::storage::{ChildInfo, ChildType, StorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::traits::{Block, Header, NumberFor, Zero}; use std::{ @@ -170,7 +170,7 @@ pub enum Request { request: fetcher::RemoteReadRequest, sender: oneshot::Sender, Option>>, ClientError>> }, - ReadDefaultChild { + ReadChild { request: fetcher::RemoteReadChildRequest, sender: oneshot::Sender, Option>>, ClientError>> }, @@ -368,7 +368,7 @@ where let reply = self.checker.check_read_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) } - Request::ReadDefaultChild { request, .. } => { + Request::ReadChild { request, .. 
} => { let proof = Decode::decode(&mut response.proof.as_ref())?; let reply = self.checker.check_read_child_proof(&request, proof)?; Ok(Reply::MapVecU8OptVecU8(reply)) @@ -514,19 +514,30 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = ChildInfo::new_default(&request.storage_key); - let proof = match self.chain.read_child_proof(&block, &child_info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() + let proof = if let Some(child_type) = ChildType::new(request.child_type) { + let child_info = ChildInfo::new_default(&request.storage_key); + match self.chain.read_child_proof(&block, &child_info, &request.keys) { + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() + } } + } else { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + "Unknown child type"); + StorageProof::empty() }; let response = { @@ -889,7 +900,7 @@ fn required_block(request: &Request) -> NumberFor { match request { Request::Header { request, .. } => request.block, Request::Read { request, .. } => *request.header.number(), - Request::ReadDefaultChild { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), Request::Call { request, .. } => *request.header.number(), Request::Changes { request, .. 
} => request.max_block.0, } @@ -899,7 +910,7 @@ fn retries(request: &Request) -> usize { let rc = match request { Request::Header { request, .. } => request.retry_count, Request::Read { request, .. } => request.retry_count, - Request::ReadDefaultChild { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, Request::Call { request, .. } => request.retry_count, Request::Changes { request, .. } => request.retry_count, }; @@ -919,7 +930,7 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: }; api::v1::light::request::Request::RemoteReadRequest(r) } - Request::ReadDefaultChild { request, .. } => { + Request::ReadChild { request, .. } => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), @@ -967,7 +978,7 @@ fn send_reply(result: Result, ClientError>, request: Request< Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), } - Request::ReadDefaultChild { request, sender } => match result { + Request::ReadChild { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), @@ -1547,7 +1558,7 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } - Request::ReadDefaultChild{..} => { + Request::ReadChild{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { id: 1, @@ -1630,7 +1641,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; - issue_request(Request::ReadDefaultChild { request, sender: chan.0 }); + issue_request(Request::ReadChild { request, sender: chan.0 }); assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) } @@ -1731,7 +1742,7 @@ mod tests { keys: vec![b":key".to_vec()], retry_count: None, }; 
- send_receive(Request::ReadDefaultChild { request, sender: chan.0 }); + send_receive(Request::ReadChild { request, sender: chan.0 }); assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); // ^--- from `DummyFetchChecker::check_read_child_proof` } diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index e56cffaf83817..74cc1bcd3c172 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -148,7 +148,7 @@ pub(crate) enum RequestData { RemoteReadRequest, OneShotSender, Option>>, ClientError>>, ), - RemoteReadDefaultChild( + RemoteReadChild( RemoteReadChildRequest, OneShotSender, Option>>, ClientError>> ), @@ -404,7 +404,7 @@ impl LightDispatch where RequestData::RemoteRead(request, sender) ), }}, - RequestData::RemoteReadDefaultChild(request, sender) => { + RequestData::RemoteReadChild(request, sender) => { match checker.check_read_child_proof(&request, response.proof) { Ok(response) => { // we do not bother if receiver has been dropped already @@ -413,7 +413,7 @@ impl LightDispatch where }, Err(error) => Accept::CheckFailed( error, - RequestData::RemoteReadDefaultChild(request, sender) + RequestData::RemoteReadChild(request, sender) ), }}, data => Accept::Unexpected(data), @@ -596,7 +596,7 @@ impl Request { match self.data { RequestData::RemoteHeader(ref data, _) => data.block, RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteReadDefaultChild(ref data, _) => *data.header.number(), + RequestData::RemoteReadChild(ref data, _) => *data.header.number(), RequestData::RemoteCall(ref data, _) => *data.header.number(), RequestData::RemoteChanges(ref data, _) => data.max_block.0, RequestData::RemoteBody(ref data, _) => *data.header.number(), @@ -618,7 +618,7 @@ impl Request { data.block, data.keys.clone(), ), - RequestData::RemoteReadDefaultChild(ref data, _) => + 
RequestData::RemoteReadChild(ref data, _) => out.send_read_child_request( peer, self.id, @@ -667,7 +667,7 @@ impl RequestData { RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteReadDefaultChild(_, sender) => { let _ = sender.send(Err(error)); }, + RequestData::RemoteReadChild(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, RequestData::RemoteBody(_, sender) => { let _ = sender.send(Err(error)); }, } @@ -1042,7 +1042,7 @@ pub mod tests { light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadDefaultChild( + light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild( RemoteReadChildRequest { header: dummy_header(), block: Default::default(), diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index a25828a869b00..58313236be06b 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -103,8 +103,8 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a defaultchild storage, - /// leave empty to get all the keys + /// Returns the keys with prefix from a child storage, + /// leave prefix empty to get all the keys. 
fn child_storage_keys( &self, block: Option, diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ef18e3f3dd496..c99e3ce3ced8d 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -37,7 +37,7 @@ use sp_core::{ traits::{KeystoreExt, CallInWasmExt}, offchain::{OffchainExt, TransactionPoolExt}, hexdisplay::HexDisplay, - storage::ChildInfo, + storage::{ChildInfo, ChildType}, }; use sp_core::{ @@ -68,11 +68,11 @@ pub enum EcdsaVerifyError { BadSignature, } -/// Deprecated function, ensure that this is a default prefixed key. #[cfg(feature = "std")] -fn child_storage_key_or_panic(storage_key: &[u8]) { - if !storage_key.starts_with(&ChildInfo::new_default(&[]).prefixed_storage_key()[..]) { - panic!("child storage key is invalid") +fn deprecated_storage_key_prefix_check(storage_key: &[u8]) { + let prefix = ChildType::ParentKeyId.parent_prefix(); + if !storage_key.starts_with(prefix) { + panic!("Invalid storage key"); } } @@ -153,8 +153,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> Option> { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.child_storage(&child_info, key).map(|s| s.to_vec()) @@ -170,8 +169,7 @@ pub trait Storage { value_out: &mut [u8], value_offset: u32, ) -> Option { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.child_storage(&child_info, key) @@ -193,8 +191,7 @@ pub trait Storage { key: &[u8], value: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + 
deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); @@ -208,8 +205,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.clear_child_storage(&child_info, key); @@ -222,8 +218,7 @@ pub trait Storage { child_definition: &[u8], child_type: u32, ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.kill_child_storage(&child_info); @@ -237,8 +232,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> bool { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.exists_child_storage(&child_info, key) @@ -252,8 +246,7 @@ pub trait Storage { child_type: u32, prefix: &[u8], ) { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.clear_child_prefix(&child_info, prefix); @@ -264,8 +257,13 @@ pub trait Storage { &mut self, storage_key: &[u8], ) -> Vec { - child_storage_key_or_panic(storage_key); - let child_info = ChildInfo::new_default(storage_key); + let prefix = 
ChildType::ParentKeyId.parent_prefix(); + if !storage_key.starts_with(prefix) { + panic!("Invalid storage key"); + } + let storage_key = &storage_key[..prefix.len()]; + let child_info = ChildInfo::resolve_child_info(ChildType::ParentKeyId as u32, storage_key) + .expect("Invalid storage key"); self.child_storage_root(&child_info) } @@ -277,8 +275,7 @@ pub trait Storage { child_type: u32, key: &[u8], ) -> Option> { - child_storage_key_or_panic(storage_key); - if child_type != 1 { panic!("Invalid child definition"); } + deprecated_storage_key_prefix_check(storage_key); let child_info = ChildInfo::resolve_child_info(child_type, child_definition) .expect("Invalid child definition"); self.next_child_storage_key(&child_info, key) @@ -291,7 +288,7 @@ pub trait Storage { /// from within the runtime. #[runtime_interface] pub trait DefaultChildStorage { - /// `storage_key` is the full location of the root of the child trie in the parent trie. + /// `storage_key` is the unprefixed location of the root of the child trie in the parent trie. /// /// This function specifically returns the data for `key` in the child storage or `None` /// if the key can not be found. diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index b89a4c43450c5..7330444ff476c 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -161,14 +161,11 @@ impl ChildInfo { } } - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, info: &[u8]) -> Option { + /// Create child info from a prefixed storage key and a given type. 
+ pub fn resolve_child_info(child_type: u32, storage_key: &[u8]) -> Option { match ChildType::new(child_type) { Some(ChildType::ParentKeyId) => { - debug_assert!( - info.starts_with(ChildType::ParentKeyId.parent_prefix()) - ); - Some(Self::new_default(info)) + Some(Self::new_default(storage_key)) }, None => None, } @@ -287,7 +284,7 @@ impl ChildType { /// Returns the location reserved for this child trie in their parent trie if there /// is one. - fn parent_prefix(&self) -> &'static [u8] { + pub fn parent_prefix(&self) -> &'static [u8] { match self { &ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX, } From d563278a484d5e9ea732ad629ea84340ef6de7c6 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 15:39:53 +0100 Subject: [PATCH 57/85] fix slice indexing for child root --- primitives/io/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index c99e3ce3ced8d..62f12dfd5b5e4 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -261,7 +261,7 @@ pub trait Storage { if !storage_key.starts_with(prefix) { panic!("Invalid storage key"); } - let storage_key = &storage_key[..prefix.len()]; + let storage_key = &storage_key[prefix.len()..]; let child_info = ChildInfo::resolve_child_info(ChildType::ParentKeyId as u32, storage_key) .expect("Invalid storage key"); self.child_storage_root(&child_info) From a85bb3860e619cbabb9b4ea0e584323f23159a87 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 17:02:20 +0100 Subject: [PATCH 58/85] Switch back to prefixed storage key for rpc. 
--- client/api/src/light.rs | 2 -- client/network/src/protocol.rs | 9 +++------ .../src/protocol/light_client_handler.rs | 13 ++++++------- client/network/src/protocol/light_dispatch.rs | 7 ++----- client/network/src/protocol/message.rs | 2 -- .../network/src/protocol/schema/light.v1.proto | 3 --- client/rpc-api/src/state/mod.rs | 4 ---- client/rpc/src/state/mod.rs | 18 +++++------------- client/rpc/src/state/state_full.rs | 15 ++++++--------- client/rpc/src/state/state_light.rs | 6 +----- client/rpc/src/state/tests.rs | 10 ++++------ client/src/light/fetcher.rs | 7 +++---- primitives/storage/src/lib.rs | 18 +++++++++++++++++- 13 files changed, 47 insertions(+), 67 deletions(-) diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 67376947d3913..2911d77f18209 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -82,8 +82,6 @@ pub struct RemoteReadChildRequest { pub header: Header, /// Storage key for child. pub storage_key: Vec, - /// Child type. - pub child_type: u32, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. 
diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index dcb75e2a228c7..ea109c0c48701 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -253,7 +253,6 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id: RequestId, block: ::Hash, storage_key: Vec, - child_type: u32, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest( @@ -261,7 +260,6 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { id, block, storage_key, - child_type, keys, }); @@ -1517,10 +1515,9 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = if let Some(ChildType::ParentKeyId) = ChildType::new(request.child_type) { - ChildInfo::new_default(&request.storage_key) - } else { - return; + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return, }; let proof = match self.context_data.chain.read_child_proof( &request.block, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 68adb8600fcc2..d951b58f7bca5 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,8 +514,10 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = if let Some(child_type) = ChildType::new(request.child_type) { - let child_info = ChildInfo::new_default(&request.storage_key); + let proof = if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key( + &request.storage_key, + ) { + let child_info = ChildInfo::new_default(storage_key); match self.chain.read_child_proof(&block, &child_info, &request.keys) { Ok(proof) => proof, Err(error) => { @@ -934,7 +936,6 
@@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), storage_key: request.storage_key.clone(), - child_type: request.child_type, keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -1636,8 +1637,7 @@ mod tests { let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1737,8 +1737,7 @@ mod tests { let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b"sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }; diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 74cc1bcd3c172..8cd6ce51c49fa 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -70,7 +70,6 @@ pub trait LightDispatchNetwork { id: RequestId, block: ::Hash, storage_key: Vec, - child_type: u32, keys: Vec>, ); @@ -624,7 +623,6 @@ impl Request { self.id, data.block, data.storage_key.clone(), - data.child_type, data.keys.clone(), ), RequestData::RemoteCall(ref data, _) => @@ -820,7 +818,7 @@ pub mod tests { fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: u32, _: Vec>) {} + _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, _: ::Hash, _: ::Hash, _: 
Option>, _: Vec) {} @@ -1046,8 +1044,7 @@ pub mod tests { RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b"sub".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:sub".to_vec(), keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 1481ec55b7ff4..0539d96234ca6 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -420,8 +420,6 @@ pub mod generic { pub block: H, /// Child Storage key. pub storage_key: Vec, - /// Child type. - pub child_type: u32, /// Storage key. pub keys: Vec>, } diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index c4aff40c9626d..fd970c79b4757 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -74,9 +74,6 @@ message RemoteReadChildRequest { // Child Storage key, this is relative // to the child type storage location. bytes storage_key = 3; - /// Child type, its required to resolve - /// child storage final location. - uint32 child_type = 5; // Storage keys. 
repeated bytes keys = 6; } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 41690134009b8..3263b6a4cc606 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -77,7 +77,6 @@ pub trait StateApi { fn child_storage_keys( &self, child_storage_key: StorageKey, - child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -87,7 +86,6 @@ pub trait StateApi { fn child_storage( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -97,7 +95,6 @@ pub trait StateApi { fn child_storage_hash( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; @@ -107,7 +104,6 @@ pub trait StateApi { fn child_storage_size( &self, child_storage_key: StorageKey, - child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 58313236be06b..f07d06578f42e 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -109,7 +109,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -118,7 +117,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -127,7 +125,6 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -136,10 +133,9 @@ pub trait StateBackend: Send + Sync + 'static &self, block: Option, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, child_type, key) + Box::new(self.child_storage(block, storage_key, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -300,41 +296,37 @@ impl StateApi for State fn child_storage( &self, 
storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, child_type, key) + self.backend.child_storage(block, storage_key, key) } fn child_storage_keys( &self, storage_key: StorageKey, - child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, child_type, key_prefix) + self.backend.child_storage_keys(block, storage_key, key_prefix) } fn child_storage_hash( &self, storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, child_type, key) + self.backend.child_storage_hash(block, storage_key, key) } fn child_storage_size( &self, storage_key: StorageKey, - child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_size(block, storage_key, child_type, key) + self.backend.child_storage_size(block, storage_key, key) } fn metadata(&self, block: Option) -> FutureResult { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 508ff8c74417d..40cf3ade504b8 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -310,14 +310,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage_keys( @@ -333,14 +332,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { 
Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage( @@ -356,14 +354,13 @@ impl StateBackend for FullState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::new(child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default_from_vec(storage_key.0), + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; self.client.child_storage_hash( diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 80d43f8ccee82..0af0b45cd8b45 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -250,7 +250,6 @@ impl StateBackend for LightState, _storage_key: StorageKey, - _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -260,7 +259,6 @@ impl StateBackend for LightState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -271,7 +269,6 @@ impl StateBackend for LightState StateBackend for LightState, storage_key: StorageKey, - child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, child_type, key) + Box::new(self.child_storage(block, storage_key, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| 
HasherFor::::hash(&storage.0)))) ) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index df7c83e1dfe87..36a8f1ff0fc86 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -31,6 +31,7 @@ use substrate_test_runtime_client::{ }; const STORAGE_KEY: &[u8] = b"child"; +const PREFIXED_STORAGE_KEY: &[u8] = b":child_storage:default:child"; #[test] fn should_return_storage() { @@ -47,7 +48,7 @@ fn should_return_storage() { let genesis_hash = client.genesis_hash(); let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(STORAGE_KEY.to_vec()); + let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -65,7 +66,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, 1, key, Some(genesis_hash).into()) + client.child_storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -82,14 +83,13 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(STORAGE_KEY.to_vec()); + let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); assert_matches!( client.child_storage( child_key.clone(), - 1, key.clone(), Some(genesis_hash).into(), ).wait(), @@ -98,7 +98,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_hash( child_key.clone(), - 1, key.clone(), Some(genesis_hash).into(), ).wait().map(|x| x.is_some()), @@ -107,7 +106,6 @@ fn should_return_child_storage() { assert_matches!( client.child_storage_size( child_key.clone(), - 1, key.clone(), None, ).wait(), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 
7a7ef6e0a91df..ce3c2719cf705 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -241,8 +241,8 @@ impl FetchChecker for LightDataChecker request: &RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_info = match ChildType::new(request.child_type) { - Some(ChildType::ParentKeyId) => ChildInfo::new_default(&request.storage_key[..]), + let child_info = match ChildType::from_prefixed_key(&request.storage_key[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; read_child_proof_check::( @@ -509,8 +509,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b"child1".to_vec(), - child_type: 1, + storage_key: b":child_storage:default:child1".to_vec(), keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 7330444ff476c..984c8e4738796 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -262,6 +262,20 @@ impl ChildType { }) } + /// Transform a prefixed key into a tuple of the child type + /// and the unprefixed representation of the key. + pub fn from_prefixed_key<'a>(storage_key: &'a [u8]) -> Option<(Self, &'a [u8])> { + let match_type = |storage_key: &'a [u8], child_type: ChildType| { + let prefix = child_type.parent_prefix(); + if storage_key.starts_with(prefix) { + Some((child_type, &storage_key[prefix.len()..])) + } else { + None + } + }; + match_type(storage_key, ChildType::ParentKeyId) + } + /// Produce a prefixed key for a given child type. fn new_prefixed_key(&self, key: &[u8]) -> Vec { let parent_prefix = self.parent_prefix(); @@ -298,10 +312,12 @@ impl ChildType { /// It shares its trie nodes backend storage with every other /// child trie, so its storage key needs to be a unique id /// that will be use only once. +/// Those unique id also required to be long enough to avoid any +/// unique id to be prefixed by an other unique id. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub struct ChildTrieParentKeyId { - /// Data is the full prefixed storage key. + /// Data is the storage key without prefix. 
data: Vec, } From 5f98fbe9d9f3a4ba35028f21c1bbe054be3e3433 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 17:25:39 +0100 Subject: [PATCH 59/85] rpc error discrepancy --- client/network/src/protocol.rs | 8 ++-- .../src/protocol/light_client_handler.rs | 40 ++++++++----------- client/rpc-api/src/state/mod.rs | 2 +- client/rpc/src/state/state_full.rs | 6 +-- 4 files changed, 24 insertions(+), 32 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index ea109c0c48701..e9135166ceebd 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1516,14 +1516,14 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); let child_info = match ChildType::from_prefixed_key(&request.storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return, + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), }; - let proof = match self.context_data.chain.read_child_proof( + let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( &request.block, &child_info, &request.keys, - ) { + )) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index d951b58f7bca5..e661bf672554d 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,32 +514,24 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let proof = if let Some((ChildType::ParentKeyId, storage_key)) = ChildType::from_prefixed_key( - &request.storage_key, + let child_info = match 
ChildType::from_prefixed_key(&request.storage_key) { + Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), + None => Err("Invalid child storage key".into()), + }; + let proof = match child_info.and_then(|child_info| + self.chain.read_child_proof(&block, &child_info, &request.keys) ) { - let child_info = ChildInfo::new_default(storage_key); - match self.chain.read_child_proof(&block, &child_info, &request.keys) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } + Ok(proof) => proof, + Err(error) => { + log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", + request_id, + peer, + request.storage_key.to_hex::(), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error); + StorageProof::empty() } - } else { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, - peer, - request.storage_key.to_hex::(), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - "Unknown child type"); - StorageProof::empty() }; let response = { diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 3263b6a4cc606..48d363bb8921c 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -100,7 +100,7 @@ pub trait StateApi { ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getDefaultStorageSize")] + #[rpc(name = "state_getChildStorageSize")] fn child_storage_size( &self, child_storage_key: StorageKey, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 40cf3ade504b8..8727810c83291 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -317,7 +317,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage_keys( &BlockId::Hash(block), @@ -339,7 +339,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage( &BlockId::Hash(block), @@ -361,7 +361,7 @@ impl StateBackend for FullState ChildInfo::new_default(storage_key), - None => return Err("Invalid child type".into()), + None => return Err("Invalid child storage key".into()), }; self.client.child_storage_hash( &BlockId::Hash(block), From d85dec23be5dc88cd2741cc9b234a74df6b2c716 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 Feb 2020 19:38:34 +0100 Subject: [PATCH 60/85] Fix test --- primitives/runtime-interface/test-wasm/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 2e1ab52d67741..3ac746e55c6d4 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -233,7 +233,7 @@ wasm_export_functions! 
{ } fn test_ext_blake2_256() { - use sp_core::Hasher; + use sp_core::InnerHasher; let data = "hey, hash me please!"; let hash = sp_core::Blake2Hasher::hash(data.as_bytes()); From 9f704c9a00a599b17b6ccf2893d18fceaf538747 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 28 Feb 2020 12:51:49 +0100 Subject: [PATCH 61/85] use correct parameter in overlay and fix change trie. --- .../state-machine/src/changes_trie/build.rs | 49 +++++++++---------- primitives/state-machine/src/ext.rs | 6 +-- .../state-machine/src/overlayed_changes.rs | 6 +-- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c206090fa4e18..cf1a2e3bfba1b 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -32,6 +32,7 @@ use crate::{ input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, }, }; +use sp_core::storage::{ChildInfo, ChildType}; /// Prepare input pairs for building a changes trie of given block. 
/// @@ -105,19 +106,19 @@ fn prepare_extrinsics_input<'a, B, H, Number>( Number: BlockNumber, { - let mut children_prefixed_keys = BTreeSet::::new(); + let mut children_info = BTreeSet::::new(); let mut children_result = BTreeMap::new(); for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter() .chain(changes.committed.children_default.iter()) { - children_prefixed_keys.insert(child_info.prefixed_storage_key()); + children_info.insert(child_info.clone()); } - for storage_key in children_prefixed_keys { + for child_info in children_info { let child_index = ChildIndex:: { block: block.clone(), - storage_key: storage_key.clone(), + storage_key: child_info.prefixed_storage_key(), }; - let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?; + let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(child_info))?; children_result.insert(child_index, iter); } @@ -130,22 +131,22 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( backend: &'a B, block: &Number, changes: &'a OverlayedChanges, - storage_key: Option, + child_info: Option, ) -> Result> + 'a, String> where B: Backend, H: Hasher, Number: BlockNumber, { - let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.default_child_info(sk).cloned(); - ( - changes.committed.children_default.get(sk).map(|c| &c.0), - changes.prospective.children_default.get(sk).map(|c| &c.0), - child_info, - ) + let (committed, prospective) = if let Some(child_info) = child_info.as_ref() { + match child_info.child_type() { + ChildType::ParentKeyId => ( + changes.committed.children_default.get(child_info.storage_key()).map(|c| &c.0), + changes.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0), + ), + } } else { - (Some(&changes.committed.top), Some(&changes.prospective.top), None) + (Some(&changes.committed.top), Some(&changes.prospective.top)) }; committed.iter().flat_map(|c| c.iter()) 
.chain(prospective.iter().flat_map(|c| c.iter())) @@ -155,13 +156,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>( Entry::Vacant(entry) => { // ignore temporary values (values that have null value at the end of operation // AND are not in storage at the beginning of operation - if let Some(sk) = storage_key.as_ref() { - if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? { - return Ok(map); - } + if let Some(child_info) = child_info.as_ref() { + if !changes.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_child_storage(&child_info, k) + .map_err(|e| format!("{}", e))? { + return Ok(map); } } } else { @@ -344,7 +343,6 @@ mod test { use codec::Encode; use sp_core::Blake2Hasher; use sp_core::storage::well_known_keys::EXTRINSIC_INDEX; - use sp_core::storage::ChildInfo; use crate::InMemoryBackend; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; @@ -367,8 +365,9 @@ mod test { (vec![104], vec![255]), (vec![105], vec![255]), ].into_iter().collect::>().into(); - let child_trie_key1 = child_info_1.prefixed_storage_key(); - let child_trie_key2 = child_info_2.prefixed_storage_key(); + let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); + let child_trie_key1 = child_info_1.storage_key().to_vec(); + let child_trie_key2 = child_info_2.storage_key().to_vec(); let storage = InMemoryStorage::with_inputs(vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), @@ -402,7 +401,7 @@ mod test { ]), (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], 
vec![(child_trie_key1.clone(), vec![ + ], vec![(prefixed_child_trie_key1.clone(), vec![ (zero + 1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 77ae9a0820fb7..32ddac8d2e191 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -210,7 +210,7 @@ where ) -> Option { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(child_info.storage_key(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| x.to_vec())) .unwrap_or_else(|| self.backend.child_storage(child_info, key) @@ -234,7 +234,7 @@ where ) -> Option> { let _guard = sp_panic_handler::AbortGuard::force_abort(); let result = self.overlay - .child_storage(child_info.storage_key(), key) + .child_storage(child_info, key) .map(|x| x.map(|x| H::hash(x))) .unwrap_or_else(|| self.backend.child_storage_hash(child_info, key) @@ -313,7 +313,7 @@ where ) -> bool { let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.child_storage(child_info.storage_key(), key) { + let result = match self.overlay.child_storage(child_info, key) { Some(x) => x.is_some(), _ => self.backend .exists_child_storage(child_info, key) diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index b9e25fc547013..2dc56bc772d3d 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -212,14 +212,14 @@ impl OverlayedChanges { /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. 
- pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children_default.get(storage_key) { + pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option> { + if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } } - if let Some(map) = self.committed.children_default.get(storage_key) { + if let Some(map) = self.committed.children_default.get(child_info.storage_key()) { if let Some(val) = map.0.get(key) { return Some(val.value.as_ref().map(AsRef::as_ref)); } From 7c5d54642ed499c9ff0ab87ce79f4423fce77828 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 2 Mar 2020 14:37:11 +0100 Subject: [PATCH 62/85] bump version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 85889a50c20b9..d05f8fa2fb444 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -82,7 +82,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 227, + spec_version: 228, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From eff1d8c53b5f7b3655b84020c5aafab53f204242 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 12 Mar 2020 11:13:27 +0100 Subject: [PATCH 63/85] keeping inner hasher, it is needed at state-machine level. 
--- primitives/runtime/src/traits.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 690c076cd0691..0ded39fd7372e 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -25,7 +25,7 @@ use std::fmt::Display; use std::str::FromStr; #[cfg(feature = "std")] use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, InnerHasher as Hasher, TypeId, RuntimeDebug}; +use sp_core::{self, InnerHasher, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ ValidTransaction, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -378,20 +378,19 @@ pub trait OffchainWorker { // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq - + PartialEq + Hasher::Output> + sp_core::Hasher { - // TODO try fuse the alt Hasher into this?? + + PartialEq + InnerHasher::Output> + Hasher { /// The hash type produced. type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; /// Produce the hash of some byte-slice. fn hash(s: &[u8]) -> Self::Output { - ::hash(s) + ::hash(s) } /// Produce the hash of some codec-encodable value. fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, ::hash) + Encode::using_encoded(s, ::hash) } /// The ordered Patricia tree root of the given `input`. 
@@ -406,7 +405,7 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; -impl Hasher for BlakeTwo256 { +impl InnerHasher for BlakeTwo256 { type Out = sp_core::H256; type StdHasher = hash256_std_hasher::Hash256StdHasher; const LENGTH: usize = 32; @@ -428,7 +427,7 @@ impl Hash for BlakeTwo256 { } } -impl sp_core::Hasher for BlakeTwo256 { +impl Hasher for BlakeTwo256 { const EMPTY_ROOT: &'static [u8] = &[ 3, 23, 10, 46, 117, 151, 183, 183, 227, 216, 76, 5, 57, 29, 19, 154, 98, 177, 87, 231, @@ -1429,7 +1428,6 @@ mod tests { #[test] fn empty_root_const() { - use sp_core::Hasher; let empty = ::hash(&[0u8]); assert_eq!(BlakeTwo256::EMPTY_ROOT, empty.as_ref()); } From 29254aa8733da0dcc7667f3483b55fcc16c9f875 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 17 Mar 2020 15:41:51 +0100 Subject: [PATCH 64/85] missing merge --- client/network/src/protocol.rs | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 5304ba71d60a3..af8a15e72e9c2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -1603,13 +1603,12 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); -<<<<<<< HEAD let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof( - &request.block, + &BlockId::Hash(request.block), &child_info, &mut request.keys.iter().map(AsRef::as_ref), )) { @@ -1624,27 +1623,6 @@ impl Protocol { error ); StorageProof::empty() -======= - let proof = if let Some(child_info) = 
ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &request.storage_key, - child_info, - &mut request.keys.iter().map(AsRef::as_ref), - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - request.storage_key.to_hex::(), - keys_str(), - request.block, - error - ); - StorageProof::empty() - } ->>>>>>> master } }; self.send_message( From baacffacbdd4837c207adf1afc6d42dfb5cb6ba4 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 17 Mar 2020 16:36:18 +0100 Subject: [PATCH 65/85] fix bench --- client/db/src/bench.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b70d713b437e6..54db9556ac9b6 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -88,7 +88,7 @@ impl BenchmarkingState { child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let (root, transaction, _): (B::Hash, _, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, false, From 160d2ea7ec0fec4412447db234a180f9e7ac2e88 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 18 Mar 2020 19:18:37 +0100 Subject: [PATCH 66/85] Restore previous cli api (will need deprecation in a v2 of rpc), split new api into child state api. 
--- client/rpc-api/src/child_state/mod.rs | 66 ++++++++++ client/rpc-api/src/lib.rs | 1 + client/rpc-api/src/state/mod.rs | 12 ++ client/rpc/src/state/mod.rs | 170 ++++++++++++++++++++++---- client/rpc/src/state/state_full.rs | 162 +++++++++++++++++------- client/rpc/src/state/state_light.rs | 121 +++++++++++++----- client/rpc/src/state/tests.rs | 24 ++-- client/service/src/builder.rs | 12 +- 8 files changed, 448 insertions(+), 120 deletions(-) create mode 100644 client/rpc-api/src/child_state/mod.rs diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs new file mode 100644 index 0000000000000..f9027b0f15a08 --- /dev/null +++ b/client/rpc-api/src/child_state/mod.rs @@ -0,0 +1,66 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Substrate state API. 
+ +use jsonrpc_derive::rpc; +use sp_core::storage::{StorageKey, StorageData}; +use crate::state::error::FutureResult; + +pub use self::gen_client::Client as ChildStateClient; + +/// Substrate child state API +#[rpc] +pub trait ChildStateApi { + /// RPC Metadata + type Metadata; + + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + #[rpc(name = "childstate_getKeys")] + fn storage_keys( + &self, + child_storage_key: StorageKey, + prefix: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + #[rpc(name = "childstate_getStorage")] + fn storage( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + #[rpc(name = "childstate_getStorageHash")] + fn storage_hash( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + #[rpc(name = "childstate_getStorageSize")] + fn storage_size( + &self, + child_storage_key: StorageKey, + key: StorageKey, + hash: Option + ) -> FutureResult>; +} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 8ad2d94bfd271..82913f2dea672 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -32,4 +32,5 @@ pub mod author; pub mod chain; pub mod offchain; pub mod state; +pub mod child_state; pub mod system; diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 48d363bb8921c..e94df46736b48 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -73,37 +73,49 @@ pub trait StateApi { fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; /// Returns the keys with prefix from a child storage, leave empty to get all the keys + /// This method is deprecated in favor of `childstate_getChildKeys`. 
#[rpc(name = "state_getChildKeys")] fn child_storage_keys( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, hash: Option ) -> FutureResult>; /// Returns a child storage entry at a specific block's state. + /// This method is deprecated in favor of `childstate_getChildStorage`. #[rpc(name = "state_getChildStorage")] fn child_storage( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the hash of a child storage entry at a block's state. + /// This method is deprecated in favor of `childstate_getChildStorageHash`. #[rpc(name = "state_getChildStorageHash")] fn child_storage_hash( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; /// Returns the size of a child storage entry at a block's state. + /// This method is deprecated in favor of `childstate_getChildStorageSize`. #[rpc(name = "state_getChildStorageSize")] fn child_storage_size( &self, child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index a8a2e58b9a43d..29ad2f7538eee 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -37,6 +37,7 @@ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; +pub use sc_rpc_api::child_state::*; use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend}; use sp_blockchain::{HeaderMetadata, HeaderBackend}; @@ -103,12 +104,13 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, - /// leave prefix empty to get all the keys. 
+ /// Returns the keys with prefix from a child storage, leave empty to get all the keys fn child_storage_keys( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult>; @@ -116,7 +118,9 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -124,7 +128,9 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult>; @@ -132,10 +138,12 @@ pub trait StateBackend: Send + Sync + 'static fn child_storage_size( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) + Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) .map(|x| x.map(|x| x.0.len() as u64))) } @@ -190,7 +198,7 @@ pub trait StateBackend: Send + Sync + 'static pub fn new_full( client: Arc, subscriptions: Subscriptions, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -200,9 +208,11 @@ pub fn new_full( + ProvideRuntimeApi + Send + Sync + 'static, Client::Api: Metadata, { - State { - backend: Box::new(self::state_full::FullState::new(client, subscriptions)), - } + let child_backend = Box::new( + self::state_full::FullState::new(client.clone(), subscriptions.clone()) + ); + let backend = Box::new(self::state_full::FullState::new(client, subscriptions)); + (State { backend }, ChildState { backend: child_backend }) } /// Create new state API that works on light node. 
@@ -211,7 +221,7 @@ pub fn new_light>( subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, -) -> State +) -> (State, ChildState) where Block: BlockT + 'static, BE: Backend + 'static, @@ -221,14 +231,20 @@ pub fn new_light>( + Send + Sync + 'static, F: Send + Sync + 'static, { - State { - backend: Box::new(self::state_light::LightState::new( + let child_backend = Box::new(self::state_light::LightState::new( + client.clone(), + subscriptions.clone(), + remote_blockchain.clone(), + fetcher.clone(), + )); + + let backend = Box::new(self::state_light::LightState::new( client, subscriptions, remote_blockchain, fetcher, - )), - } + )); + (State { backend }, ChildState { backend: child_backend }) } /// State API with subscriptions support. @@ -295,38 +311,46 @@ impl StateApi for State fn child_storage( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage(block, storage_key, key) + self.backend.child_storage(block, child_storage_key, child_info, child_type, key) } fn child_storage_keys( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key_prefix: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_keys(block, storage_key, key_prefix) + self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) } fn child_storage_hash( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> FutureResult> { - self.backend.child_storage_hash(block, storage_key, key) + self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) } fn child_storage_size( &self, - storage_key: StorageKey, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, key: StorageKey, block: Option ) -> 
FutureResult> { - self.backend.child_storage_size(block, storage_key, key) + self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) } fn metadata(&self, block: Option) -> FutureResult { @@ -372,6 +396,104 @@ impl StateApi for State } } +/// Child state backend API. +pub trait ChildStateBackend: Send + Sync + 'static + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + /// Returns the keys with prefix from a child storage, + /// leave prefix empty to get all the keys. + fn storage_keys( + &self, + block: Option, + storage_key: StorageKey, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. + fn storage_size( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(self.storage(block, storage_key, key) + .map(|x| x.map(|x| x.0.len() as u64))) + } +} + +/// Child state API with subscriptions support. 
+pub struct ChildState { + backend: Box>, +} + +impl ChildStateApi for ChildState + where + Block: BlockT + 'static, + Client: Send + Sync + 'static, +{ + type Metadata = crate::metadata::Metadata; + + fn storage( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage(block, storage_key, key) + } + + fn storage_keys( + &self, + storage_key: StorageKey, + key_prefix: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_keys(block, storage_key, key_prefix) + } + + fn storage_hash( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_hash(block, storage_key, key) + } + + fn storage_size( + &self, + storage_key: StorageKey, + key: StorageKey, + block: Option + ) -> FutureResult> { + self.backend.storage_size(block, storage_key, key) + } +} + +const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; + +fn child_resolution_error() -> Error { + client_err(sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string())) +} + fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index ae00bcec72aa3..d27086a02c176 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -39,7 +39,8 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, + client_err, child_resolution_error}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; @@ -309,67 +310,58 @@ impl StateBackend for FullState, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, prefix: StorageKey, ) -> FutureResult> { - Box::new(result( - 
self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_keys( - &BlockId::Hash(block), - &child_info, - &prefix, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_keys( + self, + block, + child_storage_key, + prefix, + ) } fn child_storage( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage( + self, + block, + child_storage_key, + key, + ) } fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { - Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), - None => return Err("Invalid child storage key".into()), - }; - self.client.child_storage_hash( - &BlockId::Hash(block), - &child_info, - &key, - ) - }) - .map_err(client_err))) + if child_type != 1 { + return 
Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_hash( + self, + block, + child_storage_key, + key, + ) } fn metadata(&self, block: Option) -> FutureResult { @@ -488,7 +480,7 @@ impl StateBackend for FullState StateBackend for FullState ChildStateBackend for FullState where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + HeaderBackend + + HeaderMetadata + BlockchainEvents + + CallApiAt + ProvideRuntimeApi + + Send + Sync + 'static, + Client::Api: Metadata, +{ + fn storage_keys( + &self, + block: Option, + storage_key: StorageKey, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage_keys( + &BlockId::Hash(block), + &child_info, + &prefix, + ) + }) + .map_err(client_err))) + } + + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage key".into()), + }; + self.client.child_storage( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } + + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), + None => return Err("Invalid child storage 
key".into()), + }; + self.client.child_storage_hash( + &BlockId::Hash(block), + &child_info, + &key, + ) + }) + .map_err(client_err))) + } +} + /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 23ae3c4aede70..6e1aa6bc1f07d 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -53,12 +53,14 @@ use sp_core::{ use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, error::{FutureResult, Error}, client_err}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err, + child_resolution_error}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; /// State API backend for light nodes. +#[derive(Clone)] pub struct LightState, Client> { client: Arc, subscriptions: Subscriptions, @@ -233,8 +235,7 @@ impl StateBackend for LightState, key: StorageKey, ) -> FutureResult> { - Box::new(self - .storage(block, key) + Box::new(StateBackend::storage(self, block, key) .and_then(|maybe_storage| result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) ) @@ -244,7 +245,9 @@ impl StateBackend for LightState, - _storage_key: StorageKey, + _child_storage_key: StorageKey, + _child_info: StorageKey, + _child_type: u32, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -253,43 +256,38 @@ impl StateBackend for LightState, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - 
.then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key: storage_key.0, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }); - - Box::new(child_storage.boxed().compat()) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage( + self, + block, + child_storage_key, + key, + ) } fn child_storage_hash( &self, block: Option, - storage_key: StorageKey, + child_storage_key: StorageKey, + _child_info: StorageKey, + child_type: u32, key: StorageKey, ) -> FutureResult> { - Box::new(self.child_storage(block, storage_key, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) + if child_type != 1 { + return Box::new(result(Err(child_resolution_error()))); + } + ChildStateBackend::storage_hash( + self, + block, + child_storage_key, + key, ) } @@ -501,6 +499,65 @@ impl StateBackend for LightState ChildStateBackend for LightState + where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static +{ + fn storage_keys( + &self, + _block: Option, + _storage_key: StorageKey, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + + fn storage( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + let block = self.block_or_best(block); + let fetcher = self.fetcher.clone(); + let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) + .then(move |result| match result { + Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest 
{ + block, + header, + storage_key: storage_key.0, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }).then(move |result| ready(result + .map(|mut data| data + .remove(&key.0) + .expect("successful result has entry for all keys; qed") + .map(StorageData) + ) + .map_err(client_err) + ))), + Err(error) => Either::Right(ready(Err(error))), + }); + + Box::new(child_storage.boxed().compat()) + } + + fn storage_hash( + &self, + block: Option, + storage_key: StorageKey, + key: StorageKey, + ) -> FutureResult> { + Box::new(ChildStateBackend::storage(self, block, storage_key, key) + .and_then(|maybe_storage| + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) + ) + ) + } +} + /// Resolve header by hash. fn resolve_header>( remote_blockchain: &dyn RemoteBlockchain, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 8d351b00c71b5..24ea59dc484ec 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -47,7 +47,7 @@ fn should_return_storage() { .add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); + let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); @@ -67,7 +67,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - client.child_storage(storage_key, key, Some(genesis_hash).into()) + child.storage(storage_key, key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -83,13 +83,13 @@ fn should_return_child_storage() { .add_child_storage(&child_info, "key", vec![42_u8]) .build()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let (_client, child) 
= new_full(client, Subscriptions::new(Arc::new(core.executor()))); let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); let key = StorageKey(b"key".to_vec()); assert_matches!( - client.child_storage( + child.storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -97,7 +97,7 @@ fn should_return_child_storage() { Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); assert_matches!( - client.child_storage_hash( + child.storage_hash( child_key.clone(), key.clone(), Some(genesis_hash).into(), @@ -105,7 +105,7 @@ fn should_return_child_storage() { Ok(true) ); assert_matches!( - client.child_storage_size( + child.storage_size( child_key.clone(), key.clone(), None, @@ -119,7 +119,7 @@ fn should_call_contract() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let (client, _child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); assert_matches!( client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), @@ -135,7 +135,7 @@ fn should_notify_about_storage_changes() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); api.subscribe_storage(Default::default(), subscriber, None.into()); @@ -168,7 +168,7 @@ fn should_send_initial_storage_changes_and_notifications() { { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); @@ -204,7 +204,7 @@ fn 
should_send_initial_storage_changes_and_notifications() { fn should_query_storage() { fn run_tests(mut client: Arc) { let core = tokio::runtime::Runtime::new().unwrap(); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let mut add_block = |nonce| { let mut builder = client.new_block(Default::default()).unwrap(); @@ -389,7 +389,7 @@ fn should_return_runtime_version() { let core = tokio::runtime::Runtime::new().unwrap(); let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",2],\ @@ -412,7 +412,7 @@ fn should_notify_on_runtime_version_initially() { { let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); api.subscribe_runtime_version(Default::default(), subscriber); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 4363e204c07a8..480a7a5d7d00f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1138,7 +1138,7 @@ ServiceBuilder< let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); - let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = + let (chain, state, child_state) = if let (Some(remote_backend), Some(on_demand)) = (remote_backend.as_ref(), on_demand.as_ref()) { // Light clients let chain = sc_rpc::chain::new_light( @@ -1147,19 +1147,19 @@ ServiceBuilder< remote_backend.clone(), on_demand.clone() ); - 
let state = sc_rpc::state::new_light( + let (state, child_state) = sc_rpc::state::new_light( client.clone(), subscriptions.clone(), remote_backend.clone(), on_demand.clone() ); - (chain, state) + (chain, state, child_state) } else { // Full nodes let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); - (chain, state) + let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); + (chain, state, child_state) }; let author = sc_rpc::author::Author::new( @@ -1175,6 +1175,7 @@ ServiceBuilder< let offchain = sc_rpc::offchain::Offchain::new(storage); sc_rpc_server::rpc_handler(( state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), chain::ChainApi::to_delegate(chain), offchain::OffchainApi::to_delegate(offchain), author::AuthorApi::to_delegate(author), @@ -1184,6 +1185,7 @@ ServiceBuilder< }, None => sc_rpc_server::rpc_handler(( state::StateApi::to_delegate(state), + state::ChildStateApi::to_delegate(child_state), chain::ChainApi::to_delegate(chain), author::AuthorApi::to_delegate(author), system::SystemApi::to_delegate(system), From 63cf6bd01d20e06009c6c96814d9ef1984418cb1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 19 Mar 2020 20:06:23 +0100 Subject: [PATCH 67/85] remove old rpc --- client/rpc-api/src/state/mod.rs | 48 --------------- client/rpc/src/state/mod.rs | 93 ----------------------------- client/rpc/src/state/state_full.rs | 60 +------------------ client/rpc/src/state/state_light.rs | 52 +--------------- 4 files changed, 2 insertions(+), 251 deletions(-) diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index e94df46736b48..fd709788e5e42 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -72,54 +72,6 @@ pub trait StateApi { #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] fn storage_size(&self, key: StorageKey, 
hash: Option) -> FutureResult>; - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - /// This method is deprecated in favor of `childstate_getChildKeys`. - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - /// This method is deprecated in favor of `childstate_getChildStorage`. - #[rpc(name = "state_getChildStorage")] - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - /// This method is deprecated in favor of `childstate_getChildStorageHash`. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - /// This method is deprecated in favor of `childstate_getChildStorageSize`. - #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - /// Returns the runtime metadata as an opaque blob. 
#[rpc(name = "state_getMetadata")] fn metadata(&self, hash: Option) -> FutureResult; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 29ad2f7538eee..1805ac5351991 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -104,49 +104,6 @@ pub trait StateBackend: Send + Sync + 'static .map(|x| x.map(|x| x.0.len() as u64))) } - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - fn child_storage_size( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } - /// Returns the runtime metadata as an opaque blob. 
fn metadata(&self, block: Option) -> FutureResult; @@ -309,50 +266,6 @@ impl StateApi for State self.backend.storage_size(block, key) } - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key_prefix: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) - } - - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) - } - fn metadata(&self, block: Option) -> FutureResult { self.backend.metadata(block) } @@ -488,12 +401,6 @@ impl ChildStateApi for ChildState } } -const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; - -fn child_resolution_error() -> Error { - client_err(sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string())) -} - fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index d27086a02c176..599b8af349759 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -39,8 +39,7 @@ use sp_runtime::{ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, 
Result}, - client_err, child_resolution_error}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err}; use std::marker::PhantomData; use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; @@ -307,63 +306,6 @@ impl StateBackend for FullState, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_keys( - self, - block, - child_storage_key, - prefix, - ) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage( - self, - block, - child_storage_key, - key, - ) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_hash( - self, - block, - child_storage_key, - key, - ) - } - fn metadata(&self, block: Option) -> FutureResult { Box::new(result( self.block_or_best(block) diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 6e1aa6bc1f07d..22bee62950d62 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -53,8 +53,7 @@ use sp_core::{ use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err, - child_resolution_error}; +use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err}; /// Storage data map of storage keys => (optional) storage value. 
type StorageMap = HashMap>; @@ -242,55 +241,6 @@ impl StateBackend for LightState, - _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage( - self, - block, - child_storage_key, - key, - ) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - _child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - if child_type != 1 { - return Box::new(result(Err(child_resolution_error()))); - } - ChildStateBackend::storage_hash( - self, - block, - child_storage_key, - key, - ) - } - fn metadata(&self, block: Option) -> FutureResult { let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) .and_then(|metadata| OpaqueMetadata::decode(&mut &metadata.0[..]) From f21606f0b0a949cefcb95922a81d3cc747c111f0 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 20 Mar 2020 14:39:14 +0100 Subject: [PATCH 68/85] review change. 
--- .../network/src/protocol/light_client_handler.rs | 2 +- frame/contracts/src/account_db.rs | 12 ++++++------ frame/contracts/src/lib.rs | 16 ++++++++-------- frame/contracts/src/rent.rs | 6 +++--- .../state-machine/src/overlayed_changes.rs | 2 +- test-utils/client/src/lib.rs | 2 ++ 6 files changed, 21 insertions(+), 19 deletions(-) diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index f6588ea19836b..ecbd62e431de4 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -514,7 +514,7 @@ where Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; - let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( + let proof = match child_info.and_then(|child_info| self.chain.read_child_proof( &BlockId::Hash(block), &child_info, &mut request.keys.iter().map(AsRef::as_ref) diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index f65ecb0d8cda1..fb1ec52b3d060 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -128,7 +128,7 @@ impl AccountDb for DirectAccountDb { trie_id: Option<&TrieId>, location: &StorageKey ) -> Option> { - trie_id.and_then(|id| child::get_raw(&crate::trie_unique_id(&id[..]), &blake2_256(location))) + trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location))) } fn get_code_hash(&self, account: &T::AccountId) -> Option> { >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) @@ -175,13 +175,13 @@ impl AccountDb for DirectAccountDb { (false, Some(info), _) => info, // Existing contract is being removed. 
(true, Some(info), None) => { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); >::remove(&address); continue; } // Existing contract is being replaced by a new one. (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); AliveContractInfo:: { code_hash, storage_size: T::StorageSizeOffset::get(), @@ -220,16 +220,16 @@ impl AccountDb for DirectAccountDb { for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( - &new_info.child_trie_unique_id(), + &new_info.child_trie_info(), &blake2_256(&k), ) { new_info.storage_size -= value.len() as u32; } if let Some(value) = v { new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); + child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]); } else { - child::kill(&new_info.child_trie_unique_id(), &blake2_256(&k)); + child::kill(&new_info.child_trie_info(), &blake2_256(&k)); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 8da776ae6b962..a2d194714d35c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -225,13 +225,13 @@ pub struct RawAliveContractInfo { impl RawAliveContractInfo { /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> ChildInfo { - trie_unique_id(&self.trie_id[..]) + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) } } /// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn trie_unique_id(trie_id: &[u8]) -> ChildInfo { +pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { ChildInfo::new_default(trie_id) } @@ -804,11 +804,11 @@ impl Module { let key_values_taken = delta.iter() .filter_map(|key| { child::get_raw( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ).map(|value| { child::kill( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), ); @@ -821,7 +821,7 @@ impl Module { // This operation is cheap enough because last_write (delta not included) // is not this block as it has been checked earlier. &child::root( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), )[..], code_hash, ); @@ -829,7 +829,7 @@ impl Module { if tombstone != dest_tombstone { for (key, value) in key_values_taken { child::put_raw( - &origin_contract.child_trie_unique_id(), + &origin_contract.child_trie_info(), &blake2_256(key), &value, ); @@ -933,7 +933,7 @@ decl_storage! { impl OnKilledAccount for Module { fn on_killed_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.child_trie_unique_id()); + child::kill_storage(&info.child_trie_info()); } } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index dfcbc997c5b22..1aa52fff31435 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -223,7 +223,7 @@ fn enact_verdict( Verdict::Kill => { >::remove(account); child::kill_storage( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), false)); None @@ -235,7 +235,7 @@ fn enact_verdict( // Note: this operation is heavy. 
let child_storage_root = child::root( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); let tombstone = >::new( @@ -246,7 +246,7 @@ fn enact_verdict( >::insert(account, &tombstone_info); child::kill_storage( - &alive_contract_info.child_trie_unique_id(), + &alive_contract_info.child_trie_info(), ); >::deposit_event(RawEvent::Evicted(account.clone(), true)); diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 2dc56bc772d3d..c72cfc5c1cf58 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -76,7 +76,7 @@ pub struct OverlayedValue { pub struct OverlayedChangeSet { /// Top level storage changes. pub top: BTreeMap, - /// Child storage changes. + /// Child storage changes. The map key is the child storage key without the common prefix. pub children_default: HashMap, ChildInfo)>, } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index fad08c8238669..4880b296c7048 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -66,6 +66,8 @@ impl GenesisInit for () { pub struct TestClientBuilder { execution_strategies: ExecutionStrategies, genesis_init: G, + /// The key is an unprefixed storage key, this only contains + /// default child trie content. child_storage_extension: HashMap, StorageChild>, backend: Arc, _executor: std::marker::PhantomData, From 1b2a30cd9938ee776ebe441279374d88e8e81a27 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 20 Mar 2020 20:47:40 +0100 Subject: [PATCH 69/85] Using `PrefixedStorageKey` type for child storage key that are prefixed. 
--- Cargo.lock | 21 +++++ client/api/src/backend.rs | 4 +- client/api/src/light.rs | 6 +- client/api/src/proof_provider.rs | 4 +- client/db/src/changes_tries_storage.rs | 3 +- client/network/src/protocol.rs | 42 +++++---- .../src/protocol/light_client_handler.rs | 36 ++++--- client/network/src/protocol/light_dispatch.rs | 13 ++- client/rpc-api/src/child_state/mod.rs | 10 +- client/rpc/src/state/mod.rs | 18 ++-- client/rpc/src/state/state_full.rs | 14 +-- client/rpc/src/state/state_light.rs | 11 ++- client/rpc/src/state/tests.rs | 11 ++- client/src/client.rs | 16 ++-- client/src/light/fetcher.rs | 7 +- primitives/state-machine/src/backend.rs | 4 +- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/build.rs | 6 +- .../src/changes_trie/build_cache.rs | 15 +-- .../src/changes_trie/changes_iterator.rs | 26 ++++-- .../state-machine/src/changes_trie/input.rs | 18 +++- .../state-machine/src/changes_trie/mod.rs | 3 +- .../state-machine/src/changes_trie/prune.rs | 3 +- .../state-machine/src/changes_trie/storage.rs | 5 +- primitives/state-machine/src/ext.rs | 4 +- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/Cargo.toml | 1 + primitives/storage/src/lib.rs | 93 ++++++++++++++----- test-utils/runtime/client/src/lib.rs | 2 +- 29 files changed, 256 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 189f85013c0c1..1ff40208ebc79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5341,6 +5341,26 @@ dependencies = [ "rust-argon2", ] +[[package]] +name = "ref-cast" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f" +dependencies = [ + "proc-macro2", + "quote", + 
"syn", +] + [[package]] name = "regex" version = "1.3.4" @@ -7420,6 +7440,7 @@ name = "sp-storage" version = "2.0.0-alpha.4" dependencies = [ "impl-serde 0.2.3", + "ref-cast", "serde", "sp-debug-derive", "sp-std", diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 68dd61b233ae0..33a370c7cb2c5 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -26,7 +26,7 @@ use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; -use sp_storage::{StorageData, StorageKey, ChildInfo}; +use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo}; use crate::{ blockchain::{ Backend as BlockchainBackend, well_known_cache_keys @@ -349,7 +349,7 @@ pub trait StorageProvider> { &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>>; } diff --git a/client/api/src/light.rs b/client/api/src/light.rs index 2911d77f18209..30e6d14d557f1 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -26,7 +26,7 @@ use sp_runtime::{ }, generic::BlockId }; -use sp_core::ChangesTrieConfigurationRange; +use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey}; use sp_state_machine::StorageProof; use sp_blockchain::{ HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, @@ -81,7 +81,7 @@ pub struct RemoteReadChildRequest { /// Header of block at which read is performed. pub header: Header, /// Storage key for child. - pub storage_key: Vec, + pub storage_key: PrefixedStorageKey, /// Child storage key to read. pub keys: Vec>, /// Number of times to retry request. None means that default RETRY_COUNT is used. @@ -105,7 +105,7 @@ pub struct RemoteChangesRequest { /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. 
pub tries_roots: (Header::Number, Header::Hash, Vec), /// Optional Child Storage key to read. - pub storage_key: Option>, + pub storage_key: Option, /// Storage key to read. pub key: Vec, /// Number of times to retry request. None means that default RETRY_COUNT is used. diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index a805baf42b8f6..93160855eaebe 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -19,7 +19,7 @@ use sp_runtime::{ traits::{Block as BlockT}, }; use crate::{StorageProof, ChangesProof}; -use sp_storage::{ChildInfo, StorageKey}; +use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider { @@ -64,7 +64,7 @@ pub trait ProofProvider { last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result>; } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a28cd604fe363..55e740f43462a 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -27,6 +27,7 @@ use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_core::storage::PrefixedStorageKey; use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, }; @@ -481,7 +482,7 @@ where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.build_cache.read().with_changed_keys(root, functor) } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 1e6158a59ecc3..2635a24f8dd3f 100644 --- 
a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -23,7 +23,7 @@ use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo, ChildType}; +use sp_core::storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -312,14 +312,14 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { who: &PeerId, id: RequestId, block: ::Hash, - storage_key: Vec, + storage_key: PrefixedStorageKey, keys: Vec>, ) { let message: Message = message::generic::Message::RemoteReadChildRequest( message::RemoteReadChildRequest { id, block, - storage_key, + storage_key: storage_key.key(), keys, }); @@ -352,7 +352,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last: ::Hash, min: ::Hash, max: ::Hash, - storage_key: Option>, + storage_key: Option, key: Vec, ) { let message: Message = message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest { @@ -361,7 +361,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last, min, max, - storage_key, + storage_key: storage_key.map(|p| p.key()), key, }); @@ -1608,7 +1608,8 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -1708,23 +1709,32 @@ impl Protocol { 
request.first, request.last ); - let storage_key = request.storage_key.map(|sk| StorageKey(sk)); let key = StorageKey(request.key); - let proof = match self.context_data.chain.key_changes_proof( - request.first, - request.last, - request.min, - request.max, - storage_key.as_ref(), + let prefixed_key = if let Some(storage_key) = request.storage_key.as_ref() { + if let Some(storage_key) = PrefixedStorageKey::new_ref(storage_key) { + Ok(Some(storage_key)) + } else { + Err("Invalid prefixed storage key".into()) + } + } else { + Ok(None) + }; + let (first, last, min, max) = (request.first, request.last, request.min, request.max); + let proof = match prefixed_key.and_then(|p_key| self.context_data.chain.key_changes_proof( + first, + last, + min, + max, + p_key, &key, - ) { + )) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", request.id, who, - if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + if let Some(sk) = request.storage_key.as_ref() { + format!("{} : {}", sk.to_hex::(), key.0.to_hex::()) } else { key.0.to_hex::() }, diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index ecbd62e431de4..085bd06e4cca5 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -55,7 +55,7 @@ use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, ChildType, StorageKey}; +use sp_core::storage::{ChildInfo, ChildType, StorageKey, PrefixedStorageKey}; use sp_blockchain::{Error as ClientError}; use sp_runtime::{ traits::{Block, Header, NumberFor, Zero}, @@ -510,7 +510,8 @@ where let block = Decode::decode(&mut request.block.as_ref())?; - let child_info = match ChildType::from_prefixed_key(&request.storage_key) { + let 
prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); + let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -588,20 +589,25 @@ where let min = Decode::decode(&mut request.min.as_ref())?; let max = Decode::decode(&mut request.max.as_ref())?; let key = StorageKey(request.key.clone()); - let storage_key = - if request.storage_key.is_empty() { - None + let storage_key = if request.storage_key.is_empty() { + Ok(None) + } else { + if let Some(storage_key) = PrefixedStorageKey::new_ref(&request.storage_key) { + Ok(Some(storage_key)) } else { - Some(StorageKey(request.storage_key.clone())) - }; + Err("Invalid prefix for storage key.".into()) + } + }; - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) { + let proof = match storage_key.and_then(|storage_key| { + self.chain.key_changes_proof(first, last, min, max, storage_key, &key) + }) { Ok(proof) => proof, Err(error) => { log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", peer, - if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + if !request.storage_key.is_empty() { + format!("{} : {}", request.storage_key.to_hex::(), key.0.to_hex::()) } else { key.0.to_hex::() }, @@ -918,7 +924,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request Request::ReadChild { request, .. 
} => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), - storage_key: request.storage_key.clone(), + storage_key: request.storage_key.clone().key(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -937,7 +943,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().unwrap_or_default(), + storage_key: request.storage_key.clone().map(|s| s.key()).unwrap_or_default(), key: request.key.clone(), }; api::v1::light::request::Request::RemoteChangesRequest(r) @@ -1562,10 +1568,11 @@ mod tests { #[test] fn receives_remote_read_child_response() { let mut chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }; @@ -1662,10 +1669,11 @@ mod tests { #[test] fn send_receive_read_child() { let chan = oneshot::channel(); + let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = fetcher::RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }; diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 94d2e35a1278d..d35855d9c45e0 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -35,6 +35,7 @@ use libp2p::PeerId; use crate::config::Roles; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use 
sc_peerset::ReputationChange; +use sp_core::storage::PrefixedStorageKey; /// Remote request timeout. const REQUEST_TIMEOUT: Duration = Duration::from_secs(15); @@ -69,7 +70,7 @@ pub trait LightDispatchNetwork { who: &PeerId, id: RequestId, block: ::Hash, - storage_key: Vec, + storage_key: PrefixedStorageKey, keys: Vec>, ); @@ -92,7 +93,7 @@ pub trait LightDispatchNetwork { last: ::Hash, min: ::Hash, max: ::Hash, - storage_key: Option>, + storage_key: Option, key: Vec, ); @@ -678,6 +679,7 @@ pub mod tests { use std::sync::Arc; use std::time::Instant; use futures::channel::oneshot; + use sp_core::storage::PrefixedStorageKey; use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{FetchChecker, RemoteHeaderRequest, @@ -821,11 +823,11 @@ pub mod tests { } fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, + fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: PrefixedStorageKey, _: Vec>) {} fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, - _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} + _: ::Hash, _: ::Hash, _: Option, _: Vec) {} fn send_body_request(&mut self, _: &PeerId, _: RequestId, _: BlockAttributes, _: FromBlock<::Hash, <::Header as HeaderT>::Number>, _: Option, _: Direction, _: Option) {} } @@ -1043,12 +1045,13 @@ pub mod tests { let peer0 = PeerId::random(); light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); + let child_info = sp_core::storage::ChildInfo::new_default(&b":child_storage:default:sub"[..]); let (tx, response) = oneshot::channel(); 
light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild( RemoteReadChildRequest { header: dummy_header(), block: Default::default(), - storage_key: b":child_storage:default:sub".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b":key".to_vec()], retry_count: None, }, tx)); diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index f9027b0f15a08..3c530b64dec30 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -17,7 +17,7 @@ //! Substrate state API. use jsonrpc_derive::rpc; -use sp_core::storage::{StorageKey, StorageData}; +use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData}; use crate::state::error::FutureResult; pub use self::gen_client::Client as ChildStateClient; @@ -32,7 +32,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getKeys")] fn storage_keys( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, prefix: StorageKey, hash: Option ) -> FutureResult>; @@ -41,7 +41,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorage")] fn storage( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; @@ -50,7 +50,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorageHash")] fn storage_hash( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; @@ -59,7 +59,7 @@ pub trait ChildStateApi { #[rpc(name = "childstate_getStorageSize")] fn storage_size( &self, - child_storage_key: StorageKey, + child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option ) -> FutureResult>; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 1805ac5351991..d61cd43773328 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -28,7 +28,7 @@ use rpc::{Result as RpcResult, futures::{Future, 
future::result}}; use sc_rpc_api::Subscriptions; use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; -use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}}; +use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}}; use sp_version::RuntimeVersion; use sp_runtime::traits::Block as BlockT; @@ -320,7 +320,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_keys( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, prefix: StorageKey, ) -> FutureResult>; @@ -328,7 +328,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult>; @@ -336,7 +336,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_hash( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult>; @@ -344,7 +344,7 @@ pub trait ChildStateBackend: Send + Sync + 'static fn storage_size( &self, block: Option, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(self.storage(block, storage_key, key) @@ -366,7 +366,7 @@ impl ChildStateApi for ChildState fn storage( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: Option ) -> FutureResult> { @@ -375,7 +375,7 @@ impl ChildStateApi for ChildState fn storage_keys( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key_prefix: StorageKey, block: Option ) -> FutureResult> { @@ -384,7 +384,7 @@ impl ChildStateApi for ChildState fn storage_hash( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: Option ) -> FutureResult> { @@ -393,7 +393,7 @@ impl ChildStateApi for ChildState fn storage_size( &self, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, block: 
Option ) -> FutureResult> { diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 599b8af349759..273d421dc9da5 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -30,7 +30,7 @@ use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata use sc_client::BlockchainEvents; use sp_core::{ Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, - ChildInfo, ChildType}, + ChildInfo, ChildType, PrefixedStorageKey}, }; use sp_version::RuntimeVersion; use sp_runtime::{ @@ -471,13 +471,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, prefix: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; @@ -493,13 +493,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; @@ -515,13 +515,13 @@ impl ChildStateBackend for FullState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(result( self.block_or_best(block) .and_then(|block| { - let child_info = match ChildType::from_prefixed_key(&storage_key.0[..]) { + let child_info = match ChildType::from_prefixed_key(&storage_key) { 
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child storage key".into()), }; diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 22bee62950d62..ae33bd44cc02c 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -48,7 +48,8 @@ use sc_client::{ }, }; use sp_core::{ - Bytes, OpaqueMetadata, storage::{StorageKey, StorageData, StorageChangeSet}, + Bytes, OpaqueMetadata, + storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}, }; use sp_version::RuntimeVersion; use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; @@ -458,7 +459,7 @@ impl ChildStateBackend for LightState, - _storage_key: StorageKey, + _storage_key: PrefixedStorageKey, _prefix: StorageKey, ) -> FutureResult> { Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) @@ -467,7 +468,7 @@ impl ChildStateBackend for LightState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { let block = self.block_or_best(block); @@ -477,7 +478,7 @@ impl ChildStateBackend for LightState Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { block, header, - storage_key: storage_key.0, + storage_key, keys: vec![key.0.clone()], retry_count: Default::default(), }).then(move |result| ready(result @@ -497,7 +498,7 @@ impl ChildStateBackend for LightState, - storage_key: StorageKey, + storage_key: PrefixedStorageKey, key: StorageKey, ) -> FutureResult> { Box::new(ChildStateBackend::storage(self, block, storage_key, key) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 24ea59dc484ec..74455c99f61f8 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -32,7 +32,11 @@ use substrate_test_runtime_client::{ }; const STORAGE_KEY: &[u8] = b"child"; -const PREFIXED_STORAGE_KEY: &[u8] = b":child_storage:default:child"; + +fn 
prefixed_storage_key() -> PrefixedStorageKey { + let child_info = ChildInfo::new_default(&b":child_storage:default:child"[..]); + child_info.prefixed_storage_key() +} #[test] fn should_return_storage() { @@ -49,7 +53,6 @@ fn should_return_storage() { let genesis_hash = client.genesis_hash(); let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); assert_eq!( client.storage(key.clone(), Some(genesis_hash).into()).wait() @@ -67,7 +70,7 @@ fn should_return_storage() { ); assert_eq!( core.block_on( - child.storage(storage_key, key, Some(genesis_hash).into()) + child.storage(prefixed_storage_key(), key, Some(genesis_hash).into()) .map(|x| x.map(|x| x.0.len())) ).unwrap().unwrap() as usize, CHILD_VALUE.len(), @@ -84,7 +87,7 @@ fn should_return_child_storage() { .build()); let genesis_hash = client.genesis_hash(); let (_client, child) = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey(PREFIXED_STORAGE_KEY.to_vec()); + let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); diff --git a/client/src/client.rs b/client/src/client.rs index 8ec045b7f57ac..f273cae650bd5 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -26,8 +26,8 @@ use parking_lot::{Mutex, RwLock}; use codec::{Encode, Decode}; use hash_db::Prefix; use sp_core::{ - ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, - NativeOrEncoded, storage::{StorageKey, StorageData, well_known_keys, ChildInfo}, + ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, NativeOrEncoded, + storage::{StorageKey, PrefixedStorageKey, StorageData, well_known_keys, ChildInfo}, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::{ @@ -344,7 +344,7 @@ impl Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: 
Option<&PrefixedStorageKey>, key: &StorageKey, cht_size: NumberFor, ) -> sp_blockchain::Result> { @@ -393,7 +393,7 @@ impl Client where fn with_cached_changed_keys( &self, root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), + functor: &mut dyn FnMut(&HashMap, HashSet>>), ) -> bool { self.storage.with_cached_changed_keys(root, functor) } @@ -438,7 +438,7 @@ impl Client where number: last_number, }, max_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0, ) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; @@ -1146,7 +1146,7 @@ impl ProofProvider for Client where last: Block::Hash, min: Block::Hash, max: Block::Hash, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey, ) -> sp_blockchain::Result> { self.key_changes_proof_with_cht_size( @@ -1345,7 +1345,7 @@ impl StorageProvider for Client wher &self, first: NumberFor, last: BlockId, - storage_key: Option<&StorageKey>, + storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>> { let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; @@ -1376,7 +1376,7 @@ impl StorageProvider for Client wher range_first, &range_anchor, best_number, - storage_key.as_ref().map(|x| &x.0[..]), + storage_key, &key.0) .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index ce2434d6c6859..ef6a062cf3c07 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -136,7 +136,7 @@ impl> LightDataChecker { number: request.last_block.0, }, remote_max_block, - request.storage_key.as_ref().map(Vec::as_slice), + request.storage_key.as_ref(), &request.key) .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; result.extend(result_range); @@ -243,7 +243,7 @@ impl FetchChecker for LightDataChecker request: 
&RemoteReadChildRequest, remote_proof: StorageProof, ) -> ClientResult, Option>>> { - let child_info = match ChildType::from_prefixed_key(&request.storage_key[..]) { + let child_info = match ChildType::from_prefixed_key(&request.storage_key) { Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key), None => return Err("Invalid child type".into()), }; @@ -512,6 +512,7 @@ pub mod tests { #[test] fn storage_child_read_proof_is_generated_and_checked() { + let child_info = ChildInfo::new_default(&b"child1"[..]); let ( local_checker, remote_block_header, @@ -522,7 +523,7 @@ pub mod tests { &RemoteReadChildRequest::
{ block: remote_block_header.hash(), header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), + storage_key: child_info.prefixed_storage_key(), keys: vec![b"key1".to_vec()], retry_count: None, }, diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 5334a3b8c427f..c3b2146a73ae1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -187,9 +187,9 @@ pub trait Backend: std::fmt::Debug { let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((prefixed_storage_key, None)); + child_roots.push((prefixed_storage_key.key(), None)); } else { - child_roots.push((prefixed_storage_key, Some(child_root.encode()))); + child_roots.push((prefixed_storage_key.key(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 54c21dfc2057c..b8b3210a87c9c 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -242,7 +242,7 @@ impl Externalities for BasicExternalities { if &empty_hash[..] == &child_root[..] { top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(prefixed_storage_key, child_root); + top.insert(prefixed_storage_key.key(), child_root); } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index cf1a2e3bfba1b..0f60c8e317f70 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -32,7 +32,7 @@ use crate::{ input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, }, }; -use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey}; /// Prepare input pairs for building a changes trie of given block. 
/// @@ -280,7 +280,7 @@ fn prepare_digest_input<'a, H, Number>( return Ok((map, child_map)); } - let mut children_roots = BTreeMap::::new(); + let mut children_roots = BTreeMap::::new(); { let trie_storage = TrieBackendEssence::<_, H>::new( crate::changes_trie::TrieBackendStorageAdapter(storage), @@ -774,7 +774,7 @@ mod test { ], ); assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.to_vec() }).unwrap(), + child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(), &vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index 9d0dbb4c1f310..aebebf3a17f59 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs @@ -19,6 +19,7 @@ use std::collections::{HashMap, HashSet}; use crate::StorageKey; +use sp_core::storage::PrefixedStorageKey; /// Changes trie build cache. /// @@ -38,7 +39,7 @@ pub struct BuildCache { /// The `Option>` in inner `HashMap` stands for the child storage key. /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. - changed_keys: HashMap, HashSet>>, + changed_keys: HashMap, HashSet>>, } /// The action to perform when block-with-changes-trie is imported. @@ -56,7 +57,7 @@ pub struct CachedBuildData { block: N, trie_root: H, digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } /// The action to perform when block-with-changes-trie is imported. 
@@ -72,7 +73,7 @@ pub(crate) enum IncompleteCacheAction { #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + changed_keys: HashMap, HashSet>, } impl BuildCache @@ -89,7 +90,7 @@ impl BuildCache } /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { self.changed_keys.get(&root) } @@ -98,7 +99,7 @@ impl BuildCache pub fn with_changed_keys( &self, root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { match self.changed_keys.get(&root) { Some(changed_keys) => { @@ -164,7 +165,7 @@ impl IncompleteCacheAction { /// Insert changed keys of given storage into cached data. pub(crate) fn insert( self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { match self { @@ -200,7 +201,7 @@ impl IncompleteCachedBuildData { fn insert( mut self, - storage_key: Option, + storage_key: Option, changed_keys: HashSet, ) -> Self { self.changed_keys.insert(storage_key, changed_keys); diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 685786218c75f..f5a936069ba40 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -22,6 +22,7 @@ use std::collections::VecDeque; use codec::{Decode, Encode, Codec}; use hash_db::Hasher; use num_traits::Zero; +use sp_core::storage::PrefixedStorageKey; use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; @@ -40,7 +41,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &'a 
AnchorBlockId, max: Number, - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], ) -> Result, String> { // we can't query any roots before root @@ -79,7 +80,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8], ) -> Result>, String> where H::Out: Codec { // we can't query any roots before root @@ -127,7 +128,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { key_changes_proof_check_with_db( @@ -150,7 +151,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( begin: Number, end: &AnchorBlockId, max: Number, - storage_key: Option<&[u8]>, + storage_key: Option<&PrefixedStorageKey>, key: &[u8] ) -> Result, String> where H::Out: Encode { // we can't query any roots before root @@ -188,7 +189,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number> Number: BlockNumber, H::Out: 'a, { - storage_key: Option<&'a [u8]>, + storage_key: Option<&'a PrefixedStorageKey>, key: &'a [u8], roots_storage: &'a dyn RootsStorage, storage: &'a dyn Storage, @@ -238,7 +239,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> let trie_root = if let Some(storage_key) = self.storage_key { let child_key = ChildIndex { block: block.clone(), - storage_key: storage_key.to_vec(), + storage_key: storage_key.clone(), }.encode(); if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? 
.and_then(|v| >::decode(&mut &v[..]).ok()) @@ -382,6 +383,11 @@ mod tests { use sp_runtime::traits::BlakeTwo256; use super::*; + fn child_key() -> PrefixedStorageKey { + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + child_info.prefixed_storage_key() + } + fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; let backend = InMemoryStorage::with_inputs(vec![ @@ -418,7 +424,7 @@ mod tests { (16, vec![ InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), ]), - ], vec![(b"1".to_vec(), vec![ + ], vec![(child_key(), vec![ (1, vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), ]), @@ -535,7 +541,7 @@ mod tests { 1, &AnchorBlockId { hash: Default::default(), number: 100 }, 1000, - Some(&b"1"[..]), + Some(&child_key()), &[42], ).and_then(|i| i.collect::, _>>()).is_err()); } @@ -577,7 +583,7 @@ mod tests { let (remote_config, remote_storage) = prepare_for_drilldown(); let remote_proof_child = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap(); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap(); // happens on local light node: @@ -592,7 +598,7 @@ mod tests { local_storage.clear_storage(); let local_result_child = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]); + &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]); // check that drilldown result is the same as if it was happening at the full node assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); diff --git a/primitives/state-machine/src/changes_trie/input.rs 
b/primitives/state-machine/src/changes_trie/input.rs index 4a1420f8486f9..4007620f92ca8 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -21,6 +21,7 @@ use crate::{ StorageKey, StorageValue, changes_trie::BlockNumber }; +use sp_core::storage::PrefixedStorageKey; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] @@ -49,7 +50,7 @@ pub struct ChildIndex { /// Block at which this key has been inserted in the trie. pub block: Number, /// Storage key this node is responsible for. - pub storage_key: StorageKey, + pub storage_key: PrefixedStorageKey, } /// Value of { changed key => block/digest block numbers } mapping. @@ -176,10 +177,17 @@ impl Decode for InputKey { block: Decode::decode(input)?, key: Decode::decode(input)?, })), - 3 => Ok(InputKey::ChildIndex(ChildIndex { - block: Decode::decode(input)?, - storage_key: Decode::decode(input)?, - })), + 3 => { + let block = Decode::decode(input)?; + if let Some(storage_key) = PrefixedStorageKey::new(Decode::decode(input)?) 
{ + Ok(InputKey::ChildIndex(ChildIndex { + block, + storage_key, + })) + } else { + Err("Invalid prefixed key in change trie".into()) + } + }, _ => Err("Invalid input key variant".into()), } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d614992df3033..ee6c6778e0aad 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; +use sp_core::storage::PrefixedStorageKey; use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ @@ -156,7 +157,7 @@ pub trait Storage: RootsStorage { fn with_cached_changed_keys( &self, root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool; /// Get a trie node. fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 87923dc2f593c..05555df305b7c 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -137,7 +137,8 @@ mod tests { #[test] fn prune_works() { fn prepare_storage() -> InMemoryStorage { - let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode(); + let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); + let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode(); let mut mdb1 = MemoryDB::::default(); let root1 = insert_into_memory_db::( &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb418672872b..81651dd2e719b 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ 
b/primitives/state-machine/src/changes_trie/storage.rs @@ -18,6 +18,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use sp_core::storage::PrefixedStorageKey; use sp_trie::DBValue; use sp_trie::MemoryDB; use parking_lot::RwLock; @@ -96,7 +97,7 @@ impl InMemoryStorage { #[cfg(test)] pub fn with_inputs( mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, + children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec>)>)>, ) -> Self { let mut mdb = MemoryDB::default(); let mut roots = BTreeMap::new(); @@ -182,7 +183,7 @@ impl Storage for InMemoryStorage, HashSet>), + functor: &mut dyn FnMut(&HashMap, HashSet>), ) -> bool { self.cache.with_changed_keys(root, functor) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 12d40873e074f..33f502a75bdb2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -458,9 +458,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. 
if is_empty { - self.overlay.set_storage(prefixed_storage_key, None); + self.overlay.set_storage(prefixed_storage_key.key(), None); } else { - self.overlay.set_storage(prefixed_storage_key, Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key.key(), Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 9245b53a0493b..b0048d90f4103 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -331,7 +331,7 @@ impl Backend for InMemory where H::Out: Codec { if let Some(child_info) = child_info.as_ref() { let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((prefix_storage_key, ch.as_ref().into())); + new_child_roots.push((prefix_storage_key.key(), ch.as_ref().into())); } else { root_map = Some(map); } diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 9e90b6ecc6c6e..0df854170abaa 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -14,6 +14,7 @@ sp-std = { version = "2.0.0-alpha.4", default-features = false, path = "../std" serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } sp-debug-derive = { version = "2.0.0-alpha.4", path = "../debug-derive" } +ref-cast = "1.0.0" [features] default = [ "std" ] diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 984c8e4738796..de2a0d7e01856 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -23,6 +23,8 @@ use serde::{Serialize, Deserialize}; use sp_debug_derive::RuntimeDebug; use sp_std::vec::Vec; +use sp_std::ops::{Deref, DerefMut}; +use ref_cast::RefCast; /// Storage key. 
#[derive(PartialEq, Eq, RuntimeDebug)] @@ -32,6 +34,67 @@ pub struct StorageKey( pub Vec, ); +/// Storage key of a child trie, it contains the prefix to the key. +#[derive(PartialEq, Eq, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[repr(transparent)] +#[derive(RefCast)] +pub struct PrefixedStorageKey( + #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] + Vec, +); + +impl Deref for PrefixedStorageKey { + type Target = Vec; + + fn deref(&self) -> &Vec { + &self.0 + } +} + +impl DerefMut for PrefixedStorageKey { + fn deref_mut(&mut self) -> &mut Vec { + &mut self.0 + } +} + +impl PrefixedStorageKey { + /// Create a prefixed storage key from its byte array + /// representation. + /// Returns `None` on unknown prefix. + pub fn new(inner: Vec) -> Option { + let result = PrefixedStorageKey(inner); + // currently only support for child trie key + // note that this function should not be use in a runtime + // as it will change its behavior with future child types. + if ChildType::from_prefixed_key(&result).is_some() { + Some(result) + } else { + None + } + } + + pub fn new_ref(inner: &Vec) -> Option<&Self> { + let result = PrefixedStorageKey::ref_cast(inner); + // currently only support for child trie key + // note that this function should not be use in a runtime + // as it will change its behavior with future child types. + if ChildType::from_prefixed_key(&result).is_some() { + Some(result) + } else { + None + } + } + + /// Get inner key, this should + /// only be needed when writing + /// into parent trie to avoid an + /// allocation. + pub fn key(self) -> Vec { + self.0 + } +} + /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] @@ -109,24 +172,6 @@ pub mod well_known_keys { // Other code might depend on this, so be careful changing this. 
key.starts_with(CHILD_STORAGE_KEY_PREFIX) } - - /// Determine whether a child trie key is valid. - /// - /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. - /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. - pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(super::DEFAULT_CHILD_TYPE_PARENT_PREFIX); - if has_right_prefix { - // This is an attempt to catch a change of `is_child_storage_key`, which - // just checks if the key has prefix `:child_storage:` at the moment of writing. - debug_assert!( - is_child_storage_key(&storage_key), - "`is_child_trie_key_valid` is a subset of `is_child_storage_key`", - ); - } - has_right_prefix - } } /// Information related to a child state. @@ -212,7 +257,7 @@ impl ChildInfo { /// Return a the full location in the direct parent of /// this trie. - pub fn prefixed_storage_key(&self) -> Vec { + pub fn prefixed_storage_key(&self) -> PrefixedStorageKey { match self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { data, @@ -222,13 +267,13 @@ impl ChildInfo { /// Returns a the full location in the direct parent of /// this trie. - pub fn into_prefixed_storage_key(self) -> Vec { + pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey { match self { ChildInfo::ParentKeyId(ChildTrieParentKeyId { mut data, }) => { ChildType::ParentKeyId.do_prefix_key(&mut data); - data + PrefixedStorageKey(data) }, } } @@ -264,7 +309,7 @@ impl ChildType { /// Transform a prefixed key into a tuple of the child type /// and the unprefixed representation of the key. 
- pub fn from_prefixed_key<'a>(storage_key: &'a [u8]) -> Option<(Self, &'a [u8])> { + pub fn from_prefixed_key<'a>(storage_key: &'a PrefixedStorageKey) -> Option<(Self, &'a [u8])> { let match_type = |storage_key: &'a [u8], child_type: ChildType| { let prefix = child_type.parent_prefix(); if storage_key.starts_with(prefix) { @@ -277,12 +322,12 @@ impl ChildType { } /// Produce a prefixed key for a given child type. - fn new_prefixed_key(&self, key: &[u8]) -> Vec { + fn new_prefixed_key(&self, key: &[u8]) -> PrefixedStorageKey { let parent_prefix = self.parent_prefix(); let mut result = Vec::with_capacity(parent_prefix.len() + key.len()); result.extend_from_slice(parent_prefix); result.extend_from_slice(key); - result + PrefixedStorageKey(result) } /// Prefixes a vec with the prefix for this child type. diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 3c56fbdcdcc2e..e4849dee99aec 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -128,7 +128,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { child_content.data.clone().into_iter().collect() ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); - (prefixed_storage_key, state_root.encode()) + (prefixed_storage_key.key(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From 750cdd8af4e916cf74633825b1f7998a1a415eb1 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 23 Mar 2020 10:00:50 +0100 Subject: [PATCH 70/85] Fix rpc test. 
--- client/rpc/src/state/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 74455c99f61f8..57c91c13540ff 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -34,7 +34,7 @@ use substrate_test_runtime_client::{ const STORAGE_KEY: &[u8] = b"child"; fn prefixed_storage_key() -> PrefixedStorageKey { - let child_info = ChildInfo::new_default(&b":child_storage:default:child"[..]); + let child_info = ChildInfo::new_default(&STORAGE_KEY[..]); child_info.prefixed_storage_key() } From cc65f4483528b27012c446a590555869bb77b20a Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 23 Mar 2020 10:08:20 +0100 Subject: [PATCH 71/85] bump spec version --- bin/node/runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index ccd2eba78f329..ba219a1890e6b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -83,7 +83,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
- spec_version: 239, + spec_version: 240, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; From f1b23dfa420f80457b60ace224808bf843b0bbe1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 26 Mar 2020 20:06:54 +0100 Subject: [PATCH 72/85] Apply review suggestion --- client/network/src/protocol.rs | 23 ++++++--------- .../src/protocol/light_client_handler.rs | 19 +++++-------- client/rpc-api/src/child_state/mod.rs | 3 ++ primitives/state-machine/src/backend.rs | 4 +-- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/input.rs | 15 +++------- primitives/state-machine/src/ext.rs | 4 +-- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/storage/src/lib.rs | 28 ++++--------------- test-utils/runtime/client/src/lib.rs | 2 +- 10 files changed, 35 insertions(+), 67 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 788c88b620f9c..d8c8f8dc9cdd1 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -323,7 +323,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { message::RemoteReadChildRequest { id, block, - storage_key: storage_key.key(), + storage_key: storage_key.into_inner(), keys, }); @@ -365,7 +365,7 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { last, min, max, - storage_key: storage_key.map(|p| p.key()), + storage_key: storage_key.map(|p| p.into_inner()), key, }); @@ -1626,7 +1626,7 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { + let child_info = match ChildType::from_prefixed_key(prefixed_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage 
key".into()), }; @@ -1727,24 +1727,17 @@ impl Protocol { request.last ); let key = StorageKey(request.key); - let prefixed_key = if let Some(storage_key) = request.storage_key.as_ref() { - if let Some(storage_key) = PrefixedStorageKey::new_ref(storage_key) { - Ok(Some(storage_key)) - } else { - Err("Invalid prefixed storage key".into()) - } - } else { - Ok(None) - }; + let prefixed_key = request.storage_key.as_ref() + .map(|storage_key| PrefixedStorageKey::new_ref(storage_key)); let (first, last, min, max) = (request.first, request.last, request.min, request.max); - let proof = match prefixed_key.and_then(|p_key| self.context_data.chain.key_changes_proof( + let proof = match self.context_data.chain.key_changes_proof( first, last, min, max, - p_key, + prefixed_key, &key, - )) { + ) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index dc6ad3fc34048..4f20c9ced0526 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -513,7 +513,7 @@ where let block = Decode::decode(&mut request.block.as_ref())?; let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key); - let child_info = match prefixed_key.and_then(|key| ChildType::from_prefixed_key(key)) { + let child_info = match ChildType::from_prefixed_key(prefixed_key) { Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)), None => Err("Invalid child storage key".into()), }; @@ -592,18 +592,12 @@ where let max = Decode::decode(&mut request.max.as_ref())?; let key = StorageKey(request.key.clone()); let storage_key = if request.storage_key.is_empty() { - Ok(None) + None } else { - if let Some(storage_key) = PrefixedStorageKey::new_ref(&request.storage_key) { - Ok(Some(storage_key)) - } else { - Err("Invalid 
prefix for storage key.".into()) - } + Some(PrefixedStorageKey::new_ref(&request.storage_key)) }; - let proof = match storage_key.and_then(|storage_key| { - self.chain.key_changes_proof(first, last, min, max, storage_key, &key) - }) { + let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) { Ok(proof) => proof, Err(error) => { log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", @@ -922,7 +916,7 @@ fn serialize_request(request: &Request) -> api::v1::light::Request Request::ReadChild { request, .. } => { let r = api::v1::light::RemoteReadChildRequest { block: request.block.encode(), - storage_key: request.storage_key.clone().key(), + storage_key: request.storage_key.clone().into_inner(), keys: request.keys.clone(), }; api::v1::light::request::Request::RemoteReadChildRequest(r) @@ -941,7 +935,8 @@ fn serialize_request(request: &Request) -> api::v1::light::Request last: request.last_block.1.encode(), min: request.tries_roots.1.encode(), max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().map(|s| s.key()).unwrap_or_default(), + storage_key: request.storage_key.clone().map(|s| s.into_inner()) + .unwrap_or_default(), key: request.key.clone(), }; api::v1::light::request::Request::RemoteChangesRequest(r) diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index 3c530b64dec30..a46269cad6c0c 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -23,6 +23,9 @@ use crate::state::error::FutureResult; pub use self::gen_client::Client as ChildStateClient; /// Substrate child state API +/// +/// Note that all `PrefixedStorageKey` are desierialized +/// from json and not guaranted valid. 
#[rpc] pub trait ChildStateApi { /// RPC Metadata diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c3b2146a73ae1..0d4134b6ad618 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -187,9 +187,9 @@ pub trait Backend: std::fmt::Debug { let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { - child_roots.push((prefixed_storage_key.key(), None)); + child_roots.push((prefixed_storage_key.into_inner(), None)); } else { - child_roots.push((prefixed_storage_key.key(), Some(child_root.encode()))); + child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } let (root, parent_txs) = self.storage_root( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index b8b3210a87c9c..f03d5c1659ba8 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -242,7 +242,7 @@ impl Externalities for BasicExternalities { if &empty_hash[..] == &child_root[..] { top.remove(prefixed_storage_key.as_slice()); } else { - top.insert(prefixed_storage_key.key(), child_root); + top.insert(prefixed_storage_key.into_inner(), child_root); } } diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 4007620f92ca8..4f0f3da40c52b 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -177,17 +177,10 @@ impl Decode for InputKey { block: Decode::decode(input)?, key: Decode::decode(input)?, })), - 3 => { - let block = Decode::decode(input)?; - if let Some(storage_key) = PrefixedStorageKey::new(Decode::decode(input)?) 
{ - Ok(InputKey::ChildIndex(ChildIndex { - block, - storage_key, - })) - } else { - Err("Invalid prefixed key in change trie".into()) - } - }, + 3 => Ok(InputKey::ChildIndex(ChildIndex { + block: Decode::decode(input)?, + storage_key: PrefixedStorageKey::new(Decode::decode(input)?), + })), _ => Err("Invalid input key variant".into()), } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 33f502a75bdb2..fa0e24d2ec1b9 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -458,9 +458,9 @@ where // A better design would be to manage 'child_storage_transaction' in a // similar way as 'storage_transaction' but for each child trie. if is_empty { - self.overlay.set_storage(prefixed_storage_key.key(), None); + self.overlay.set_storage(prefixed_storage_key.into_inner(), None); } else { - self.overlay.set_storage(prefixed_storage_key.key(), Some(root.clone())); + self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone())); } trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b0048d90f4103..58787597534e2 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -331,7 +331,7 @@ impl Backend for InMemory where H::Out: Codec { if let Some(child_info) = child_info.as_ref() { let prefix_storage_key = child_info.prefixed_storage_key(); let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((prefix_storage_key.key(), ch.as_ref().into())); + new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into())); } else { root_map = Some(map); } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index de2a0d7e01856..eeb57d66770dd 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -61,36 +61,20 @@ 
impl DerefMut for PrefixedStorageKey { impl PrefixedStorageKey { /// Create a prefixed storage key from its byte array /// representation. - /// Returns `None` on unknown prefix. - pub fn new(inner: Vec) -> Option { - let result = PrefixedStorageKey(inner); - // currently only support for child trie key - // note that this function should not be use in a runtime - // as it will change its behavior with future child types. - if ChildType::from_prefixed_key(&result).is_some() { - Some(result) - } else { - None - } + pub fn new(inner: Vec) -> Self { + PrefixedStorageKey(inner) } - pub fn new_ref(inner: &Vec) -> Option<&Self> { - let result = PrefixedStorageKey::ref_cast(inner); - // currently only support for child trie key - // note that this function should not be use in a runtime - // as it will change its behavior with future child types. - if ChildType::from_prefixed_key(&result).is_some() { - Some(result) - } else { - None - } + /// Create a prefixed storage key reference. + pub fn new_ref(inner: &Vec) -> &Self { + PrefixedStorageKey::ref_cast(inner) } /// Get inner key, this should /// only be needed when writing /// into parent trie to avoid an /// allocation. 
- pub fn key(self) -> Vec { + pub fn into_inner(self) -> Vec { self.0 } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index e4849dee99aec..10360c8076338 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -128,7 +128,7 @@ impl substrate_test_client::GenesisInit for GenesisParameters { child_content.data.clone().into_iter().collect() ); let prefixed_storage_key = child_content.child_info.prefixed_storage_key(); - (prefixed_storage_key.key(), state_root.encode()) + (prefixed_storage_key.into_inner(), state_root.encode()) }); let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( storage.top.clone().into_iter().chain(child_roots).collect() From c631a462833c1f560432d1cba8fad26184a25eff Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 30 Mar 2020 18:15:59 +0200 Subject: [PATCH 73/85] Fix unrelated warning for CI --- client/network/src/protocol/generic_proto/handler/group.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 69a519134a6ff..6b23263b14c5f 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -64,7 +64,6 @@ use libp2p::swarm::{ NegotiatedSubstream, }; use log::{debug, error}; -use sp_runtime::ConsensusEngineId; use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; /// Implements the `IntoProtocolsHandler` trait of libp2p. 
From 7c8f3935b9a952ba2811dd6d0ca4f16e85f63e32 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 15:49:08 +0200 Subject: [PATCH 74/85] companion fix --- .maintain/gitlab/check_polkadot_companion_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 04524a736acca..65e3af5c7ab3e 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -57,7 +57,7 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + pr_body="$(echo "$pr_data" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ From b2240f16f4bd4d2224f88566209fc17a6385eaea Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 16:28:39 +0200 Subject: [PATCH 75/85] Update .maintain/gitlab/check_polkadot_companion_build.sh --- .maintain/gitlab/check_polkadot_companion_build.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 65e3af5c7ab3e..e6ea27679aab5 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -57,7 +57,7 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - pr_body="$(echo "$pr_data" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + pr_body="$(echo 
"${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ @@ -102,4 +102,3 @@ cargo update # Test Polkadot pr or master branch with this Substrate commit. time cargo test --all --release --verbose - From f3b9f234d88bdfb63020b772736cb45c47f1d3e3 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 17:00:05 +0200 Subject: [PATCH 76/85] Fix compilation errors. --- client/db/src/bench.rs | 16 ++++++++++++++-- client/db/src/lib.rs | 2 +- frame/contracts/src/account_db.rs | 2 +- frame/contracts/src/exec.rs | 2 +- frame/contracts/src/lib.rs | 2 +- frame/contracts/src/tests.rs | 4 ++-- primitives/state-machine/src/backend.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 3 ++- .../state-machine/src/overlayed_changes.rs | 5 +---- .../state-machine/src/trie_backend_essence.rs | 1 - 10 files changed, 24 insertions(+), 15 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c626942084c5b..8f55f00766f21 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -98,7 +98,19 @@ impl BenchmarkingState { child_delta, false, ); - state.genesis = transaction.clone().drain(); + let mut keyspace = crate::Keyspaced::new(&[]); + for (info, mut updates) in transaction.clone().into_iter() { + keyspace.change_keyspace(info.keyspace()); + for (key, rc_val) in updates.drain() { + let key = if info.is_top_trie() { + key + } else { + keyspace.prefix_key(key.as_slice()).to_vec() + }; + + state.genesis.insert(key, rc_val); + } + } state.genesis_root = root.clone(); state.commit(root, transaction)?; Ok(state) @@ -287,8 +299,8 @@ impl StateBackend> for BenchmarkingState { } else if rc < 0 { db_transaction.delete(0, &key); } + keys.push(key); } - keys.push(key); } self.record.set(keys); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; diff --git a/client/db/src/lib.rs 
b/client/db/src/lib.rs index 6e8fd4eebad3b..f5cc98ad8289c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1169,7 +1169,7 @@ impl Backend { let mut ops: u64 = 0; let mut bytes: u64 = 0; for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { + .chain(operation.child_storage_updates.iter().flat_map(|(_, s, _)| s.iter())) { ops += 1; bytes += key.len() as u64; if let Some(v) = value.as_ref() { diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 524fb376e6d71..14c9ead7e6c22 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -215,7 +215,7 @@ impl AccountDb for DirectAccountDb { new_info.last_write = Some(>::block_number()); } - let child_info = &new_info.child_trie_unique_id(); + let child_info = &new_info.child_trie_info(); for (k, v) in changed.storage.into_iter() { if let Some(value) = child::get_raw( child_info, diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 847bfc2cc4ec9..d8b42b2f9ecae 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -577,7 +577,7 @@ where { let (output, change_set, deferred) = { let mut nested = self.nested(dest, trie_id.map(|trie_id| { - crate::trie_unique_id(&trie_id) + crate::child_trie_info(&trie_id) })); let output = func(&mut nested)?; (output, nested.overlay.into_change_set(), nested.deferred) diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 52a397995fe50..5a439ed3163f0 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -690,7 +690,7 @@ impl Module { .get_alive() .ok_or(ContractAccessError::IsTombstone)?; - let child_info = trie_unique_id(&contract_info.trie_id); + let child_info = child_trie_info(&contract_info.trie_id); let maybe_value = AccountDb::::get_storage( &DirectAccountDb, &address, diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs 
index 0c358b92ef2d6..0839aa7ea6312 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -311,8 +311,8 @@ fn account_removal_does_not_remove_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); - let child_info1 = crate::trie_unique_id(trie_id1.as_ref()); - let child_info2 = crate::trie_unique_id(trie_id2.as_ref()); + let child_info1 = crate::child_trie_info(trie_id1.as_ref()); + let child_info2 = crate::child_trie_info(trie_id2.as_ref()); let child_info1 = Some(&child_info1); let child_info2 = Some(&child_info2); let key1 = &[1; 32]; diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 32e7d41c68aaa..0f3af4466c69e 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,7 +21,7 @@ use sp_core::{Hasher, InnerHasher}; use codec::{Decode, Encode}; use sp_core::{traits::RuntimeCode, - storage::{ChildInfo, ChildrenMap, well_known_keys}}; + storage::{ChildInfo, ChildrenMap, well_known_keys, PrefixedStorageKey}}; use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; use crate::{ trie_backend::TrieBackend, diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 4fc32ed82ffd7..df731b699eb0f 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -17,7 +17,8 @@ //! Changes trie storage utilities. 
use std::collections::{BTreeMap, HashSet, HashMap}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use hash_db::{Prefix, EMPTY_PREFIX}; +use sp_core::Hasher; use sp_core::storage::PrefixedStorageKey; use sp_core::storage::ChildInfo; use sp_trie::DBValue; diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index fbdf3a5c1c857..c9b6a6f6defc2 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -29,7 +29,7 @@ use crate::{ use std::iter::FromIterator; use std::collections::{HashMap, BTreeMap, BTreeSet}; use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, PrefixedStorageKey}; use std::{mem, ops}; use sp_core::Hasher; @@ -37,9 +37,6 @@ use sp_core::Hasher; /// Storage key. pub type StorageKey = Vec; -/// Storage key. -pub type PrefixedStorageKey = Vec; - /// Storage value. pub type StorageValue = Vec; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0dc7174c205f7..4c8cde131c440 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -383,7 +383,6 @@ impl<'a, S, H> hash_db::HashDBRef for BackendStorageDBRef<'a, S, H> } } - /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorageRef { /// Type of in-memory overlay. 
From 63943ee00a222a1a910e7261b33f3c9d2bb92605 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 17:28:17 +0200 Subject: [PATCH 77/85] last attempt --- .maintain/gitlab/check_polkadot_companion_build.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index e6ea27679aab5..49d247da4e520 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -56,19 +56,24 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" + boldprint "pr_dta: #${pr_data}" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" + boldprint "pr_ref: #${pr_ref}" pr_body="$(echo "${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + boldprint "pr_body: #${pr_body}" pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" + boldprint "pr_comp: #${pr_companion}" if [ -z "${pr_companion}" ] then pr_companion="$(echo "${pr_body}" | sed -n -r \ 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" fi + boldprint "pr_com2: #${pr_companion}" if [ "${pr_companion}" ] then From 784cb2211dfa33f8a73f5fd011295a963f87239a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 19:13:50 +0200 Subject: [PATCH 78/85] Forcing companion pr. 
--- .maintain/gitlab/check_polkadot_companion_build.sh | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index 49d247da4e520..e98316eb3f6a4 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -56,24 +56,19 @@ then # get the last reference to a pr in polkadot pr_data="$(curl -sSL -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME})" - boldprint "pr_dta: #${pr_data}" pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" - boldprint "pr_ref: #${pr_ref}" - pr_body="$(echo "${pr_data}" | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - boldprint "pr_body: #${pr_body}" + pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - pr_companion="$(echo "${pr_body}" | sed -n -r \ + pr_companion="$(echo "${pr_data}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" - boldprint "pr_comp: #${pr_companion}" if [ -z "${pr_companion}" ] then pr_companion="$(echo "${pr_body}" | sed -n -r \ 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" fi - boldprint "pr_com2: #${pr_companion}" if [ "${pr_companion}" ] then @@ -107,3 +102,4 @@ cargo update # Test Polkadot pr or master branch with this Substrate commit. time cargo test --all --release --verbose + From 4bbb24dedda0d5eef20ea2d88d15c90ffb8befba Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 2 Apr 2020 19:54:48 +0200 Subject: [PATCH 79/85] revert ci changes. 
--- .maintain/gitlab/check_polkadot_companion_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index e98316eb3f6a4..04524a736acca 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -59,7 +59,7 @@ then pr_ref="$(echo $pr_data | grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*')" pr_body="$(echo $pr_data | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - pr_companion="$(echo "${pr_data}" | sed -n -r \ + pr_companion="$(echo "${pr_body}" | sed -n -r \ -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ | tail -n 1)" From 226f7cd879a95744662a8978d9693c54170eccc5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 14 Apr 2020 10:25:20 +0200 Subject: [PATCH 80/85] name of children in chain spec change. 
--- bin/node/cli/res/flaming-fir.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 7ed98239b54b6..3612d7284faba 100644 --- a/bin/node/cli/res/flaming-fir.json +++ b/bin/node/cli/res/flaming-fir.json @@ -134,7 +134,7 @@ "0x5f3e4907f716ac89b6347d15ececedca0b6a45321efae92aea15e0740ec7afe7": "0x00000000", "0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98e54094c2d5af8ae10b91e1288f4f59f2946d7738f2c509b7effd909e5e9ba0ad": "0x00" }, - "children": {} + "childrenDefault": {} } } } From 5938d8623c51c4fd33bd0ae5c7eb860d10ef76d4 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 10:23:08 +0200 Subject: [PATCH 81/85] remove terminal space --- primitives/storage/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index c7cbda520b6da..49f24a93cb192 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -382,10 +382,10 @@ impl ChildTrieParentKeyId { /// A few utilities methods are defined. pub struct ChildrenMap(pub BTreeMap); -/// Type alias for storage of children related content. +/// Type alias for storage of children related content. pub type ChildrenVec = Vec<(ChildInfo, T)>; -/// Type alias for storage of children related content. +/// Type alias for storage of children related content. pub type ChildrenSlice<'a, T> = &'a [(ChildInfo, T)]; #[cfg(feature = "std")] From bc2a198dc0da64899fd6d72cb3af28779b267026 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 14:25:27 +0200 Subject: [PATCH 82/85] sp-io documentation changes. 
--- primitives/io/src/lib.rs | 68 +++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 18 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index c8004057a78c9..5178fb7169d55 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -145,8 +145,9 @@ pub trait Storage { self.next_storage_key(&key) } - - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::get`). fn child_get( &self, storage_key: &[u8], @@ -160,7 +161,9 @@ pub trait Storage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } - /// Deprecated, please use dedicated runtime apis. + /// Read child key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::read`). fn child_read( &self, storage_key: &[u8], @@ -183,7 +186,9 @@ pub trait Storage { }) } - /// Deprecated, please use dedicated runtime apis. + /// Set a child storage value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::set`). fn child_set( &mut self, storage_key: &[u8], @@ -198,7 +203,9 @@ pub trait Storage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Deprecated, please use dedicated runtime apis. + /// Remove child key value. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear`). fn child_clear( &mut self, storage_key: &[u8], @@ -212,7 +219,9 @@ pub trait Storage { self.clear_child_storage(&child_info, key); } - /// Deprecated, please use dedicated runtime apis. + /// Remove all child storage values. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::storage_kill`). fn child_storage_kill( &mut self, storage_key: &[u8], @@ -225,7 +234,9 @@ pub trait Storage { self.kill_child_storage(&child_info); } - /// Deprecated, please use dedicated runtime apis. 
+ /// Check a child storage key. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::exists`). fn child_exists( &self, storage_key: &[u8], @@ -239,7 +250,9 @@ pub trait Storage { self.exists_child_storage(&child_info, key) } - /// Deprecated, please use dedicated runtime apis. + /// Clear child key by prefix. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_prefix`). fn child_clear_prefix( &mut self, storage_key: &[u8], @@ -253,7 +266,9 @@ pub trait Storage { self.clear_child_prefix(&child_info, prefix); } - /// Deprecated, please use dedicated runtime apis. + /// Child trie root calcualation. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::clear_root`). fn child_root( &mut self, storage_key: &[u8], @@ -268,7 +283,9 @@ pub trait Storage { self.child_storage_root(&child_info) } - /// Deprecated, please use dedicated runtime apis. + /// Child storage key iteration. + /// + /// Deprecated, please use dedicated runtime apis (`sp_io::default_child_storage::next_key`). fn child_next_key( &mut self, storage_key: &[u8], @@ -281,18 +298,17 @@ pub trait Storage { .expect("Invalid child definition"); self.next_child_storage_key(&child_info, key) } - } - /// Interface for accessing the child storage for default child trie, /// from within the runtime. #[runtime_interface] pub trait DefaultChildStorage { - /// `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + + /// Get a default child storage value for a given key. /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. + /// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie. + /// Result is `None` if the value for `key` in the child storage can not be found. 
fn get( &self, storage_key: &[u8], @@ -302,6 +318,8 @@ pub trait DefaultChildStorage { self.child_storage(&child_info, key).map(|s| s.to_vec()) } + /// Allocation efficient variant of `get`. + /// /// Get `key` from child storage, placing the value into `value_out` and return the number /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry /// doesn't exist at all. @@ -325,6 +343,8 @@ pub trait DefaultChildStorage { }) } + /// Set a child storage value. + /// /// Set `key` to `value` in the child storage denoted by `storage_key`. fn set( &mut self, @@ -336,7 +356,9 @@ pub trait DefaultChildStorage { self.set_child_storage(&child_info, key.to_vec(), value.to_vec()); } - /// Clear the given child storage of the given `key` and its value. + /// Clear a child storage key. + /// + /// For the default child storage at `storage_key`, clear value at `key`. fn clear ( &mut self, storage_key: &[u8], @@ -347,6 +369,9 @@ pub trait DefaultChildStorage { } /// Clear an entire child storage. + /// + /// If it exists, the child storage for `storage_key` + /// is removed. fn storage_kill( &mut self, storage_key: &[u8], @@ -355,7 +380,9 @@ pub trait DefaultChildStorage { self.kill_child_storage(&child_info); } - /// Check whether the given `key` exists in storage. + /// Check a child storage key. + /// + /// Check whether the given `key` exists in default child defined at `storage_key`. fn exists( &self, storage_key: &[u8], @@ -365,6 +392,8 @@ pub trait DefaultChildStorage { self.exists_child_storage(&child_info, key) } + /// Clear child default key by prefix. + /// /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. fn clear_prefix( &mut self, @@ -375,8 +404,9 @@ pub trait DefaultChildStorage { self.clear_child_prefix(&child_info, prefix); } - /// "Commit" all existing operations and compute the resulting child storage root. + /// Default child root calculation. 
/// + /// "Commit" all existing operations and compute the resulting child storage root. /// The hashing algorithm is defined by the `Block`. /// /// Returns the SCALE encoded hash. @@ -388,6 +418,8 @@ pub trait DefaultChildStorage { self.child_storage_root(&child_info) } + /// Child storage key iteration. + /// /// Get the next key in storage after the given one in lexicographic order in child storage. fn next_key( &mut self, From 619b454c3ccf7b5d35bfaf8eea3df00084ee1272 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 16:56:26 +0200 Subject: [PATCH 83/85] Retain compatibility with network protocol. --- client/network/src/protocol.rs | 28 +++++-- client/network/src/protocol/message.rs | 106 +++++++++++++++++++++++++ 2 files changed, 126 insertions(+), 8 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 84b913b284c62..9b55cc8d6a8d2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -38,7 +38,7 @@ use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message}; +use message::{BlockAnnounce, Message, MessageV6}; use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; @@ -91,7 +91,7 @@ const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead const MAX_KNOWN_EXTRINSICS: usize = 4096; // ~128kb per peer + overhead /// Current protocol version. 
-pub(crate) const CURRENT_VERSION: u32 = 6; +pub(crate) const CURRENT_VERSION: u32 = 7; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; @@ -524,12 +524,24 @@ impl Protocol { data: BytesMut, ) -> CustomMessageOutcome { - let message = match as Decode>::decode(&mut &data[..]) { - Ok(message) => message, - Err(err) => { - debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; + let input = &mut &data[..]; + let decoded_result = as Decode>::decode(input); + let all_read = input.is_empty(); + let message = match (all_read, decoded_result) { + (true, Ok(message)) => message, + (false, _) | (_, Err(_)) => match as Decode>::decode(&mut &data[..]) { + Ok(message) => if let Some(message) = message.into_latest() { + message + } else { + debug!(target: "sync", "Couldn't call packet sent by {}: {:?}: {}", who, data, "Invalid input."); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + }, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + } } }; diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 8638e9afc59b9..bc9d0f79facb7 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -25,6 +25,7 @@ pub use self::generic::{ RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, + RemoteReadChildRequestV6, }; use sc_client_api::StorageProof; @@ -39,6 +40,17 @@ pub type Message = generic::Message< ::Extrinsic, >; +/// Type alias for using the message type using block type parameters. 
+/// +/// This could be removed as soon as MIN_VERSION switch to 7. +pub type MessageV6 = generic::MessageV6< + ::Header, + ::Hash, + <::Header as HeaderT>::Number, + ::Extrinsic, +>; + + /// Type alias for using the status type using block type parameters. pub type Status = generic::Status< ::Hash, @@ -237,6 +249,49 @@ pub mod generic { Number(Number), } + /// A protocol V6 network message, this is only for backward compatibility. + /// It should only be use when we fail to decode a message + /// with the latest encoding. + #[derive(Decode)] + pub enum MessageV6 { + /// Status packet. + Status(Status), + /// Block request. + BlockRequest(BlockRequest), + /// Block response. + BlockResponse(BlockResponse), + /// Block announce. + BlockAnnounce(BlockAnnounce
), + /// Transactions. + Transactions(Transactions), + /// Consensus protocol message. + Consensus(ConsensusMessage), + /// Remote method call request. + RemoteCallRequest(RemoteCallRequest), + /// Remote method call response. + RemoteCallResponse(RemoteCallResponse), + /// Remote storage read request. + RemoteReadRequest(RemoteReadRequest), + /// Remote storage read response. + RemoteReadResponse(RemoteReadResponse), + /// Remote header request. + RemoteHeaderRequest(RemoteHeaderRequest), + /// Remote header response. + RemoteHeaderResponse(RemoteHeaderResponse
), + /// Remote changes request. + RemoteChangesRequest(RemoteChangesRequest), + /// Remote changes response. + RemoteChangesResponse(RemoteChangesResponse), + /// Remote child storage read request. + RemoteReadChildRequest(RemoteReadChildRequestV6), + /// Finality proof request. + FinalityProofRequest(FinalityProofRequest), + /// Finality proof response. + FinalityProofResponse(FinalityProofResponse), + /// Batch of consensus protocol messages. + ConsensusBatch(Vec), + } + /// A network message. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub enum Message { @@ -278,6 +333,39 @@ pub mod generic { ConsensusBatch(Vec), } + impl MessageV6 { + /// Get matching latest protocol message for a protocol V6 message. + /// + /// Note that this function expect that V6 message are only created + /// after a failed latest message decoding, so we do only convert for diverging + /// decoding path. + pub fn into_latest(self) -> Option> { + match self { + MessageV6::RemoteReadChildRequest(RemoteReadChildRequestV6 { + id, + block, + storage_key, + child_info: _, + child_type, + keys, + }) => { + // V6 protocol only got implementation for child type 1. + if child_type != 1 { + None + } else { + Some(Message::RemoteReadChildRequest(RemoteReadChildRequest { + id, + block, + storage_key, + keys, + })) + } + }, + _ => None, + } + } + } + impl Message { /// Message id useful for logging. pub fn id(&self) -> &'static str { @@ -468,6 +556,24 @@ pub mod generic { pub keys: Vec>, } + #[derive(Decode)] + /// Backward compatibility remote storage read child request. + pub struct RemoteReadChildRequestV6 { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Child Storage key. + pub storage_key: Vec, + /// Child trie source information. + pub child_info: Vec, + /// Child type, its required to resolve `child_info` + /// content and choose child implementation. + pub child_type: u32, + /// Storage key. 
+ pub keys: Vec>, + } + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. pub struct RemoteReadChildRequest { From fa52a8c43105e1052e039368ad66e291ca850c84 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 15 Apr 2020 17:16:29 +0200 Subject: [PATCH 84/85] Revert "Retain compatibility with network protocol." This reverts commit 619b454c3ccf7b5d35bfaf8eea3df00084ee1272. --- client/network/src/protocol.rs | 28 ++----- client/network/src/protocol/message.rs | 106 ------------------------- 2 files changed, 8 insertions(+), 126 deletions(-) diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 9b55cc8d6a8d2..84b913b284c62 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -38,7 +38,7 @@ use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message, MessageV6}; +use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; @@ -91,7 +91,7 @@ const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead const MAX_KNOWN_EXTRINSICS: usize = 4096; // ~128kb per peer + overhead /// Current protocol version. 
-pub(crate) const CURRENT_VERSION: u32 = 7; +pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support pub(crate) const MIN_VERSION: u32 = 3; @@ -524,24 +524,12 @@ impl Protocol { data: BytesMut, ) -> CustomMessageOutcome { - let input = &mut &data[..]; - let decoded_result = as Decode>::decode(input); - let all_read = input.is_empty(); - let message = match (all_read, decoded_result) { - (true, Ok(message)) => message, - (false, _) | (_, Err(_)) => match as Decode>::decode(&mut &data[..]) { - Ok(message) => if let Some(message) = message.into_latest() { - message - } else { - debug!(target: "sync", "Couldn't call packet sent by {}: {:?}: {}", who, data, "Invalid input."); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - }, - Err(err) => { - debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - } + let message = match as Decode>::decode(&mut &data[..]) { + Ok(message) => message, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; } }; diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index bc9d0f79facb7..8638e9afc59b9 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -25,7 +25,6 @@ pub use self::generic::{ RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, Roles, - RemoteReadChildRequestV6, }; use sc_client_api::StorageProof; @@ -40,17 +39,6 @@ pub type Message = generic::Message< ::Extrinsic, >; -/// Type alias for using the message type using block type parameters. 
-/// -/// This could be removed as soon as MIN_VERSION switch to 7. -pub type MessageV6 = generic::MessageV6< - ::Header, - ::Hash, - <::Header as HeaderT>::Number, - ::Extrinsic, ->; - - /// Type alias for using the status type using block type parameters. pub type Status = generic::Status< ::Hash, @@ -249,49 +237,6 @@ pub mod generic { Number(Number), } - /// A protocol V6 network message, this is only for backward compatibility. - /// It should only be use when we fail to decode a message - /// with the latest encoding. - #[derive(Decode)] - pub enum MessageV6 { - /// Status packet. - Status(Status), - /// Block request. - BlockRequest(BlockRequest), - /// Block response. - BlockResponse(BlockResponse), - /// Block announce. - BlockAnnounce(BlockAnnounce
), - /// Transactions. - Transactions(Transactions), - /// Consensus protocol message. - Consensus(ConsensusMessage), - /// Remote method call request. - RemoteCallRequest(RemoteCallRequest), - /// Remote method call response. - RemoteCallResponse(RemoteCallResponse), - /// Remote storage read request. - RemoteReadRequest(RemoteReadRequest), - /// Remote storage read response. - RemoteReadResponse(RemoteReadResponse), - /// Remote header request. - RemoteHeaderRequest(RemoteHeaderRequest), - /// Remote header response. - RemoteHeaderResponse(RemoteHeaderResponse
), - /// Remote changes request. - RemoteChangesRequest(RemoteChangesRequest), - /// Remote changes response. - RemoteChangesResponse(RemoteChangesResponse), - /// Remote child storage read request. - RemoteReadChildRequest(RemoteReadChildRequestV6), - /// Finality proof request. - FinalityProofRequest(FinalityProofRequest), - /// Finality proof response. - FinalityProofResponse(FinalityProofResponse), - /// Batch of consensus protocol messages. - ConsensusBatch(Vec), - } - /// A network message. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub enum Message { @@ -333,39 +278,6 @@ pub mod generic { ConsensusBatch(Vec), } - impl MessageV6 { - /// Get matching latest protocol message for a protocol V6 message. - /// - /// Note that this function expect that V6 message are only created - /// after a failed latest message decoding, so we do only convert for diverging - /// decoding path. - pub fn into_latest(self) -> Option> { - match self { - MessageV6::RemoteReadChildRequest(RemoteReadChildRequestV6 { - id, - block, - storage_key, - child_info: _, - child_type, - keys, - }) => { - // V6 protocol only got implementation for child type 1. - if child_type != 1 { - None - } else { - Some(Message::RemoteReadChildRequest(RemoteReadChildRequest { - id, - block, - storage_key, - keys, - })) - } - }, - _ => None, - } - } - } - impl Message { /// Message id useful for logging. pub fn id(&self) -> &'static str { @@ -556,24 +468,6 @@ pub mod generic { pub keys: Vec>, } - #[derive(Decode)] - /// Backward compatibility remote storage read child request. - pub struct RemoteReadChildRequestV6 { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Child Storage key. - pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, - /// Storage key. 
- pub keys: Vec>, - } - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote storage read child request. pub struct RemoteReadChildRequest { From 69ed11886456732ae8ec83bf3e3ff1aeff854cac Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 17 Apr 2020 12:31:33 +0200 Subject: [PATCH 85/85] fix renamed field related error --- primitives/state-machine/src/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 8d3ecb1b190a8..7f26085958e97 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -83,7 +83,7 @@ impl BasicExternalities { let mut ext = Self { inner: Storage { top: std::mem::replace(&mut storage.top, Default::default()), - children_default: std::mem::replace(&mut storage.children, Default::default()), + children_default: std::mem::replace(&mut storage.children_default, Default::default()), }, extensions: Default::default(), };