From e9e1f4a3c38fa9d17b841af9f6ab9f460162de49 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Mon, 10 Jun 2019 16:23:48 +0300 Subject: [PATCH 1/5] changes tries build cache added CT build cache test --- core/client/db/src/lib.rs | 29 ++- core/client/src/backend.rs | 5 +- core/client/src/call_executor.rs | 15 +- core/client/src/client.rs | 12 +- core/client/src/in_mem.rs | 12 +- core/client/src/light/backend.rs | 4 +- core/client/src/light/call_executor.rs | 9 +- core/client/src/light/fetcher.rs | 2 +- core/state-machine/src/changes_trie/build.rs | 84 ++++++-- .../src/changes_trie/build_cache.rs | 197 ++++++++++++++++++ core/state-machine/src/changes_trie/input.rs | 10 + core/state-machine/src/changes_trie/mod.rs | 75 ++++++- .../state-machine/src/changes_trie/storage.rs | 19 +- core/state-machine/src/ext.rs | 13 +- core/state-machine/src/lib.rs | 20 +- core/state-machine/src/testing.rs | 2 +- 16 files changed, 441 insertions(+), 67 deletions(-) create mode 100644 core/state-machine/src/changes_trie/build_cache.rs diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index e73d3df62c9c0..d86a6c995574b 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -36,7 +36,7 @@ mod utils; use std::sync::Arc; use std::path::PathBuf; use std::io; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use client::backend::NewBlockState; use client::blockchain::HeaderBackend; @@ -58,7 +58,7 @@ use runtime_primitives::traits::{ }; use state_machine::backend::Backend as StateBackend; use executor::RuntimeInfo; -use state_machine::{CodeExecutor, DBValue}; +use state_machine::{CodeExecutor, DBValue, ChangesTrieTransaction, ChangesTrieCachedBuildData, ChangesTrieBuildCache}; use crate::utils::{Meta, db_err, meta_keys, read_db, block_id_to_lookup_key, read_meta}; use client::leaves::{LeafSet, FinalizationDisplaced}; use client::children; @@ -385,6 +385,7 @@ pub struct BlockImportOperation { storage_updates: 
StorageCollection, child_storage_updates: ChildStorageCollection, changes_trie_updates: MemoryDB, + changes_trie_cache_update: Option>>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, @@ -467,8 +468,9 @@ where Block: BlockT, Ok(root) } - fn update_changes_trie(&mut self, update: MemoryDB) -> Result<(), client::error::Error> { - self.changes_trie_updates = update; + fn update_changes_trie(&mut self, update: ChangesTrieTransaction>) -> Result<(), client::error::Error> { + self.changes_trie_updates = update.0; + self.changes_trie_cache_update = update.1; Ok(()) } @@ -545,6 +547,7 @@ pub struct DbChangesTrieStorage { db: Arc, meta: Arc, Block::Hash>>>, min_blocks_to_keep: Option, + cache: RwLock>>, _phantom: ::std::marker::PhantomData, } @@ -556,6 +559,11 @@ impl> DbChangesTrieStorage { } } + /// Commit changes into changes trie build cache. + pub fn commit_cache(&self, cache_update: ChangesTrieCachedBuildData>) { + self.cache.write().insert(cache_update); + } + /// Prune obsolete changes tries. pub fn prune( &self, @@ -675,6 +683,10 @@ impl state_machine::ChangesTrieStorage> where Block: BlockT, { + fn cached_changed_keys(&self, root: &H256) -> Option>> { + self.cache.read().get(root).cloned() + } + fn get(&self, key: &H256, _prefix: &[u8]) -> Result, String> { self.db.get(columns::CHANGES_TRIE, &key[..]) .map_err(|err| format!("{}", err)) @@ -759,6 +771,7 @@ impl> Backend { db, meta, min_blocks_to_keep: if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, + cache: RwLock::new(ChangesTrieBuildCache::new()), _phantom: Default::default(), }; @@ -1072,7 +1085,6 @@ impl> Backend { self.changes_tries_storage.commit(&mut transaction, changes_trie_updates); let cache = operation.old_state.release(); // release state reference so that it can be finalized - if finalized { // TODO: ensure best chain contains this block. 
self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; @@ -1126,6 +1138,10 @@ impl> Backend { let write_result = self.storage.db.write(transaction).map_err(db_err); + if let Some(changes_trie_cache_update) = operation.changes_trie_cache_update { + self.changes_tries_storage.commit_cache(changes_trie_cache_update); + } + if let Some((number, hash, enacted, retracted, displaced_leaf, is_best, mut cache)) = imported { if let Err(e) = write_result { let mut leaves = self.blockchain.leaves.write(); @@ -1255,6 +1271,7 @@ impl client::backend::Backend for Backend whe storage_updates: Default::default(), child_storage_updates: Default::default(), changes_trie_updates: MemoryDB::default(), + changes_trie_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), set_head: None, @@ -1482,7 +1499,7 @@ mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie(changes_trie_update).unwrap(); + op.update_changes_trie((changes_trie_update, None)).unwrap(); backend.commit_operation(op).unwrap(); header_hash diff --git a/core/client/src/backend.rs b/core/client/src/backend.rs index 79bc1b475b414..b3a1d79d43b47 100644 --- a/core/client/src/backend.rs +++ b/core/client/src/backend.rs @@ -22,10 +22,9 @@ use primitives::ChangesTrieConfiguration; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; use runtime_primitives::traits::{Block as BlockT, NumberFor}; use state_machine::backend::Backend as StateBackend; -use state_machine::ChangesTrieStorage as StateChangesTrieStorage; +use state_machine::{ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction}; use consensus::well_known_cache_keys; use hash_db::Hasher; -use trie::MemoryDB; use parking_lot::Mutex; /// In memory array of storage values. 
@@ -95,7 +94,7 @@ pub trait BlockImportOperation where child_update: ChildStorageCollection, ) -> error::Result<()>; /// Inject changes trie data into the database. - fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()>; + fn update_changes_trie(&mut self, update: ChangesTrieTransaction>) -> error::Result<()>; /// Insert auxiliary keys. Values are `None` if should be deleted. fn insert_aux(&mut self, ops: I) -> error::Result<()> where I: IntoIterator, Option>)>; diff --git a/core/client/src/call_executor.rs b/core/client/src/call_executor.rs index c107e6f2bbd4a..54f02ad4ac3b2 100644 --- a/core/client/src/call_executor.rs +++ b/core/client/src/call_executor.rs @@ -17,15 +17,15 @@ use std::{sync::Arc, cmp::Ord, panic::UnwindSafe, result, cell::RefCell, rc::Rc}; use parity_codec::{Encode, Decode}; use runtime_primitives::{ - generic::BlockId, traits::Block as BlockT, + generic::BlockId, traits::{Block as BlockT, NumberFor}, }; use state_machine::{ self, OverlayedChanges, Ext, CodeExecutor, ExecutionManager, ExecutionStrategy, NeverOffchainExt, backend::Backend as _, + ChangesTrieTransaction, }; use executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use hash_db::Hasher; -use trie::MemoryDB; use primitives::{offchain, H256, Blake2Hasher, NativeOrEncoded, NeverNativeValue}; use crate::runtime_api::{ProofRecorder, InitializeBlock}; @@ -110,7 +110,14 @@ where manager: ExecutionManager, native_call: Option, side_effects_handler: Option<&mut O>, - ) -> Result<(NativeOrEncoded, (S::Transaction, H::Out), Option>), error::Error>; + ) -> Result< + ( + NativeOrEncoded, + (S::Transaction, H::Out), + Option>> + ), + error::Error, + >; /// Execute a call to a contract on top of given state, gathering execution proof. 
/// @@ -317,7 +324,7 @@ where ) -> error::Result<( NativeOrEncoded, (S::Transaction, ::Out), - Option>, + Option>>, )> { state_machine::new( state, diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 67a9140e9e42d..d4e83a89a1d6a 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -55,7 +55,7 @@ use parity_codec::{Encode, Decode}; use state_machine::{ DBValue, Backend as StateBackend, CodeExecutor, ChangesTrieAnchorBlockId, ExecutionStrategy, ExecutionManager, prove_read, prove_child_read, - ChangesTrieRootsStorage, ChangesTrieStorage, + ChangesTrieRootsStorage, ChangesTrieStorage, ChangesTrieTransaction, key_changes, key_changes_proof, OverlayedChanges, NeverOffchainExt, }; use hash_db::Hasher; @@ -93,7 +93,7 @@ type StorageUpdate = < >::BlockImportOperation as BlockImportOperation >::State as state_machine::Backend>::Transaction; -type ChangesUpdate = trie::MemoryDB; +type ChangesUpdate = ChangesTrieTransaction>; /// Execution strategies settings. 
#[derive(Debug, Clone)] @@ -615,6 +615,10 @@ impl Client where } impl<'a, Block: BlockT> ChangesTrieStorage> for AccessedRootsRecorder<'a, Block> { + fn cached_changed_keys(&self, root: &H256) -> Option>> { + self.storage.cached_changed_keys(root) + } + fn get(&self, key: &H256, prefix: &[u8]) -> Result, String> { self.storage.get(key, prefix) } @@ -922,7 +926,7 @@ impl Client where } // FIXME #1232: correct path logic for when to execute this function - let (storage_update,changes_update,storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; + let (storage_update, changes_update, storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; let is_new_best = finalized || match fork_choice { ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number, @@ -978,7 +982,7 @@ impl Client where body: Option>, ) -> error::Result<( Option>, - Option>, + Option>>, Option<( Vec<(Vec, Option>)>, Vec<(Vec, Vec<(Vec, Option>)>)> diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 8a229470be1b1..672ab2e950468 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -16,7 +16,7 @@ //! 
In memory client backend -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use parking_lot::{RwLock, Mutex}; use primitives::{ChangesTrieConfiguration, storage::well_known_keys}; @@ -24,7 +24,7 @@ use runtime_primitives::generic::{BlockId, DigestItem}; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor}; use runtime_primitives::{Justification, StorageOverlay, ChildrenStorageOverlay}; use state_machine::backend::{Backend as StateBackend, InMemory}; -use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId}; +use state_machine::{self, InMemoryChangesTrieStorage, ChangesTrieAnchorBlockId, ChangesTrieTransaction}; use hash_db::Hasher; use trie::MemoryDB; use consensus::well_known_cache_keys::Id as CacheKeyId; @@ -484,8 +484,8 @@ where Ok(()) } - fn update_changes_trie(&mut self, update: MemoryDB) -> error::Result<()> { - self.changes_trie_update = Some(update); + fn update_changes_trie(&mut self, update: ChangesTrieTransaction>) -> error::Result<()> { + self.changes_trie_update = Some(update.0); Ok(()) } @@ -755,6 +755,10 @@ impl state_machine::ChangesTrieStorage> for Change Block: BlockT, H: Hasher, { + fn cached_changed_keys(&self, _root: &H::Out) -> Option>> { + None + } + fn get(&self, _key: &H::Out, _prefix: &[u8]) -> Result, String> { Err("Dummy implementation".into()) } diff --git a/core/client/src/light/backend.rs b/core/client/src/light/backend.rs index 6f6bde2418373..f15047a24bd60 100644 --- a/core/client/src/light/backend.rs +++ b/core/client/src/light/backend.rs @@ -22,7 +22,7 @@ use std::sync::{Arc, Weak}; use parking_lot::{RwLock, Mutex}; use runtime_primitives::{generic::BlockId, Justification, StorageOverlay, ChildrenStorageOverlay}; -use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState}; +use state_machine::{Backend as StateBackend, TrieBackend, backend::InMemory as InMemoryState, ChangesTrieTransaction}; use 
runtime_primitives::traits::{Block as BlockT, NumberFor, Zero, Header}; use crate::in_mem::{self, check_genesis_storage}; use crate::backend::{ @@ -280,7 +280,7 @@ where Ok(()) } - fn update_changes_trie(&mut self, _update: MemoryDB) -> ClientResult<()> { + fn update_changes_trie(&mut self, _update: ChangesTrieTransaction>) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) } diff --git a/core/client/src/light/call_executor.rs b/core/client/src/light/call_executor.rs index faa7c10def070..f0b8085f59798 100644 --- a/core/client/src/light/call_executor.rs +++ b/core/client/src/light/call_executor.rs @@ -25,10 +25,10 @@ use std::{ use parity_codec::{Encode, Decode}; use primitives::{offchain, H256, Blake2Hasher, convert_hash, NativeOrEncoded}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{One, Block as BlockT, Header as HeaderT}; +use runtime_primitives::traits::{One, Block as BlockT, Header as HeaderT, NumberFor}; use state_machine::{ self, Backend as StateBackend, CodeExecutor, OverlayedChanges, - ExecutionStrategy, create_proof_check_backend, + ExecutionStrategy, ChangesTrieTransaction, create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, NeverOffchainExt }; use hash_db::Hasher; @@ -40,7 +40,6 @@ use crate::call_executor::CallExecutor; use crate::error::{Error as ClientError, Result as ClientResult}; use crate::light::fetcher::{Fetcher, RemoteCallRequest}; use executor::{RuntimeVersion, NativeVersion}; -use trie::MemoryDB; /// Call executor that executes methods on remote node, querying execution proof /// and checking proof by re-executing locally. 
@@ -172,7 +171,7 @@ where ) -> ClientResult<( NativeOrEncoded, (S::Transaction, ::Out), - Option>, + Option>>, )> { Err(ClientError::NotAvailableOnLightClient.into()) } @@ -349,7 +348,7 @@ impl CallExecutor for ) -> ClientResult<( NativeOrEncoded, (S::Transaction, ::Out), - Option>, + Option>>, )> { // there's no actual way/need to specify native/wasm execution strategy on light node // => we can safely ignore passed values diff --git a/core/client/src/light/fetcher.rs b/core/client/src/light/fetcher.rs index dd718001cd265..3ef3f2eeb2ad8 100644 --- a/core/client/src/light/fetcher.rs +++ b/core/client/src/light/fetcher.rs @@ -448,7 +448,7 @@ struct RootsStorage<'a, Number: SimpleArithmetic, Hash: 'a> { impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> where H: Hasher, - Number: ::std::fmt::Display + Clone + SimpleArithmetic + Encode + Decode + Send + Sync + 'static, + Number: ::std::fmt::Display + ::std::hash::Hash + Clone + SimpleArithmetic + Encode + Decode + Send + Sync + 'static, Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, { fn build_anchor( diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index d32d28906fb48..b9fcf06661cd6 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -32,13 +32,13 @@ use crate::changes_trie::{AnchorBlockId, Configuration, Storage, BlockNumber}; /// /// Returns Err if storage error has occurred OR if storage haven't returned /// required data. 
-pub fn prepare_input<'a, B, S, H, Number>( +pub(crate) fn prepare_input<'a, B, S, H, Number>( backend: &'a B, storage: &'a S, config: &'a Configuration, changes: &'a OverlayedChanges, parent: &'a AnchorBlockId, -) -> Result> + 'a, String> +) -> Result<(impl Iterator> + 'a, Vec), String> where B: Backend, S: Storage, @@ -49,13 +49,15 @@ pub fn prepare_input<'a, B, S, H, Number>( let extrinsics_input = prepare_extrinsics_input( backend, &number, - changes)?; - let digest_input = prepare_digest_input::<_, H, Number>( + changes, + )?; + let (digest_input, digest_input_blocks) = prepare_digest_input::<_, H, Number>( parent, config, number, - storage)?; - Ok(extrinsics_input.chain(digest_input)) + storage, + )?; + Ok((extrinsics_input.chain(digest_input), digest_input_blocks)) } /// Prepare ExtrinsicIndex input pairs. @@ -116,21 +118,18 @@ fn prepare_digest_input<'a, S, H, Number>( config: &Configuration, block: Number, storage: &'a S -) -> Result> + 'a, String> +) -> Result<(impl Iterator> + 'a, Vec), String> where S: Storage, H: Hasher, H::Out: 'a, Number: BlockNumber, { - digest_build_iterator(config, block.clone()) + let digest_input_blocks = digest_build_iterator(config, block.clone()).collect::>(); + digest_input_blocks.clone().into_iter() .try_fold(BTreeMap::new(), move |mut map, digest_build_block| { let trie_root = storage.root(parent, digest_build_block.clone())?; let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); let mut insert_to_map = |key: Vec| { match map.entry(key.clone()) { @@ -154,6 +153,19 @@ fn prepare_digest_input<'a, S, H, Number>( } }; + if let Some(changed_keys) = storage.cached_changed_keys(&trie_root) { + for changed_key in changed_keys { + insert_to_map(changed_key); + } + + return Ok(map); + } + + let trie_storage = TrieBackendEssence::<_, H>::new( + 
crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| if let Some(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { @@ -168,7 +180,7 @@ fn prepare_digest_input<'a, S, H, Number>( Ok(map) }) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) + .map(|pairs| (pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), digest_input_blocks)) } #[cfg(test)] @@ -177,7 +189,8 @@ mod test { use primitives::Blake2Hasher; use primitives::storage::well_known_keys::EXTRINSIC_INDEX; use crate::backend::InMemory; - use crate::changes_trie::storage::InMemoryStorage; + use crate::changes_trie::{RootsStorage, storage::InMemoryStorage}; + use crate::changes_trie::build_cache::IncompleteCachedBuildData; use crate::overlayed_changes::OverlayedValue; use super::*; @@ -267,7 +280,7 @@ mod test { &changes, &parent, ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ + assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![100] }, vec![0, 2, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![101] }, vec![1]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 5, key: vec![103] }, vec![0, 1]), @@ -286,7 +299,7 @@ mod test { &changes, &parent, ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ + assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), @@ -310,7 +323,7 @@ mod test { &changes, &parent, ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ + assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ 
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), @@ -342,7 +355,7 @@ mod test { &changes, &parent, ).unwrap(); - assert_eq!(changes_trie_nodes.collect::>>(), vec![ + assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![100] }, vec![0, 2, 3]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![101] }, vec![1]), InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 4, key: vec![103] }, vec![0, 1]), @@ -353,4 +366,39 @@ mod test { InputPair::DigestIndex(DigestIndex { block: 4, key: vec![105] }, vec![1, 3]), ]); } + + #[test] + fn cache_is_used_when_changes_trie_is_built() { + let (backend, mut storage, changes) = prepare_for_build(); + let config = changes.changes_trie_config.as_ref().unwrap(); + let parent = AnchorBlockId { hash: Default::default(), number: 15 }; + + // override some actual values from storage with values from the cache + // (keys 100, 101, 103, 105 are now missing from block#4 => they do not appear + // in l2 digest at block 16) + let trie4_root = storage.root(&parent, 4).unwrap().unwrap(); + let cached4_data = IncompleteCachedBuildData::new(false) + .set_digest_input_blocks(vec![1, 2, 3]) + .insert(vec![100]) + .insert(vec![102]) + .complete(4, trie4_root); + storage.cache_mut().insert(cached4_data); + + let changes_trie_nodes = prepare_input( + &backend, + &storage, + config, + &changes, + &parent, + ).unwrap(); + assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), + InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), + + InputPair::DigestIndex(DigestIndex { block: 16, 
key: vec![100] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), + InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), + ]); + } } diff --git a/core/state-machine/src/changes_trie/build_cache.rs b/core/state-machine/src/changes_trie/build_cache.rs new file mode 100644 index 0000000000000..b8eb8f3d9e4bb --- /dev/null +++ b/core/state-machine/src/changes_trie/build_cache.rs @@ -0,0 +1,197 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Changes tries build cache. + +use std::collections::{HashMap, HashSet}; + +/// Changes trie build cache. +/// +/// Helps to avoid read of changes tries from the database when digest trie +/// is built. It holds changed keys for every block (indexed by changes trie +/// root) that could be referenced by future digest items. For digest entries +/// it also holds keys covered by this digest. +/// +/// Entries are pruned from the cache once digest block that is using this entry +/// is inserted (because digest block will include all keys from this entry). +/// When there's a fork, entries are pruned when first changes trie is inserted. +pub struct BuildCache { + /// Map of block (implies changes trie) number => changes trie root.
+ roots_by_number: HashMap, + /// Map of changes trie root => set of changed keys. + changed_keys: HashMap>>, +} + +/// The data that has been cached during changes trie building. +#[derive(Debug, PartialEq)] +pub struct CachedBuildData { + block: N, + trie_root: H, + is_top_level_digest: bool, + digest_input_blocks: Vec, + changed_keys: HashSet>, +} + +/// The data (without changes trie root) that has been cached during changes trie building. +#[derive(Debug, PartialEq)] +pub(crate) struct IncompleteCachedBuildData { + is_top_level_digest: bool, + digest_input_blocks: Vec, + changed_keys: HashSet>, +} + +impl BuildCache + where + N: Eq + ::std::hash::Hash, + H: Eq + ::std::hash::Hash + Clone, +{ + /// Create new changes trie build cache. + pub fn new() -> Self { + BuildCache { + roots_by_number: HashMap::new(), + changed_keys: HashMap::new(), + } + } + + /// Get cached changed keys for changes trie with given root. + pub fn get(&self, root: &H) -> Option<&HashSet>> { + self.changed_keys.get(&root) + } + + /// Insert data into cache. + pub fn insert(&mut self, data: CachedBuildData) { + if !data.is_top_level_digest { + self.roots_by_number.insert(data.block, data.trie_root.clone()); + self.changed_keys.insert(data.trie_root, data.changed_keys); + } + + for digest_input_block in data.digest_input_blocks { + let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); + if let Some(digest_input_block_hash) = digest_input_block_hash { + self.changed_keys.remove(&digest_input_block_hash); + } + } + } +} + +impl IncompleteCachedBuildData { + /// Create new cached data. + pub(crate) fn new(is_top_level_digest: bool) -> Self { + IncompleteCachedBuildData { + is_top_level_digest, + digest_input_blocks: Vec::new(), + changed_keys: HashSet::new(), + } + } + + /// Returns true if this is cached data for a top-level digest CT.
+ #[cfg(test)] + pub(crate) fn is_top_level_digest(&self) -> bool { + self.is_top_level_digest + } + + /// Complete cached data with computed changes trie root. + pub(crate) fn complete(self, block: N, trie_root: H) -> CachedBuildData { + CachedBuildData { + block, + trie_root, + is_top_level_digest: self.is_top_level_digest, + digest_input_blocks: self.digest_input_blocks, + changed_keys: self.changed_keys, + } + } + + /// Called for digest entries only. Set numbers of blocks that are superseded + /// by this new entry. + pub(crate) fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { + self.digest_input_blocks = digest_input_blocks; + self + } + + /// Insert changed keys into cached data. + pub(crate) fn insert(mut self, changed_key: Vec) -> Self { + if !self.is_top_level_digest { + self.changed_keys.insert(changed_key); + } + self + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn updated_keys_are_stored_when_non_top_level_digest_is_built() { + let mut data = IncompleteCachedBuildData::::new(false); + data = data.insert(vec![1]); + assert_eq!(data.changed_keys.len(), 1); + + let mut cache = BuildCache::new(); + cache.insert(data.complete(1, 1)); + assert_eq!(cache.changed_keys.len(), 1); + assert_eq!(cache.get(&1).unwrap().clone(), vec![vec![1]].into_iter().collect()); + } + + #[test] + fn updated_keys_are_not_stored_when_top_level_digest_is_built() { + let mut data = IncompleteCachedBuildData::::new(true); + data = data.insert(vec![1]); + assert_eq!(data.changed_keys.len(), 0); + + let mut cache = BuildCache::new(); + cache.insert(data.complete(1, 1)); + assert_eq!(cache.changed_keys.len(), 0); + assert_eq!(cache.get(&1), None); + } + + #[test] + fn obsolete_entries_are_purged_when_new_ct_is_built() { + let mut cache = BuildCache::::new(); + cache.insert(IncompleteCachedBuildData::new(false) + .insert(vec![1]) + .complete(1, 1)); + cache.insert(IncompleteCachedBuildData::new(false) + .insert(vec![2]) + .complete(2, 2)); + 
cache.insert(IncompleteCachedBuildData::new(false) + .insert(vec![3]) + .complete(3, 3)); + + assert_eq!(cache.changed_keys.len(), 3); + + cache.insert(IncompleteCachedBuildData::new(false) + .set_digest_input_blocks(vec![1, 2, 3]) + .complete(4, 4)); + + assert_eq!(cache.changed_keys.len(), 1); + + cache.insert(IncompleteCachedBuildData::new(false) + .insert(vec![8]) + .complete(8, 8)); + cache.insert(IncompleteCachedBuildData::new(false) + .insert(vec![12]) + .complete(12, 12)); + + assert_eq!(cache.changed_keys.len(), 3); + + cache.insert(IncompleteCachedBuildData::new(true) + .set_digest_input_blocks(vec![4, 8, 12]) + .complete(16, 16)); + + assert_eq!(cache.changed_keys.len(), 0); + } +} \ No newline at end of file diff --git a/core/state-machine/src/changes_trie/input.rs b/core/state-machine/src/changes_trie/input.rs index ae939c028b3e1..0b9cbeff7dd11 100644 --- a/core/state-machine/src/changes_trie/input.rs +++ b/core/state-machine/src/changes_trie/input.rs @@ -61,6 +61,16 @@ pub enum InputKey { DigestIndex(DigestIndex), } +impl InputPair { + /// Extract storage key that this pair corresponds to. + pub fn key(&self) -> &[u8] { + match *self { + InputPair::ExtrinsicIndex(ref key, _) => &key.key, + InputPair::DigestIndex(ref key, _) => &key.key, + } + } +} + impl Into<(Vec, Vec)> for InputPair { fn into(self) -> (Vec, Vec) { match self { diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index ab36eb6423b2e..397a71ec64e26 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -36,22 +36,26 @@ //! are propagated through its storage root on the top level storage. 
mod build; +mod build_cache; mod build_iterator; mod changes_iterator; mod input; mod prune; mod storage; +pub use self::build_cache::{BuildCache, CachedBuildData}; pub use self::storage::InMemoryStorage; pub use self::changes_iterator::{key_changes, key_changes_proof, key_changes_proof_check}; pub use self::prune::{prune, oldest_non_pruned_trie}; +use std::collections::HashSet; use hash_db::Hasher; use crate::backend::Backend; use num_traits::{One, Zero}; use parity_codec::{Decode, Encode}; use primitives; use crate::changes_trie::build::prepare_input; +use crate::changes_trie::build_cache::IncompleteCachedBuildData; use crate::overlayed_changes::OverlayedChanges; use trie::{MemoryDB, TrieDBMut, TrieMut, DBValue}; @@ -65,6 +69,7 @@ pub trait BlockNumber: Clone + From + One + Zero + PartialEq + Ord + + ::std::hash::Hash + ::std::ops::Add + ::std::ops::Sub + ::std::ops::Mul + ::std::ops::Div + ::std::ops::Rem + @@ -79,6 +84,7 @@ impl BlockNumber for T where T: Clone + From + One + Zero + PartialEq + Ord + + ::std::hash::Hash + ::std::ops::Add + ::std::ops::Sub + ::std::ops::Mul + ::std::ops::Div + ::std::ops::Rem + @@ -107,6 +113,8 @@ pub trait RootsStorage: Send + Sync { /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait Storage: RootsStorage { + /// Get cached changed keys at trie with given root. Returns None if entry is missing from the cache. + fn cached_changed_keys(&self, root: &H::Out) -> Option>>; /// Get a trie node. 
fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String>; } @@ -134,7 +142,7 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N storage: Option<&'a S>, changes: &OverlayedChanges, parent_hash: H::Out, -) -> Result, H::Out)>, ()> +) -> Result, H::Out, Option>)>, ()> where H::Out: Ord + 'static, { @@ -145,19 +153,78 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N // build_anchor error should not be considered fatal let parent = storage.build_anchor(parent_hash).map_err(|_| ())?; + let block = parent.number.clone() + One::one(); // storage errors are considered fatal (similar to situations when runtime fetches values from storage) - let input_pairs = prepare_input::(backend, storage, config, changes, &parent) + let (input_pairs, digest_input_blocks) = prepare_input::(backend, storage, config, changes, &parent) .expect("changes trie: storage access is not allowed to fail within runtime"); + + // prepare cached data + let mut cached_data = prepare_cached_build_data(config, block.clone()); + cached_data = cached_data + .map(|cached_data| cached_data.set_digest_input_blocks(digest_input_blocks)); + let mut root = Default::default(); let mut mdb = MemoryDB::default(); { let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - for (key, value) in input_pairs.map(Into::into) { + for input_pair in input_pairs { + cached_data = cached_data + .map(|cached_data| cached_data.insert(input_pair.key().to_vec())); + + let (key, value) = input_pair.into(); trie.insert(&key, &value) .expect("changes trie: insertion to trie is not allowed to fail within runtime"); } } - Ok(Some((mdb, root))) + let cached_data = cached_data.map(|d| d.complete(block, root.clone())); + Ok(Some((mdb, root, cached_data))) +} + +/// Prepare empty cached build data for given block. 
+fn prepare_cached_build_data( + config: &Configuration, + block: Number, +) -> Option> { + // when digests are not enabled in configuration, we do not need to cache anything + // because it'll never be used again for building other tries + if !config.is_digest_build_enabled() { + return None; + } + + // we do not need to cache anything when top-level digest trie is created, because + // it'll never be used again for building other tries + // + // but we still want to use cache data to purge obsolete entries from the cache + let is_top_level_digest = match config.digest_level_at_block(block) { + Some((digest_level, _, _)) => digest_level == config.digest_levels, + _ => false, + }; + + Some(IncompleteCachedBuildData::new(is_top_level_digest)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn build_data_is_not_cached_when_digests_are_disabled() { + let config = Configuration { digest_interval: 0, digest_levels: 0 }; + assert_eq!(prepare_cached_build_data(&config, 8u32), None); + } + + #[test] + fn build_data_is_cached_when_digests_are_enabled() { + let config = Configuration { digest_interval: 8, digest_levels: 2 }; + assert_eq!(prepare_cached_build_data(&config, 4u32).unwrap().is_top_level_digest(), false); + assert_eq!(prepare_cached_build_data(&config, 8u32).unwrap().is_top_level_digest(), false); + } + + #[test] + fn build_data_is_not_cached_when_digests_are_enabled_and_top_level_digest_is_built() { + let config = Configuration { digest_interval: 8, digest_levels: 2 }; + assert_eq!(prepare_cached_build_data(&config, 64u32).unwrap().is_top_level_digest(), true); + } } diff --git a/core/state-machine/src/changes_trie/storage.rs b/core/state-machine/src/changes_trie/storage.rs index 8da205251532c..40014f1848045 100644 --- a/core/state-machine/src/changes_trie/storage.rs +++ b/core/state-machine/src/changes_trie/storage.rs @@ -16,16 +16,14 @@ //! Changes trie storage utilities. 
-use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; use hash_db::Hasher; use trie::DBValue; use trie::MemoryDB; use parking_lot::RwLock; -use crate::changes_trie::{RootsStorage, Storage, AnchorBlockId, BlockNumber}; +use crate::changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}; use crate::trie_backend_essence::TrieBackendStorage; -#[cfg(test)] -use std::collections::HashSet; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] @@ -34,6 +32,7 @@ use crate::changes_trie::input::InputPair; /// In-memory implementation of changes trie storage. pub struct InMemoryStorage { data: RwLock>, + cache: BuildCache, } /// Adapter for using changes trie storage as a TrieBackendEssence' storage. @@ -55,6 +54,7 @@ impl InMemoryStorage { roots: BTreeMap::new(), mdb, }), + cache: BuildCache::new(), } } @@ -63,6 +63,11 @@ impl InMemoryStorage { Self::with_db(Default::default()) } + /// Get mutable cache reference. + pub fn cache_mut(&mut self) -> &mut BuildCache { + &mut self.cache + } + /// Create the storage with given blocks. 
pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { Self { @@ -70,6 +75,7 @@ impl InMemoryStorage { roots: blocks.into_iter().collect(), mdb: MemoryDB::default(), }), + cache: BuildCache::new(), } } @@ -89,6 +95,7 @@ impl InMemoryStorage { roots, mdb, }), + cache: BuildCache::new(), } } @@ -132,6 +139,10 @@ impl RootsStorage for InMemoryStorage } impl Storage for InMemoryStorage { + fn cached_changed_keys(&self, root: &H::Out) -> Option>> { + self.cache.get(root).cloned() + } + fn get(&self, key: &H::Out, prefix: &[u8]) -> Result, String> { MemoryDB::::get(&self.data.read().mdb, key, prefix) } diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index f6369159ba4b8..6cf50ac5abf89 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -19,7 +19,10 @@ use std::{error, fmt, cmp::Ord}; use log::warn; use crate::backend::Backend; -use crate::changes_trie::{Storage as ChangesTrieStorage, build_changes_trie}; +use crate::changes_trie::{ + Storage as ChangesTrieStorage, CachedBuildData as ChangesTrieCachedBuildData, + build_changes_trie, +}; use crate::{Externalities, OverlayedChanges, ChildStorageKey}; use hash_db::Hasher; use primitives::offchain; @@ -78,7 +81,7 @@ where /// This differs from `storage_transaction` behavior, because the moment when /// `storage_changes_root` is called matters + we need to remember additional /// data at this moment (block number). - changes_trie_transaction: Option<(MemoryDB, H::Out)>, + changes_trie_transaction: Option<(MemoryDB, H::Out, Option>)>, /// Additional externalities for offchain workers. /// /// If None, some methods from the trait might not be supported. @@ -115,14 +118,14 @@ where } /// Get the transaction necessary to update the backend. 
- pub fn transaction(mut self) -> ((B::Transaction, H::Out), Option>) { + pub fn transaction(mut self) -> ((B::Transaction, H::Out), Option>) { let _ = self.storage_root(); let (storage_transaction, changes_trie_transaction) = ( self.storage_transaction .expect("storage_transaction always set after calling storage root; qed"), self.changes_trie_transaction - .map(|(tx, _)| tx), + .map(|(tx, _, cache)| (tx, cache)), ); ( @@ -326,7 +329,7 @@ where self.overlay, parent_hash, )?; - Ok(self.changes_trie_transaction.as_ref().map(|(_, root)| root.clone())) + Ok(self.changes_trie_transaction.as_ref().map(|(_, root, _)| root.clone())) } fn offchain(&mut self) -> Option<&mut dyn offchain::Externalities> { diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index f2275cc41fd0c..3fffeb33b571b 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -48,9 +48,11 @@ pub use changes_trie::{ Storage as ChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, InMemoryStorage as InMemoryChangesTrieStorage, + BuildCache as ChangesTrieBuildCache, + CachedBuildData as ChangesTrieCachedBuildData, key_changes, key_changes_proof, key_changes_proof_check, prune as prune_changes_tries, - oldest_non_pruned_trie as oldest_non_pruned_changes_trie + oldest_non_pruned_trie as oldest_non_pruned_changes_trie, }; pub use overlayed_changes::OverlayedChanges; pub use proving_backend::{ @@ -60,6 +62,12 @@ pub use proving_backend::{ pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; +/// Type of changes trie transaction. +pub type ChangesTrieTransaction = ( + MemoryDB, + Option::Out, N>>, +); + /// A wrapper around a child storage key. /// /// This wrapper ensures that the child storage key is correct and properly used. 
It is @@ -525,7 +533,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where pub fn execute( &mut self, strategy: ExecutionStrategy, - ) -> Result<(Vec, (B::Transaction, H::Out), Option>), Box> { + ) -> Result<(Vec, (B::Transaction, H::Out), Option>), Box> { // We are not giving a native call and thus we are sure that the result can never be a native // value. self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( @@ -549,7 +557,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where CallResult, bool, Option<(B::Transaction, H::Out)>, - Option>, + Option>, ) where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, @@ -582,7 +590,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where mut native_call: Option, orig_prospective: OverlayedChangeSet, on_consensus_failure: Handler, - ) -> (CallResult, Option<(B::Transaction, H::Out)>, Option>) where + ) -> (CallResult, Option<(B::Transaction, H::Out)>, Option>) where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, Handler: FnOnce( @@ -613,7 +621,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where compute_tx: bool, mut native_call: Option, orig_prospective: OverlayedChangeSet, - ) -> (CallResult, Option<(B::Transaction, H::Out)>, Option>) where + ) -> (CallResult, Option<(B::Transaction, H::Out)>, Option>) where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, { @@ -644,7 +652,7 @@ impl<'a, H, N, B, T, O, Exec> StateMachine<'a, H, N, B, T, O, Exec> where ) -> Result<( NativeOrEncoded, Option<(B::Transaction, H::Out)>, - Option> + Option>, ), Box> where R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, diff --git a/core/state-machine/src/testing.rs b/core/state-machine/src/testing.rs index dc19dad7b34bb..4d46d937aa696 100644 --- a/core/state-machine/src/testing.rs +++ 
b/core/state-machine/src/testing.rs @@ -255,7 +255,7 @@ impl Externalities for TestExternalities Some(&self.changes_trie_storage), &self.overlay, parent, - )?.map(|(_, root)| root)) + )?.map(|(_, root, _)| root)) } fn offchain(&mut self) -> Option<&mut dyn offchain::Externalities> { From b8fd2dc6198e22b9575bd5d5e7117a2bae622333 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 24 Jul 2019 12:05:58 +0300 Subject: [PATCH 2/5] fix lines width --- core/client/db/src/lib.rs | 5 ++++- core/client/src/client.rs | 8 +++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index d86a6c995574b..f6e563e17b807 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -468,7 +468,10 @@ where Block: BlockT, Ok(root) } - fn update_changes_trie(&mut self, update: ChangesTrieTransaction>) -> Result<(), client::error::Error> { + fn update_changes_trie( + &mut self, + update: ChangesTrieTransaction>, + ) -> Result<(), client::error::Error> { self.changes_trie_updates = update.0; self.changes_trie_cache_update = update.1; Ok(()) diff --git a/core/client/src/client.rs b/core/client/src/client.rs index d4e83a89a1d6a..8c6977b09dbbf 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -926,7 +926,13 @@ impl Client where } // FIXME #1232: correct path logic for when to execute this function - let (storage_update, changes_update, storage_changes) = self.block_execution(&operation.op, &import_headers, origin, hash, body.clone())?; + let (storage_update, changes_update, storage_changes) = self.block_execution( + &operation.op, + &import_headers, + origin, + hash, + body.clone(), + )?; let is_new_best = finalized || match fork_choice { ForkChoiceStrategy::LongestChain => import_headers.post().number() > &last_best_number, From 95215512268870e956f9252461bea278a93e1931 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 4 Sep 2019 11:05:31 +0300 Subject: [PATCH 3/5] 
fixed some grumbles --- core/client/db/src/lib.rs | 4 ++-- core/state-machine/src/changes_trie/build.rs | 8 +++---- .../src/changes_trie/build_cache.rs | 14 +++++++++++-- core/state-machine/src/changes_trie/mod.rs | 21 ++++++++++++------- core/state-machine/src/ext.rs | 2 +- core/state-machine/src/lib.rs | 2 +- 6 files changed, 34 insertions(+), 17 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 3b0e9c0dffe8a..60fabe13fdb0d 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -493,7 +493,7 @@ where Block: BlockT, update: ChangesTrieTransaction>, ) -> Result<(), client::error::Error> { self.changes_trie_updates = update.0; - self.changes_trie_cache_update = update.1; + self.changes_trie_cache_update = Some(update.1); Ok(()) } @@ -1535,7 +1535,7 @@ mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, None)).unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::DoNothing)).unwrap(); backend.commit_operation(op).unwrap(); header_hash diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 7f31b6de910e6..6f84aa46bcb64 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -704,14 +704,14 @@ mod test { // // "2" child storage: // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 - let trie4_root = storage.root(&parent, 4).unwrap().unwrap(); - let cached4_data = IncompleteCachedBuildData::new(false) + let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); + let cached_data4 = IncompleteCachedBuildData::new(false) .set_digest_input_blocks(vec![1, 2, 3]) .insert(None, vec![vec![100], vec![102]].into_iter().collect()) .insert(Some(b"1".to_vec()), vec![vec![103], 
vec![104]].into_iter().collect()) .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, trie4_root); - storage.cache_mut().perform(CacheAction::CacheBuildData(cached4_data)); + .complete(4, trie_root4); + storage.cache_mut().perform(CacheAction::CacheBuildData(cached_data4)); let (root_changes_trie_nodes, child_changes_tries_nodes, _) = prepare_input( &backend, diff --git a/core/state-machine/src/changes_trie/build_cache.rs b/core/state-machine/src/changes_trie/build_cache.rs index 005606438edbe..f35ac65c5c7b3 100644 --- a/core/state-machine/src/changes_trie/build_cache.rs +++ b/core/state-machine/src/changes_trie/build_cache.rs @@ -23,7 +23,8 @@ use std::collections::{HashMap, HashSet}; /// Helps to avoid read of changes tries from the database when digest trie /// is built. It holds changed keys for every block (indexed by changes trie /// root) that could be referenced by future digest items. For digest entries -/// it also holds keys covered by this digest. +/// it also holds keys covered by this digest. Entries for top level digests +/// are never created, because they'll never be used to build other digests. /// /// Entries are pruned from the cache once digest block that is using this entry /// is inserted (because digest block will includes all keys from this entry). @@ -31,13 +32,18 @@ use std::collections::{HashMap, HashSet}; pub struct BuildCache { /// Map of block (implies changes true) number => changes trie root. roots_by_number: HashMap, - /// Map of changes trie root => + /// Map of changes trie root => set of storage keys that are in this trie. + /// The `Option>` in inner `HashMap` stands for the child storage key. + /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. + /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. changed_keys: HashMap>, HashSet>>>, } /// The action to perform when block-with-changes-trie is imported. 
#[derive(Debug, PartialEq)] pub enum CacheAction { + /// No action is required. + DoNothing, /// Cache data that has been collected when CT has been built. CacheBuildData(CachedBuildData), /// Clear cache from all existing entries. When new CT configuration enacts, @@ -84,6 +90,7 @@ impl BuildCache /// Insert data into cache. pub fn perform(&mut self, action: CacheAction) { match action { + CacheAction::DoNothing => (), CacheAction::CacheBuildData(data) => { if !data.is_top_level_digest { self.roots_by_number.insert(data.block, data.trie_root.clone()); @@ -134,6 +141,9 @@ impl IncompleteCachedBuildData { /// Called for digest entries only. Set numbers of blocks that are superseded /// by this new entry. + /// + /// If/when this build data is committed to the cache, entries for these blocks + /// will be removed from the cache. pub(crate) fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { self.digest_input_blocks = digest_input_blocks; self diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index 1406ece03b2d0..c106c285f081c 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -174,7 +174,7 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N storage: Option<&'a S>, changes: &OverlayedChanges, parent_hash: H::Out, -) -> Result, H::Out, Option>)>, ()> +) -> Result, H::Out, CacheAction)>, ()> where H::Out: Ord + 'static, { @@ -206,6 +206,7 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N // prepare cached data let mut cached_data = prepare_cached_build_data(config, block.clone()); + let needs_changed_keys = cached_data.is_some(); cached_data = cached_data .map(|cached_data| cached_data.set_digest_input_blocks(digest_input_blocks)); @@ -218,8 +219,10 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N let mut trie = TrieDBMut::::new(&mut mdb, &mut root); let mut storage_changed_keys = 
HashSet::new(); for input_pair in input_pairs { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); + if needs_changed_keys { + if let Some(key) = input_pair.key() { + storage_changed_keys.insert(key.to_vec()); + } } let (key, value) = input_pair.into(); @@ -248,8 +251,10 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N let mut storage_changed_keys = HashSet::new(); for input_pair in input_pairs { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); + if needs_changed_keys { + if let Some(key) = input_pair.key() { + storage_changed_keys.insert(key.to_vec()); + } } let (key, value) = input_pair.into(); @@ -265,8 +270,10 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N let cached_data = cached_data.map(|d| d.complete(block, root.clone())); let cache_action = match is_end_block { - false => cached_data.map(CacheAction::CacheBuildData), - true => Some(CacheAction::Clear), + false => cached_data + .map(CacheAction::CacheBuildData) + .unwrap_or(CacheAction::DoNothing), + true => CacheAction::Clear, }; Ok(Some((mdb, root, cache_action))) diff --git a/core/state-machine/src/ext.rs b/core/state-machine/src/ext.rs index ddfa7b4a31cf1..fde3ba2b01953 100644 --- a/core/state-machine/src/ext.rs +++ b/core/state-machine/src/ext.rs @@ -81,7 +81,7 @@ where /// This differs from `storage_transaction` behavior, because the moment when /// `storage_changes_root` is called matters + we need to remember additional /// data at this moment (block number). - changes_trie_transaction: Option<(MemoryDB, H::Out, Option>)>, + changes_trie_transaction: Option<(MemoryDB, H::Out, ChangesTrieCacheAction)>, /// Additional externalities for offchain workers. /// /// If None, some methods from the trait might not be supported. 
diff --git a/core/state-machine/src/lib.rs b/core/state-machine/src/lib.rs index 307fa585dfd29..70a7f3b1462b0 100644 --- a/core/state-machine/src/lib.rs +++ b/core/state-machine/src/lib.rs @@ -68,7 +68,7 @@ pub use trie_backend::TrieBackend; /// Type of changes trie transaction. pub type ChangesTrieTransaction = ( MemoryDB, - Option::Out, N>>, + ChangesTrieCacheAction<::Out, N>, ); /// A wrapper around a child storage key. From 27eb30b2e4f6803d4951019a77ce3a7920ebd3e9 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 4 Sep 2019 13:33:55 +0300 Subject: [PATCH 4/5] clear cache when: digests disabled, top-level or skewed digest is built --- core/client/db/src/lib.rs | 2 +- core/state-machine/src/changes_trie/build.rs | 8 +- .../src/changes_trie/build_cache.rs | 124 ++++++++++-------- core/state-machine/src/changes_trie/mod.rs | 78 +++++------ 4 files changed, 107 insertions(+), 105 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 60fabe13fdb0d..1a78e42f7985d 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -1535,7 +1535,7 @@ mod tests { let mut op = backend.begin_operation().unwrap(); backend.begin_state_operation(&mut op, block_id).unwrap(); op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::DoNothing)).unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); backend.commit_operation(op).unwrap(); header_hash diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index 6f84aa46bcb64..a6b08b35d4a11 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -328,7 +328,7 @@ mod test { use primitives::storage::well_known_keys::{EXTRINSIC_INDEX}; use crate::backend::InMemory; use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; - use 
crate::changes_trie::build_cache::{CacheAction, IncompleteCachedBuildData}; + use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; use super::*; @@ -705,13 +705,13 @@ mod test { // "2" child storage: // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); - let cached_data4 = IncompleteCachedBuildData::new(false) + let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) .set_digest_input_blocks(vec![1, 2, 3]) .insert(None, vec![vec![100], vec![102]].into_iter().collect()) .insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect()) .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, trie_root4); - storage.cache_mut().perform(CacheAction::CacheBuildData(cached_data4)); + .complete(4, &trie_root4); + storage.cache_mut().perform(cached_data4); let (root_changes_trie_nodes, child_changes_tries_nodes, _) = prepare_input( &backend, diff --git a/core/state-machine/src/changes_trie/build_cache.rs b/core/state-machine/src/changes_trie/build_cache.rs index f35ac65c5c7b3..0fb71d29676b4 100644 --- a/core/state-machine/src/changes_trie/build_cache.rs +++ b/core/state-machine/src/changes_trie/build_cache.rs @@ -42,12 +42,9 @@ pub struct BuildCache { /// The action to perform when block-with-changes-trie is imported. #[derive(Debug, PartialEq)] pub enum CacheAction { - /// No action is required. - DoNothing, /// Cache data that has been collected when CT has been built. CacheBuildData(CachedBuildData), - /// Clear cache from all existing entries. When new CT configuration enacts, - /// we don't need any cached data from previous configuration. + /// Clear cache from all existing entries. 
Clear, } @@ -56,15 +53,22 @@ pub enum CacheAction { pub struct CachedBuildData { block: N, trie_root: H, - is_top_level_digest: bool, digest_input_blocks: Vec, changed_keys: HashMap>, HashSet>>, } +/// The action to perform when block-with-changes-trie is imported. +#[derive(Debug, PartialEq)] +pub(crate) enum IncompleteCacheAction { + /// Cache data that has been collected when CT has been built. + CacheBuildData(IncompleteCachedBuildData), + /// Clear cache from all existing entries. + Clear, +} + /// The data (without changes trie root) that has been cached during changes trie building. #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { - is_top_level_digest: bool, digest_input_blocks: Vec, changed_keys: HashMap>, HashSet>>, } @@ -90,12 +94,9 @@ impl BuildCache /// Insert data into cache. pub fn perform(&mut self, action: CacheAction) { match action { - CacheAction::DoNothing => (), CacheAction::CacheBuildData(data) => { - if !data.is_top_level_digest { - self.roots_by_number.insert(data.block, data.trie_root.clone()); - self.changed_keys.insert(data.trie_root, data.changed_keys); - } + self.roots_by_number.insert(data.block, data.trie_root.clone()); + self.changed_keys.insert(data.trie_root, data.changed_keys); for digest_input_block in data.digest_input_blocks { let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); @@ -112,52 +113,79 @@ impl BuildCache } } +impl IncompleteCacheAction { + /// Returns true if we need to collect changed keys for this action. + pub fn collects_changed_keys(&self) -> bool { + match *self { + IncompleteCacheAction::CacheBuildData(_) => true, + IncompleteCacheAction::Clear => false, + } + } + + /// Complete cache action with computed changes trie root. 
+ pub(crate) fn complete(self, block: N, trie_root: &H) -> CacheAction { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => + CacheAction::CacheBuildData(build_data.complete(block, trie_root.clone())), + IncompleteCacheAction::Clear => CacheAction::Clear, + } + } + + /// Set numbers of blocks that are superseded by this new entry. + /// + /// If/when this build data is committed to the cache, entries for these blocks + /// will be removed from the cache. + pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => + IncompleteCacheAction::CacheBuildData(build_data.set_digest_input_blocks(digest_input_blocks)), + IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, + } + } + + /// Insert changed keys of given storage into cached data. + pub(crate) fn insert( + self, + storage_key: Option>, + changed_keys: HashSet>, + ) -> Self { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => + IncompleteCacheAction::CacheBuildData(build_data.insert(storage_key, changed_keys)), + IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, + } + } +} + impl IncompleteCachedBuildData { /// Create new cached data. - pub(crate) fn new(is_top_level_digest: bool) -> Self { + pub(crate) fn new() -> Self { IncompleteCachedBuildData { - is_top_level_digest, digest_input_blocks: Vec::new(), changed_keys: HashMap::new(), } } - /// Returns true if this is a cached data for top-level digest CT. - #[cfg(test)] - pub(crate) fn is_top_level_digest(&self) -> bool { - self.is_top_level_digest - } - - /// Complete cached data with computed changes trie root. 
- pub(crate) fn complete(self, block: N, trie_root: H) -> CachedBuildData { + fn complete(self, block: N, trie_root: H) -> CachedBuildData { CachedBuildData { block, trie_root, - is_top_level_digest: self.is_top_level_digest, digest_input_blocks: self.digest_input_blocks, changed_keys: self.changed_keys, } } - /// Called for digest entries only. Set numbers of blocks that are superseded - /// by this new entry. - /// - /// If/when this build data is committed to the cache, entries for these blocks - /// will be removed from the cache. - pub(crate) fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { + fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { self.digest_input_blocks = digest_input_blocks; self } - /// Insert changed keys of given storage into cached data. - pub(crate) fn insert( + fn insert( mut self, storage_key: Option>, changed_keys: HashSet>, ) -> Self { - if !self.is_top_level_digest { - self.changed_keys.insert(storage_key, changed_keys); - } + self.changed_keys.insert(storage_key, changed_keys); self } } @@ -168,7 +196,7 @@ mod tests { #[test] fn updated_keys_are_stored_when_non_top_level_digest_is_built() { - let mut data = IncompleteCachedBuildData::::new(false); + let mut data = IncompleteCachedBuildData::::new(); data = data.insert(None, vec![vec![1]].into_iter().collect()); assert_eq!(data.changed_keys.len(), 1); @@ -181,51 +209,37 @@ mod tests { ); } - #[test] - fn updated_keys_are_not_stored_when_top_level_digest_is_built() { - let mut data = IncompleteCachedBuildData::::new(true); - data = data.insert(None, vec![vec![1]].into_iter().collect()); - assert_eq!(data.changed_keys.len(), 0); - - let mut cache = BuildCache::new(); - cache.perform(CacheAction::CacheBuildData(data.complete(1, 1))); - assert_eq!(cache.changed_keys.len(), 0); - assert_eq!(cache.get(&1), None); - } - #[test] fn obsolete_entries_are_purged_when_new_ct_is_built() { let mut cache = BuildCache::::new(); - 
cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .insert(None, vec![vec![1]].into_iter().collect()) .complete(1, 1))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .insert(None, vec![vec![2]].into_iter().collect()) .complete(2, 2))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .insert(None, vec![vec![3]].into_iter().collect()) .complete(3, 3))); assert_eq!(cache.changed_keys.len(), 3); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .set_digest_input_blocks(vec![1, 2, 3]) .complete(4, 4))); assert_eq!(cache.changed_keys.len(), 1); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .insert(None, vec![vec![8]].into_iter().collect()) .complete(8, 8))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(false) + cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() .insert(None, vec![vec![12]].into_iter().collect()) .complete(12, 12))); assert_eq!(cache.changed_keys.len(), 3); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new(true) - .set_digest_input_blocks(vec![4, 8, 12]) - .complete(16, 16))); + cache.perform(CacheAction::Clear); assert_eq!(cache.changed_keys.len(), 0); } diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index c106c285f081c..d3534b2739a48 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -73,7 +73,7 @@ use num_traits::{One, 
Zero}; use codec::{Decode, Encode}; use primitives; use crate::changes_trie::build::prepare_input; -use crate::changes_trie::build_cache::IncompleteCachedBuildData; +use crate::changes_trie::build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}; use crate::overlayed_changes::OverlayedChanges; use trie::{MemoryDB, DBValue, TrieMut}; use trie::trie_types::TrieDBMut; @@ -193,7 +193,6 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N // build_anchor error should not be considered fatal let parent = storage.build_anchor(parent_hash).map_err(|_| ())?; let block = parent.number.clone() + One::one(); - let is_end_block = Some(&block) == config.end.as_ref(); // storage errors are considered fatal (similar to situations when runtime fetches values from storage) let (input_pairs, child_input_pairs, digest_input_blocks) = prepare_input::( @@ -205,10 +204,9 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N ).expect("changes trie: storage access is not allowed to fail within runtime"); // prepare cached data - let mut cached_data = prepare_cached_build_data(config, block.clone()); - let needs_changed_keys = cached_data.is_some(); - cached_data = cached_data - .map(|cached_data| cached_data.set_digest_input_blocks(digest_input_blocks)); + let mut cache_action = prepare_cached_build_data(config, block.clone()); + let needs_changed_keys = cache_action.collects_changed_keys(); + cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); let mut mdb = MemoryDB::default(); let mut child_roots = Vec::with_capacity(child_input_pairs.len()); @@ -231,11 +229,10 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N .expect("changes trie: insertion to trie is not allowed to fail within runtime"); } - cached_data = cached_data - .map(|cached_data| cached_data.insert( - Some(child_index.storage_key.clone()), - storage_changed_keys, - )); + cache_action = cache_action.insert( + 
Some(child_index.storage_key.clone()), + storage_changed_keys, + ); } if not_empty { child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); @@ -261,21 +258,13 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N trie.insert(&key, &value) .expect("changes trie: insertion to trie is not allowed to fail within runtime"); } - cached_data = cached_data - .map(|cached_data| cached_data.insert( - None, - storage_changed_keys, - )); + cache_action = cache_action.insert( + None, + storage_changed_keys, + ); } - let cached_data = cached_data.map(|d| d.complete(block, root.clone())); - let cache_action = match is_end_block { - false => cached_data - .map(CacheAction::CacheBuildData) - .unwrap_or(CacheAction::DoNothing), - true => CacheAction::Clear, - }; - + let cache_action = cache_action.complete(block, &root); Ok(Some((mdb, root, cache_action))) } @@ -283,29 +272,27 @@ pub fn build_changes_trie<'a, B: Backend, S: Storage, H: Hasher, N fn prepare_cached_build_data( config: ConfigurationRange, block: Number, -) -> Option> { +) -> IncompleteCacheAction { // when digests are not enabled in configuration, we do not need to cache anything // because it'll never be used again for building other tries + // => let's clear the cache if !config.config.is_digest_build_enabled() { - return None; + return IncompleteCacheAction::Clear; } - // when this is the last block where current configuration is active => there - // are no reason to cache anything + // when this is the last block where current configuration is active + // => let's clear the cache if config.end.as_ref() == Some(&block) { - return None; + return IncompleteCacheAction::Clear; } // we do not need to cache anything when top-level digest trie is created, because // it'll never be used again for building other tries - // - // but we still want to use cache data to purge obsolete entries from the cache - let is_top_level_digest = match 
config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) => digest_level == config.config.digest_levels, - _ => false, - }; - - Some(IncompleteCachedBuildData::new(is_top_level_digest)) + // => let's clear the cache + match config.config.digest_level_at_block(config.zero.clone(), block) { + Some((digest_level, _, _)) if digest_level == config.config.digest_levels => IncompleteCacheAction::Clear, + _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), + } } #[cfg(test)] @@ -313,31 +300,32 @@ mod tests { use super::*; #[test] - fn build_data_is_not_cached_when_digests_are_disabled() { + fn cache_is_cleared_when_digests_are_disabled() { let config = Configuration { digest_interval: 0, digest_levels: 0 }; let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 8u32), None); + assert_eq!(prepare_cached_build_data(config_range, 8u32), IncompleteCacheAction::Clear); } #[test] fn build_data_is_cached_when_digests_are_enabled() { let config = Configuration { digest_interval: 8, digest_levels: 2 }; let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32).unwrap().is_top_level_digest(), false); - assert_eq!(prepare_cached_build_data(config_range, 8u32).unwrap().is_top_level_digest(), false); + assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys()); + assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys()); + assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys()); } #[test] - fn build_data_is_not_cached_when_digests_are_enabled_and_top_level_digest_is_built() { + fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() { let config = Configuration { digest_interval: 8, digest_levels: 2 }; let config_range = ConfigurationRange { zero: 0, end: None, 
config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 64u32).unwrap().is_top_level_digest(), true); + assert_eq!(prepare_cached_build_data(config_range, 64u32), IncompleteCacheAction::Clear); } #[test] - fn build_data_is_not_cached_when_end_block_of_configuration_is_built() { + fn cache_is_cleared_when_end_block_of_configuration_is_built() { let config = Configuration { digest_interval: 8, digest_levels: 2 }; let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32), None); + assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32), IncompleteCacheAction::Clear); } } From 8fd7a80f3de32f5aff11aff6cb20f4cc2f3e7072 Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 4 Sep 2019 14:25:56 +0300 Subject: [PATCH 5/5] cached_changed_keys -> with_cached_changed_keys --- core/client/db/src/lib.rs | 8 +++-- core/client/src/client.rs | 8 +++-- core/client/src/in_mem.rs | 8 +++-- core/state-machine/src/changes_trie/build.rs | 31 +++++++++++-------- .../src/changes_trie/build_cache.rs | 16 ++++++++++ core/state-machine/src/changes_trie/mod.rs | 9 ++++-- .../state-machine/src/changes_trie/storage.rs | 8 +++-- 7 files changed, 65 insertions(+), 23 deletions(-) diff --git a/core/client/db/src/lib.rs b/core/client/db/src/lib.rs index 1a78e42f7985d..f5ece21fbb710 100644 --- a/core/client/db/src/lib.rs +++ b/core/client/db/src/lib.rs @@ -710,8 +710,12 @@ where self } - fn cached_changed_keys(&self, root: &H256) -> Option>, HashSet>>> { - self.cache.read().get(root).cloned() + fn with_cached_changed_keys( + &self, + root: &H256, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.cache.read().with_changed_keys(root, functor) } fn get(&self, key: &H256, _prefix: Prefix) -> Result, String> { diff --git a/core/client/src/client.rs b/core/client/src/client.rs index 20bb589636b1f..47a93ac4e39db 100644 --- a/core/client/src/client.rs +++ 
b/core/client/src/client.rs @@ -636,8 +636,12 @@ impl Client where self } - fn cached_changed_keys(&self, root: &H256) -> Option>, HashSet>>> { - self.storage.cached_changed_keys(root) + fn with_cached_changed_keys( + &self, + root: &H256, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.storage.with_cached_changed_keys(root, functor) } fn get(&self, key: &H256, prefix: Prefix) -> Result, String> { diff --git a/core/client/src/in_mem.rs b/core/client/src/in_mem.rs index 1c898fe055521..3c6a1e18c01d6 100644 --- a/core/client/src/in_mem.rs +++ b/core/client/src/in_mem.rs @@ -766,8 +766,12 @@ impl state_machine::ChangesTrieStorage> for Change self } - fn cached_changed_keys(&self, _root: &H::Out) -> Option>, HashSet>>> { - None + fn with_cached_changed_keys( + &self, + _root: &H::Out, + _functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + false } fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { diff --git a/core/state-machine/src/changes_trie/build.rs b/core/state-machine/src/changes_trie/build.rs index a6b08b35d4a11..824bdd3542d58 100644 --- a/core/state-machine/src/changes_trie/build.rs +++ b/core/state-machine/src/changes_trie/build.rs @@ -245,21 +245,26 @@ fn prepare_digest_input<'a, H, Number>( }; // try to get all updated keys from cache - if let Some(changed_keys) = storage.cached_changed_keys(&trie_root) { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key, - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys { - insert_to_map(map, changed_key); + let populated_from_cache = storage.with_cached_changed_keys( + &trie_root, + &mut |changed_keys| { + for (storage_key, changed_keys) in changed_keys { + let map = match storage_key { + Some(storage_key) => child_map + .entry(ChildIndex:: { + block: block.clone(), + storage_key: storage_key.clone(), + }) + 
.or_default(), + None => &mut map, + }; + for changed_key in changed_keys.iter().cloned() { + insert_to_map(map, changed_key); + } } } + ); + if populated_from_cache { return Ok((map, child_map)); } diff --git a/core/state-machine/src/changes_trie/build_cache.rs b/core/state-machine/src/changes_trie/build_cache.rs index 0fb71d29676b4..f5c7c28b6b801 100644 --- a/core/state-machine/src/changes_trie/build_cache.rs +++ b/core/state-machine/src/changes_trie/build_cache.rs @@ -91,6 +91,22 @@ impl BuildCache self.changed_keys.get(&root) } + /// Execute given functor with cached entry for given block. + /// Returns true if the functor has been called and false otherwise. + pub fn with_changed_keys( + &self, + root: &H, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + match self.changed_keys.get(&root) { + Some(changed_keys) => { + functor(changed_keys); + true + }, + None => false, + } + } + /// Insert data into cache. pub fn perform(&mut self, action: CacheAction) { match action { diff --git a/core/state-machine/src/changes_trie/mod.rs b/core/state-machine/src/changes_trie/mod.rs index d3534b2739a48..f771fddf61964 100644 --- a/core/state-machine/src/changes_trie/mod.rs +++ b/core/state-machine/src/changes_trie/mod.rs @@ -134,8 +134,13 @@ pub trait RootsStorage: Send + Sync { pub trait Storage: RootsStorage { /// Casts from self reference to RootsStorage reference. fn as_roots_storage(&self) -> &dyn RootsStorage; - /// Get cached changed keys at trie with given root. Returns None if entry is missing from the cache. - fn cached_changed_keys(&self, root: &H::Out) -> Option>, HashSet>>>; + /// Execute given functor with cached entry for given trie root. + /// Returns true if the functor has been called (cache entry exists) and false otherwise. + fn with_cached_changed_keys( + &self, + root: &H::Out, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool; /// Get a trie node. 
fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } diff --git a/core/state-machine/src/changes_trie/storage.rs b/core/state-machine/src/changes_trie/storage.rs index 0a4505c2fae4a..f8c556a46c61b 100644 --- a/core/state-machine/src/changes_trie/storage.rs +++ b/core/state-machine/src/changes_trie/storage.rs @@ -176,8 +176,12 @@ impl Storage for InMemoryStorage Option>, HashSet>>> { - self.cache.get(root).cloned() + fn with_cached_changed_keys( + &self, + root: &H::Out, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.cache.with_changed_keys(root, functor) } fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> {