diff --git a/grovedb/Cargo.toml b/grovedb/Cargo.toml index b075d8804..8f3b067c0 100644 --- a/grovedb/Cargo.toml +++ b/grovedb/Cargo.toml @@ -5,7 +5,10 @@ edition = "2021" [dependencies] rs_merkle = "1.1.0" -merk = { path = "../merk" } +merk = { path = "../merk", features = ["full"] } thiserror = "1.0.30" -ed = "0.2.2" tempdir = "0.3.7" +bincode = "1.3.3" +serde = { version = "1.0.130", features = ["derive"] } + +[features] diff --git a/grovedb/src/lib.rs b/grovedb/src/lib.rs index 4521dee03..fdbefe868 100644 --- a/grovedb/src/lib.rs +++ b/grovedb/src/lib.rs @@ -1,37 +1,40 @@ #![feature(trivial_bounds)] -use std::path::Path; +mod subtree; +#[cfg(test)] +mod tests; + +use std::{ + collections::{HashMap, HashSet}, + path::Path, + rc::Rc, +}; -use ed::Encode; -use merk::{self, Merk}; -use rs_merkle::{algorithms::Sha256, Hasher, MerkleTree}; +use merk::{self, rocksdb, Merk}; +use rs_merkle::{algorithms::Sha256, MerkleTree}; use subtree::Element; -mod subtree; -// Root tree has hardcoded leafs; each of them is `pub` to be easily used in -// `path` arg -pub const COMMON_TREE_KEY: &[u8] = b"common"; -pub const IDENTITIES_TREE_KEY: &[u8] = b"identities"; -pub const PUBLIC_KEYS_TO_IDENTITY_IDS_TREE_KEY: &[u8] = b"publicKeysToIdentityIDs"; -pub const DATA_CONTRACTS_TREE_KEY: &[u8] = b"dataContracts"; -pub const SPENT_ASSET_LOCK_TRANSACTIONS_TREE_KEY: &[u8] = b"spentAssetLockTransactions"; -const SUBTREES: [&[u8]; 5] = [ - COMMON_TREE_KEY, - IDENTITIES_TREE_KEY, - PUBLIC_KEYS_TO_IDENTITY_IDS_TREE_KEY, - DATA_CONTRACTS_TREE_KEY, - SPENT_ASSET_LOCK_TRANSACTIONS_TREE_KEY, -]; +/// Limit of possible indirections +const MAX_REFERENCE_HOPS: usize = 10; +/// A key to store serialized data about subtree prefixes to restore HADS +/// structure +const SUBTRESS_SERIALIZED_KEY: &[u8] = b"subtreesSerialized"; +/// A key to store serialized data about root tree leafs keys and order +const ROOT_LEAFS_SERIALIZED_KEY: &[u8] = b"rootLeafsSerialized"; #[derive(Debug, thiserror::Error)] pub enum 
Error { + #[error("rocksdb error")] + RocksDBError(#[from] merk::rocksdb::Error), #[error("unable to open Merk db")] MerkError(merk::Error), #[error("invalid path")] InvalidPath(&'static str), #[error("unable to decode")] - EdError(#[from] ed::Error), + BincodeError(#[from] bincode::Error), #[error("cyclic reference path")] - CyclicReferencePath, + CyclicReference, + #[error("reference hops limit exceeded")] + ReferenceLimit, } impl From for Error { @@ -42,59 +45,234 @@ impl From for Error { pub struct GroveDb { root_tree: MerkleTree, - subtrees_merk: Merk, + root_leaf_keys: HashMap, usize>, + subtrees: HashMap, Merk>, + db: Rc, } impl GroveDb { pub fn open>(path: P) -> Result { - let mut subtrees_merk = Merk::open(path)?; - let mut leaves = Vec::with_capacity(SUBTREES.len()); - // Populate Merk with root tree's leafs if no previous Merk data found - for subtree_key in SUBTREES { - let node_hash = if let Some(hash) = subtrees_merk.get_hash(subtree_key)? { - hash + let db = Rc::new(rocksdb::DB::open_cf_descriptors( + &Merk::default_db_opts(), + path, + merk::column_families(), + )?); + + let mut subtrees = HashMap::new(); + // TODO: owned `get` is not required for deserialization + if let Some(prefixes_serialized) = db.get(SUBTRESS_SERIALIZED_KEY)? { + let subtrees_prefixes: Vec> = bincode::deserialize(&prefixes_serialized)?; + for prefix in subtrees_prefixes { + let subtree_merk = Merk::open(db.clone(), prefix.to_vec())?; + subtrees.insert(prefix.to_vec(), subtree_merk); + } + } + + // TODO: owned `get` is not required for deserialization + let root_leaf_keys: HashMap, usize> = + if let Some(root_leaf_keys_serialized) = db.get(ROOT_LEAFS_SERIALIZED_KEY)? { + bincode::deserialize(&root_leaf_keys_serialized)? } else { - let element = Element::Tree; - element.insert(&mut subtrees_merk, &[], subtree_key)?; - subtrees_merk - .get_hash(subtree_key)? 
- .expect("was inserted previously") + HashMap::new() }; - leaves.push(node_hash); - } + Ok(GroveDb { - root_tree: MerkleTree::::from_leaves(&leaves), - subtrees_merk, + root_tree: Self::build_root_tree(&subtrees, &root_leaf_keys), + db: db.clone(), + subtrees, + root_leaf_keys, }) } + fn store_subtrees_keys_data(&self) -> Result<(), Error> { + let prefixes: Vec> = self.subtrees.keys().map(|x| x.clone()).collect(); + self.db + .put(SUBTRESS_SERIALIZED_KEY, bincode::serialize(&prefixes)?)?; + self.db.put( + ROOT_LEAFS_SERIALIZED_KEY, + bincode::serialize(&self.root_leaf_keys)?, + )?; + Ok(()) + } + + fn build_root_tree( + subtrees: &HashMap, Merk>, + root_leaf_keys: &HashMap, usize>, + ) -> MerkleTree { + let mut leaf_hashes: Vec<[u8; 32]> = vec![[0; 32]; root_leaf_keys.len()]; + for (subtree_path, root_leaf_idx) in root_leaf_keys { + let subtree_merk = subtrees + .get(subtree_path) + .expect("`root_leaf_keys` must be in sync with `subtrees`"); + leaf_hashes[*root_leaf_idx] = subtree_merk.root_hash(); + } + let res = MerkleTree::::from_leaves(&leaf_hashes); + res + } + + // TODO: split the function into smaller ones pub fn insert( &mut self, path: &[&[u8]], - key: &[u8], - element: subtree::Element, + key: Vec, + mut element: subtree::Element, ) -> Result<(), Error> { - todo!() + let compressed_path = Self::compress_path(path, None); + match &mut element { + Element::Tree(subtree_root_hash) => { + // Helper closure to create a new subtree under path + key + let create_subtree_merk = || -> Result<(Vec, Merk), Error> { + let compressed_path_subtree = Self::compress_path(path, Some(&key)); + Ok(( + compressed_path_subtree.clone(), + Merk::open(self.db.clone(), compressed_path_subtree)?, + )) + }; + if path.is_empty() { + // Add subtree to the root tree + + // Open Merk and put handle into `subtrees` dictionary accessible by its + // compressed path + let (compressed_path_subtree, subtree_merk) = create_subtree_merk()?; + self.subtrees + 
.insert(compressed_path_subtree.clone(), subtree_merk); + + // Update root leafs index to persist rs-merkle structure later + if self.root_leaf_keys.get(&compressed_path_subtree).is_none() { + self.root_leaf_keys + .insert(compressed_path_subtree, self.root_tree.leaves_len()); + } + self.propagate_changes(&[&key])?; + } else { + // Add subtree to another subtree. + // First, check if a subtree exists to create a new subtree under it + self.subtrees + .get(&compressed_path) + .ok_or(Error::InvalidPath("no subtree found under that path"))?; + let (compressed_path_subtree, subtree_merk) = create_subtree_merk()?; + // Set tree value as a a subtree root hash + *subtree_root_hash = subtree_merk.root_hash(); + self.subtrees.insert(compressed_path_subtree, subtree_merk); + // Had to take merk from `subtrees` once again to solve multiple &mut s + let mut merk = self + .subtrees + .get_mut(&compressed_path) + .expect("merk object must exist in `subtrees`"); + // need to mark key as taken in the upper tree + element.insert(&mut merk, key)?; + self.propagate_changes(path)?; + } + self.store_subtrees_keys_data()?; + } + _ => { + // If path is empty that means there is an attempt to insert something into a + // root tree and this branch is for anything but trees + if path.is_empty() { + return Err(Error::InvalidPath( + "only subtrees are allowed as root tree's leafs", + )); + } + // Get a Merk by a path + let mut merk = self + .subtrees + .get_mut(&compressed_path) + .ok_or(Error::InvalidPath("no subtree found under that path"))?; + element.insert(&mut merk, key)?; + self.propagate_changes(path)?; + } + } + Ok(()) } pub fn get(&self, path: &[&[u8]], key: &[u8]) -> Result { - todo!() + match self.get_raw(path, key)? 
{ + Element::Reference(reference_path) => self.follow_reference(reference_path), + other => Ok(other), + } + } + + /// Get tree item without following references + fn get_raw(&self, path: &[&[u8]], key: &[u8]) -> Result { + let merk = self + .subtrees + .get(&Self::compress_path(path, None)) + .ok_or(Error::InvalidPath("no subtree found under that path"))?; + Element::get(&merk, key) + } + + fn follow_reference<'a>(&self, mut path: Vec>) -> Result { + let mut hops_left = MAX_REFERENCE_HOPS; + let mut current_element; + let mut visited = HashSet::new(); + + while hops_left > 0 { + if visited.contains(&path) { + return Err(Error::CyclicReference); + } + if let Some((key, path_slice)) = path.split_last() { + current_element = self.get_raw( + path_slice + .iter() + .map(|x| x.as_slice()) + .collect::>() + .as_slice(), + key, + )?; + } else { + return Err(Error::InvalidPath("empty path")); + } + visited.insert(path); + match current_element { + Element::Reference(reference_path) => path = reference_path, + other => return Ok(other), + } + hops_left -= 1; + } + Err(Error::ReferenceLimit) } pub fn proof(&self) -> ! 
{ todo!() } -} - -#[cfg(test)] -mod tests { - use tempdir::TempDir; - use super::*; + /// Method to propagate updated subtree root hashes up to GroveDB root + fn propagate_changes(&mut self, path: &[&[u8]]) -> Result<(), Error> { + let mut split_path = path.split_last(); + // Go up until only one element in path, which means a key of a root tree + while let Some((key, path_slice)) = split_path { + if path_slice.is_empty() { + // Hit the root tree + self.root_tree = Self::build_root_tree(&self.subtrees, &self.root_leaf_keys); + break; + } else { + let compressed_path_upper_tree = Self::compress_path(path_slice, None); + let compressed_path_subtree = Self::compress_path(path_slice, Some(key)); + let subtree = self + .subtrees + .get(&compressed_path_subtree) + .ok_or(Error::InvalidPath("no subtree found under that path"))?; + let element = Element::Tree(subtree.root_hash()); + let upper_tree = self + .subtrees + .get_mut(&compressed_path_upper_tree) + .ok_or(Error::InvalidPath("no subtree found under that path"))?; + element.insert(upper_tree, key.to_vec())?; + split_path = path_slice.split_last(); + } + } + Ok(()) + } - #[test] - fn test_init() { - let tmp_dir = TempDir::new("db").unwrap(); - GroveDb::open(tmp_dir).expect("empty tree is ok"); + /// A helper method to build a prefix to rocksdb keys or identify a subtree + /// in `subtrees` map by tree path; + fn compress_path(path: &[&[u8]], key: Option<&[u8]>) -> Vec { + let mut res = path.iter().fold(Vec::::new(), |mut acc, p| { + acc.extend(p.into_iter()); + acc + }); + if let Some(k) = key { + res.extend_from_slice(k); + } + res } } diff --git a/grovedb/src/subtree.rs b/grovedb/src/subtree.rs index bc6521a9d..6b3c5a36f 100644 --- a/grovedb/src/subtree.rs +++ b/grovedb/src/subtree.rs @@ -1,181 +1,68 @@ //! Module for subtrees handling. -use ed::{Decode, Encode}; +//! Subtrees handling is isolated so basically this module is about adapting +//! Merk API to GroveDB needs. 
use merk::Op; +use serde::{Deserialize, Serialize}; use crate::{Error, Merk}; -/// Variants of an insertable entity -#[derive(Debug, Decode, Encode, PartialEq)] +/// Variants of GroveDB stored entities +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum Element { /// An ordinary value Item(Vec), /// A reference to an object by its path - Reference(Vec), - /// A subtree - Tree, + Reference(Vec>), + /// A subtree, contains a root hash of the underlying Merk. + /// Hash is stored to make Merk become different when its subtrees have + /// changed, otherwise changes won't be reflected in parent trees. + Tree([u8; 32]), } impl Element { - pub fn new_reference(path: &[&[u8]], key: &[u8]) -> Self { - Element::Reference(Self::build_merk_key(path.iter().map(|x| *x), key)) + // TODO: improve API to avoid creation of Tree elements with uncertain state + pub fn empty_tree() -> Element { + Element::Tree(Default::default()) } - /// Helper method to short-circuit out in case a tree is expected - fn is_tree(&self) -> Result<(), Error> { - match self { - Element::Tree => Ok(()), - _ => Err(Error::InvalidPath("tree expected")), - } - } - - /// Recursively follow `Element::Reference` - fn follow_reference(self, merk: &Merk) -> Result { - fn follow_reference_with_path( - element: Element, - merk: &Merk, - paths: &mut Vec>, - ) -> Result { - if let Element::Reference(reference_merk_key) = element { - // Check if the reference merk key has been visited before - // if it has then we have a cycle - if paths.contains(&reference_merk_key) { - return Err(Error::CyclicReferencePath); - } - let element = Element::decode( - merk.get(reference_merk_key.as_slice())? - .ok_or(Error::InvalidPath("key not found in Merk"))? 
- .as_slice(), - )?; - - paths.push(reference_merk_key); - follow_reference_with_path(element, merk, paths) - } else { - Ok(element) - } - } - - let mut reference_paths: Vec> = Vec::new(); - follow_reference_with_path(self, merk, &mut reference_paths) - } - - /// A helper method to build Merk keys (and RocksDB as well) out of path + - /// key - fn build_merk_key<'a>(path: impl Iterator, key: &'a [u8]) -> Vec { - let mut merk_key = path.fold(Vec::::new(), |mut acc, p| { - acc.extend(p.into_iter()); - acc - }); - merk_key.extend(key); - merk_key - } - - pub fn get(merk: &Merk, path: &[&[u8]], key: &[u8]) -> Result { - // We'll iterate over path accumulating RocksDB key to retrieve the data, - // validating the path while doing so - let mut merk_key = Vec::new(); - for p in path { - merk_key.extend(p.into_iter()); - let element = Element::decode( - merk.get(&merk_key)? - .ok_or(Error::InvalidPath("key not found in Merk"))? - .as_slice(), - )?; - element.is_tree()?; - } - merk_key.extend(key); - let element = Element::decode( - merk.get(&merk_key)? + /// Get an element from Merk under a key; path should be resolved and proper + /// Merk should be loaded by this moment + pub fn get(merk: &Merk, key: &[u8]) -> Result { + let element = bincode::deserialize( + merk.get(&key)? .ok_or(Error::InvalidPath("key not found in Merk"))? .as_slice(), )?; - - element.follow_reference(&merk) + Ok(element) } - pub fn insert(&self, merk: &mut Merk, path: &[&[u8]], key: &[u8]) -> Result<(), Error> { - // check if a tree was inserted by the path - if let Some((tree_key, tree_path)) = path.split_last() { - Element::get(merk, tree_path, tree_key)?.is_tree()?; - } - if path.len() == 1 { - Element::get( - merk, - &[], - path.first().expect("expected the path of length of 1"), - )? 
- .is_tree()?; - } - - let merk_key: Vec = Self::build_merk_key(path.iter().map(|x| *x), key); - let batch = [(merk_key, Op::Put(Element::encode(self)?))]; + /// Insert an element in Merk under a key; path should be resolved and + /// proper Merk should be loaded by this moment + pub fn insert(&self, merk: &mut Merk, key: Vec) -> Result<(), Error> { + let batch = [(key, Op::Put(bincode::serialize(self)?))]; merk.apply(&batch, &[]).map_err(|e| e.into()) } } #[cfg(test)] mod tests { - use tempdir::TempDir; + use merk::test_utils::TempMerk; use super::*; #[test] fn test_success_insert() { - let tmp_dir = TempDir::new("db").unwrap(); - let mut merk = Merk::open(tmp_dir.path()).unwrap(); - Element::Tree - .insert(&mut merk, &[], b"mykey") + let mut merk = TempMerk::new(); + Element::empty_tree() + .insert(&mut merk, b"mykey".to_vec()) .expect("expected successful insertion"); Element::Item(b"value".to_vec()) - .insert(&mut merk, &[b"mykey"], b"another-key") + .insert(&mut merk, b"another-key".to_vec()) .expect("expected successful insertion 2"); assert_eq!( - Element::get(&merk, &[b"mykey"], b"another-key").expect("expected successful get"), + Element::get(&merk, b"another-key").expect("expected successful get"), Element::Item(b"value".to_vec()), ); } - - #[test] - fn test_follow_references() { - let tmp_dir = TempDir::new("db").unwrap(); - let mut merk = Merk::open(tmp_dir.path()).unwrap(); - Element::Tree - .insert(&mut merk, &[], b"mykey") - .expect("expected successful insertion"); - Element::Item(b"value".to_vec()) - .insert(&mut merk, &[b"mykey"], b"another-key") - .expect("expected successful insertion 2"); - Element::new_reference(&[b"mykey"], b"another-key") - .insert(&mut merk, &[b"mykey"], b"reference") - .expect("expected successful reference insertion"); - Element::new_reference(&[b"mykey"], b"reference") - .insert(&mut merk, &[b"mykey"], b"another-reference") - .expect("expected successful reference insertion 2"); - - assert_eq!( - Element::get(&merk, 
&[b"mykey"], b"another-reference") - .expect("expected successful get"), - Element::Item(b"value".to_vec()), - ); - } - - #[test] - fn test_circular_references() { - let tmp_dir = TempDir::new("db").unwrap(); - let mut merk = Merk::open(tmp_dir.path()).unwrap(); - - Element::Tree - .insert(&mut merk, &[], b"tree-key") - .expect("expected successful insertion"); - - // r1 points to r2 and r2 points to r1 (cycle!) - Element::new_reference(&[b"tree-key"], b"reference-2") - .insert(&mut merk, &[b"tree-key"], b"reference-1") - .expect("expected successful reference insertion"); - Element::new_reference(&[b"tree-key"], b"reference-1") - .insert(&mut merk, &[b"tree-key"], b"reference-2") - .expect("expected successful reference insertion"); - - assert!(Element::get(&merk, &[b"tree-key"], b"reference-1").is_err()); - } } diff --git a/grovedb/src/tests.rs b/grovedb/src/tests.rs new file mode 100644 index 000000000..df8e6e29b --- /dev/null +++ b/grovedb/src/tests.rs @@ -0,0 +1,241 @@ +use std::ops::{Deref, DerefMut}; + +use tempdir::TempDir; + +use super::*; + +const TEST_LEAF: &[u8] = b"test_leaf"; +const ANOTHER_TEST_LEAF: &[u8] = b"test_leaf2"; + +/// GroveDB wrapper to keep temp directory alive +struct TempGroveDb { + _tmp_dir: TempDir, + db: GroveDb, +} + +impl DerefMut for TempGroveDb { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.db + } +} + +impl Deref for TempGroveDb { + type Target = GroveDb; + + fn deref(&self) -> &Self::Target { + &self.db + } +} + +/// A helper method to create GroveDB with one leaf for a root tree +fn make_grovedb() -> TempGroveDb { + let tmp_dir = TempDir::new("db").unwrap(); + let mut db = GroveDb::open(tmp_dir.path()).unwrap(); + add_test_leafs(&mut db); + TempGroveDb { + _tmp_dir: tmp_dir, + db, + } +} + +fn add_test_leafs(db: &mut GroveDb) { + db.insert(&[], TEST_LEAF.to_vec(), Element::empty_tree()) + .expect("successful root tree leaf insert"); + db.insert(&[], ANOTHER_TEST_LEAF.to_vec(), Element::empty_tree()) + 
.expect("successful root tree leaf 2 insert"); +} + +#[test] +fn test_init() { + let tmp_dir = TempDir::new("db").unwrap(); + GroveDb::open(tmp_dir).expect("empty tree is ok"); +} + +#[test] +fn test_insert_value_to_merk() { + let mut db = make_grovedb(); + let element = Element::Item(b"ayy".to_vec()); + db.insert(&[TEST_LEAF], b"key".to_vec(), element.clone()) + .expect("successful insert"); + assert_eq!( + db.get(&[TEST_LEAF], b"key").expect("succesful get"), + element + ); +} + +#[test] +fn test_insert_value_to_subtree() { + let mut db = make_grovedb(); + let element = Element::Item(b"ayy".to_vec()); + + // Insert a subtree first + db.insert(&[TEST_LEAF], b"key1".to_vec(), Element::empty_tree()) + .expect("successful subtree insert"); + // Insert an element into subtree + db.insert(&[TEST_LEAF, b"key1"], b"key2".to_vec(), element.clone()) + .expect("successful value insert"); + assert_eq!( + db.get(&[TEST_LEAF, b"key1"], b"key2") + .expect("succesful get"), + element + ); +} + +#[test] +fn test_changes_propagated() { + let mut db = make_grovedb(); + let old_hash = db.root_tree.root(); + let element = Element::Item(b"ayy".to_vec()); + + // Insert some nested subtrees + db.insert(&[TEST_LEAF], b"key1".to_vec(), Element::empty_tree()) + .expect("successful subtree 1 insert"); + db.insert( + &[TEST_LEAF, b"key1"], + b"key2".to_vec(), + Element::empty_tree(), + ) + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + &[TEST_LEAF, b"key1", b"key2"], + b"key3".to_vec(), + element.clone(), + ) + .expect("successful value insert"); + assert_eq!( + db.get(&[TEST_LEAF, b"key1", b"key2"], b"key3") + .expect("succesful get"), + element + ); + assert_ne!(old_hash, db.root_tree.root()); +} + +#[test] +fn test_follow_references() { + let mut db = make_grovedb(); + let element = Element::Item(b"ayy".to_vec()); + + // Insert a reference + db.insert( + &[TEST_LEAF], + b"reference_key".to_vec(), + Element::Reference(vec![TEST_LEAF.to_vec(), 
b"key2".to_vec(), b"key3".to_vec()]), + ) + .expect("successful reference insert"); + + // Insert an item to refer to + db.insert(&[TEST_LEAF], b"key2".to_vec(), Element::empty_tree()) + .expect("successful subtree 1 insert"); + db.insert(&[TEST_LEAF, b"key2"], b"key3".to_vec(), element.clone()) + .expect("successful value insert"); + assert_eq!( + db.get(&[TEST_LEAF], b"reference_key") + .expect("succesful get"), + element + ); +} + +#[test] +fn test_cyclic_references() { + let mut db = make_grovedb(); + + db.insert( + &[TEST_LEAF], + b"reference_key_1".to_vec(), + Element::Reference(vec![TEST_LEAF.to_vec(), b"reference_key_2".to_vec()]), + ) + .expect("successful reference 1 insert"); + + db.insert( + &[TEST_LEAF], + b"reference_key_2".to_vec(), + Element::Reference(vec![TEST_LEAF.to_vec(), b"reference_key_1".to_vec()]), + ) + .expect("successful reference 2 insert"); + + assert!(matches!( + db.get(&[TEST_LEAF], b"reference_key_1").unwrap_err(), + Error::CyclicReference + )); +} + +#[test] +fn test_too_many_indirections() { + let mut db = make_grovedb(); + + let keygen = |idx| format!("key{}", idx).bytes().collect::>(); + + db.insert( + &[TEST_LEAF], + b"key0".to_vec(), + Element::Item(b"oops".to_vec()), + ) + .expect("successful item insert"); + + for i in 1..=(MAX_REFERENCE_HOPS + 1) { + db.insert( + &[TEST_LEAF], + keygen(i), + Element::Reference(vec![TEST_LEAF.to_vec(), keygen(i - 1)]), + ) + .expect("successful reference insert"); + } + + assert!(matches!( + db.get(&[TEST_LEAF], &keygen(MAX_REFERENCE_HOPS + 1)) + .unwrap_err(), + Error::ReferenceLimit + )); +} + +#[test] +fn test_tree_structure_is_presistent() { + let tmp_dir = TempDir::new("db").unwrap(); + let element = Element::Item(b"ayy".to_vec()); + // Create a scoped GroveDB + { + let mut db = GroveDb::open(tmp_dir.path()).unwrap(); + add_test_leafs(&mut db); + + // Insert some nested subtrees + db.insert(&[TEST_LEAF], b"key1".to_vec(), Element::empty_tree()) + .expect("successful subtree 1 insert"); 
+ db.insert( + &[TEST_LEAF, b"key1"], + b"key2".to_vec(), + Element::empty_tree(), + ) + .expect("successful subtree 2 insert"); + // Insert an element into subtree + db.insert( + &[TEST_LEAF, b"key1", b"key2"], + b"key3".to_vec(), + element.clone(), + ) + .expect("successful value insert"); + assert_eq!( + db.get(&[TEST_LEAF, b"key1", b"key2"], b"key3") + .expect("succesful get 1"), + element + ); + } + // Open a persisted GroveDB + let db = GroveDb::open(tmp_dir).unwrap(); + assert_eq!( + db.get(&[TEST_LEAF, b"key1", b"key2"], b"key3") + .expect("succesful get 2"), + element + ); + assert!(db.get(&[TEST_LEAF, b"key1", b"key2"], b"key4").is_err()); +} + +#[test] +fn test_root_tree_leafs_are_noted() { + let db = make_grovedb(); + let mut hm = HashMap::new(); + hm.insert(TEST_LEAF.to_vec(), 0); + hm.insert(ANOTHER_TEST_LEAF.to_vec(), 1); + assert_eq!(db.root_leaf_keys, hm); + assert_eq!(db.root_tree.leaves_len(), 2); +} diff --git a/merk/Cargo.toml b/merk/Cargo.toml index 5801bafda..738a6cb17 100644 --- a/merk/Cargo.toml +++ b/merk/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" license = "MIT" [dependencies] +tempdir = "0.3.7" [dependencies.time] version = "0.1.42" diff --git a/merk/benches/merk.rs b/merk/benches/merk.rs index 419131b3a..da501d679 100644 --- a/merk/benches/merk.rs +++ b/merk/benches/merk.rs @@ -2,12 +2,12 @@ extern crate test; -use merk::proofs::encode_into as encode_proof_into; -use merk::restore::Restorer; -use merk::test_utils::*; -use merk::{Merk, Result}; -use rand::prelude::*; use std::thread; + +use merk::{ + proofs::encode_into as encode_proof_into, restore::Restorer, test_utils::*, Merk, Result, +}; +use rand::prelude::*; use test::Bencher; #[bench] diff --git a/merk/benches/ops.rs b/merk/benches/ops.rs index 14010f1e1..a7c8aae65 100644 --- a/merk/benches/ops.rs +++ b/merk/benches/ops.rs @@ -2,8 +2,7 @@ extern crate test; -use merk::owner::Owner; -use merk::test_utils::*; +use merk::{owner::Owner, test_utils::*}; use test::Bencher; #[bench] 
diff --git a/merk/src/lib.rs b/merk/src/lib.rs index fea6f3aea..f64473173 100644 --- a/merk/src/lib.rs +++ b/merk/src/lib.rs @@ -12,26 +12,25 @@ mod error; /// The top-level store API. #[cfg(feature = "full")] mod merk; -/// Provides a container type that allows temporarily taking ownership of a value. +pub use crate::merk::column_families; +/// Provides a container type that allows temporarily taking ownership of a +/// value. // TODO: move this into its own crate pub mod owner; /// Algorithms for generating and verifying Merkle proofs. pub mod proofs; /// Various helpers useful for tests or benchmarks. -#[cfg(feature = "full")] pub mod test_utils; /// The core tree data structure. pub mod tree; -#[cfg(feature = "full")] -pub use crate::merk::{chunks, restore, Merk}; - pub use error::{Error, Result}; -pub use tree::{Batch, BatchEntry, Hash, Op, PanicSource, HASH_LENGTH}; - #[allow(deprecated)] pub use proofs::query::verify_query; +pub use proofs::query::{execute_proof, verify}; +pub use tree::{Batch, BatchEntry, Hash, Op, PanicSource, HASH_LENGTH}; -pub use proofs::query::execute_proof; -pub use proofs::query::verify; +#[cfg(feature = "full")] +// pub use crate::merk::{chunks, restore, Merk}; +pub use crate::merk::{chunks, Merk}; diff --git a/merk/src/merk/chunks.rs b/merk/src/merk/chunks.rs index 66f87bcd2..6d6665c5e 100644 --- a/merk/src/merk/chunks.rs +++ b/merk/src/merk/chunks.rs @@ -1,14 +1,16 @@ //! Provides `ChunkProducer`, which creates chunk proofs for full replication of //! a Merk. -use super::Merk; -use crate::proofs::{chunk::get_next_chunk, Node, Op}; - -use crate::Result; use ed::Encode; use failure::bail; use rocksdb::DBRawIterator; +use super::Merk; +use crate::{ + proofs::{chunk::get_next_chunk, Node, Op}, + Result, +}; + /// A `ChunkProducer` allows the creation of chunk proofs, used for trustlessly /// replicating entire Merk trees. Chunks can be generated on the fly in a /// random order, or iterated in order for slightly better performance. 
@@ -52,8 +54,8 @@ impl<'a> ChunkProducer<'a> { } /// Gets the chunk with the given index. Errors if the index is out of - /// bounds or the tree is empty - the number of chunks can be checked by calling - /// `producer.len()`. + /// bounds or the tree is empty - the number of chunks can be checked by + /// calling `producer.len()`. pub fn chunk(&mut self, index: usize) -> Result> { if index >= self.len() { bail!("Chunk index out-of-bounds"); @@ -149,6 +151,8 @@ impl Merk { #[cfg(test)] mod tests { + use tempdir::TempDir; + use super::*; use crate::{ proofs::{ @@ -160,7 +164,7 @@ mod tests { #[test] fn len_small() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..256); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -171,7 +175,7 @@ mod tests { #[test] fn len_big() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..10_000); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -182,7 +186,7 @@ mod tests { #[test] fn generate_and_verify_chunks() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..10_000); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -204,14 +208,10 @@ mod tests { #[test] fn chunks_from_reopen() { - let time = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos(); - let path = format!("chunks_from_reopen_{}.db", time); - + let tmp_dir = TempDir::new("chunks_from_reopen").expect("cannot create tempdir"); let original_chunks = { - let mut merk = Merk::open(&path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let mut merk = Merk::open(db, Vec::new()).unwrap(); let batch = make_batch_seq(1..10); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -223,7 +223,8 @@ mod tests { .into_iter() }; - let merk = TempMerk::open(path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let merk = Merk::open(db, Vec::new()).unwrap(); let 
reopen_chunks = merk.chunks().unwrap().into_iter().map(Result::unwrap); for (original, checkpoint) in original_chunks.zip(reopen_chunks) { @@ -231,31 +232,33 @@ mod tests { } } - #[test] - fn chunks_from_checkpoint() { - let mut merk = TempMerk::new().unwrap(); - let batch = make_batch_seq(1..10); - merk.apply(batch.as_slice(), &[]).unwrap(); + // #[test] + // fn chunks_from_checkpoint() { + // let mut merk = TempMerk::new(); + // let batch = make_batch_seq(1..10); + // merk.apply(batch.as_slice(), &[]).unwrap(); - let path: std::path::PathBuf = "generate_and_verify_chunks_from_checkpoint.db".into(); - if path.exists() { - std::fs::remove_dir_all(&path).unwrap(); - } - let checkpoint = merk.checkpoint(&path).unwrap(); + // let path: std::path::PathBuf = + // "generate_and_verify_chunks_from_checkpoint.db".into(); if path. + // exists() { std::fs::remove_dir_all(&path).unwrap(); + // } + // let checkpoint = merk.checkpoint(&path).unwrap(); - let original_chunks = merk.chunks().unwrap().into_iter().map(Result::unwrap); - let checkpoint_chunks = checkpoint.chunks().unwrap().into_iter().map(Result::unwrap); + // let original_chunks = + // merk.chunks().unwrap().into_iter().map(Result::unwrap); + // let checkpoint_chunks = + // checkpoint.chunks().unwrap().into_iter().map(Result::unwrap); - for (original, checkpoint) in original_chunks.zip(checkpoint_chunks) { - assert_eq!(original.len(), checkpoint.len()); - } + // for (original, checkpoint) in original_chunks.zip(checkpoint_chunks) { + // assert_eq!(original.len(), checkpoint.len()); + // } - std::fs::remove_dir_all(&path).unwrap(); - } + // std::fs::remove_dir_all(&path).unwrap(); + // } #[test] fn random_access_chunks() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..111); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -276,7 +279,7 @@ mod tests { #[test] #[should_panic(expected = "Attempted to fetch chunk on empty tree")] fn test_chunk_empty() { - let 
merk = TempMerk::new().unwrap(); + let merk = TempMerk::new(); let _chunks = merk .chunks() @@ -289,7 +292,7 @@ mod tests { #[test] #[should_panic(expected = "Chunk index out-of-bounds")] fn test_chunk_index_oob() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..42); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -299,7 +302,7 @@ mod tests { #[test] fn test_chunk_index_gt_1_access() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..513); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -380,7 +383,7 @@ mod tests { #[test] #[should_panic(expected = "Called next_chunk after end")] fn test_next_chunk_index_oob() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(1..42); merk.apply(batch.as_slice(), &[]).unwrap(); diff --git a/merk/src/merk/mod.rs b/merk/src/merk/mod.rs index 8c4445f6d..9505adbcc 100644 --- a/merk/src/merk/mod.rs +++ b/merk/src/merk/mod.rs @@ -1,11 +1,13 @@ pub mod chunks; -pub mod restore; +// TODO +// pub mod restore; use std::{ cell::Cell, cmp::Ordering, collections::LinkedList, path::{Path, PathBuf}, + rc::Rc, }; use failure::bail; @@ -21,7 +23,7 @@ const ROOT_KEY_KEY: &[u8] = b"root"; const AUX_CF_NAME: &str = "aux"; const INTERNAL_CF_NAME: &str = "internal"; -fn column_families() -> Vec { +pub fn column_families() -> Vec { vec![ // TODO: clone opts or take args ColumnFamilyDescriptor::new(AUX_CF_NAME, Merk::default_db_opts()), @@ -32,34 +34,18 @@ fn column_families() -> Vec { /// A handle to a Merkle key/value store backed by RocksDB. pub struct Merk { pub(crate) tree: Cell>, - pub(crate) db: rocksdb::DB, - pub(crate) path: PathBuf, + pub(crate) db: Rc, + pub(crate) prefix: Vec, } pub type UseTreeMutResult = Result, Option>)>>; impl Merk { - /// Opens a store with the specified file path. If no store exists at that - /// path, one will be created. 
- pub fn open>(path: P) -> Result { - let db_opts = Merk::default_db_opts(); - Merk::open_opt(path, db_opts) - } - - /// Opens a store with the specified file path and the given options. If no - /// store exists at that path, one will be created. - pub fn open_opt

(path: P, db_opts: rocksdb::Options) -> Result - where - P: AsRef, - { - let mut path_buf = PathBuf::new(); - path_buf.push(path); - let db = rocksdb::DB::open_cf_descriptors(&db_opts, &path_buf, column_families())?; - + pub fn open(db: Rc, prefix: Vec) -> Result { let mut merk = Merk { tree: Cell::new(None), db, - path: path_buf, + prefix, }; merk.load_root()?; @@ -126,7 +112,7 @@ impl Merk { match maybe_child { None => { // fetch from RocksDB - break fetch_node(&self.db, key) + break fetch_node(&self.db, &self.prefix, &key) .map(|maybe_node| maybe_node.map(|node| f(&node))); } Some(child) => cursor = child, // traverse to child @@ -151,7 +137,7 @@ impl Merk { /// /// # Example /// ``` - /// # let mut store = merk::test_utils::TempMerk::new().unwrap(); + /// # let mut store = merk::test_utils::TempMerk::new(); /// # store.apply(&[(vec![4,5,6], Op::Put(vec![0]))], &[]).unwrap(); /// /// use merk::Op; @@ -189,7 +175,7 @@ impl Merk { /// /// # Example /// ``` - /// # let mut store = merk::test_utils::TempMerk::new().unwrap(); + /// # let mut store = merk::test_utils::TempMerk::new(); /// # store.apply(&[(vec![4,5,6], Op::Put(vec![0]))], &[]).unwrap(); /// /// use merk::Op; @@ -214,15 +200,6 @@ impl Merk { self.commit(deleted_keys, aux) } - /// Closes the store and deletes all data from disk. - pub fn destroy(self) -> Result<()> { - let opts = Merk::default_db_opts(); - let path = self.path.clone(); - drop(self); - rocksdb::DB::destroy(&opts, path)?; - Ok(()) - } - /// Creates a Merkle proof for the list of queried keys. For each key in the /// query, if the key is found in the store then the value will be proven to /// be in the tree. 
For each key in the query that does not exist in the @@ -284,19 +261,21 @@ impl Merk { let mut batch = rocksdb::WriteBatch::default(); let mut to_batch = self.use_tree_mut(|maybe_tree| -> UseTreeMutResult { // TODO: concurrent commit + // + let mut prefixed_root = self.prefix.clone(); + prefixed_root.extend_from_slice(ROOT_KEY_KEY); if let Some(tree) = maybe_tree { // TODO: configurable committer let mut committer = MerkCommitter::new(tree.height(), 100); tree.commit(&mut committer)?; // update pointer to root node - batch.put_cf(internal_cf, ROOT_KEY_KEY, tree.key()); + batch.put_cf(internal_cf, prefixed_root, tree.key()); Ok(committer.batch) } else { // empty tree, delete pointer to root - batch.delete_cf(internal_cf, ROOT_KEY_KEY); - + batch.delete_cf(internal_cf, prefixed_root); Ok(vec![]) } })?; @@ -307,17 +286,21 @@ impl Merk { } to_batch.sort_by(|a, b| a.0.cmp(&b.0)); for (key, maybe_value) in to_batch { + let mut prefixed_key = self.prefix.clone(); + prefixed_key.extend_from_slice(&key); if let Some(value) = maybe_value { - batch.put(key, value); + batch.put(prefixed_key, value); } else { - batch.delete(key); + batch.delete(prefixed_key); } } for (key, value) in aux { + let mut prefixed_key = self.prefix.clone(); + prefixed_key.extend_from_slice(&key); match value { - Op::Put(value) => batch.put_cf(aux_cf, key, value), - Op::Delete => batch.delete_cf(aux_cf, key), + Op::Put(value) => batch.put_cf(aux_cf, prefixed_key, value), + Op::Delete => batch.delete_cf(aux_cf, prefixed_key), }; } @@ -341,13 +324,16 @@ impl Merk { self.db.raw_iterator() } - pub fn checkpoint>(&self, path: P) -> Result { - Checkpoint::new(&self.db)?.create_checkpoint(&path)?; - Merk::open(path) - } + // pub fn checkpoint>(&self, path: P, prefix: &[u8]) -> + // Result { Checkpoint::new(&self.db)?.create_checkpoint(&path)?; + // Merk::open(path, prefix) + // } fn source(&self) -> MerkSource { - MerkSource { db: &self.db } + MerkSource { + db: &self.db, + prefix: &self.prefix, + } } fn 
use_tree(&self, f: impl FnOnce(Option<&Tree>) -> T) -> T { @@ -375,20 +361,24 @@ impl Merk { pub(crate) fn set_root_key(&mut self, key: Vec) -> Result<()> { let internal_cf = self.db.cf_handle(INTERNAL_CF_NAME).unwrap(); let mut batch = WriteBatch::default(); - batch.put_cf(internal_cf, ROOT_KEY_KEY, key); + let mut prefixed_root_key = self.prefix.clone(); + prefixed_root_key.extend_from_slice(ROOT_KEY_KEY); + batch.put_cf(internal_cf, prefixed_root_key, key); self.write(batch) } - pub(crate) fn fetch_node(&self, key: &[u8]) -> Result> { - fetch_node(&self.db, key) + pub(crate) fn fetch_node(&self, prefix: &[u8], key: &[u8]) -> Result> { + fetch_node(&self.db, prefix, key) } pub(crate) fn load_root(&mut self) -> Result<()> { let internal_cf = self.db.cf_handle(INTERNAL_CF_NAME).unwrap(); + let mut prefixed_root_key = self.prefix.clone(); + prefixed_root_key.extend_from_slice(ROOT_KEY_KEY); let tree = self .db - .get_pinned_cf(internal_cf, ROOT_KEY_KEY)? - .map(|root_key| fetch_existing_node(&self.db, &root_key)) + .get_pinned_cf(internal_cf, &prefixed_root_key)? 
+ .map(|root_key| fetch_existing_node(&self.db, &self.prefix, &root_key)) .transpose()?; self.tree = Cell::new(tree); Ok(()) @@ -398,11 +388,12 @@ impl Merk { #[derive(Clone)] pub struct MerkSource<'a> { db: &'a rocksdb::DB, + prefix: &'a [u8], } impl<'a> Fetch for MerkSource<'a> { fn fetch(&self, link: &Link) -> Result { - fetch_existing_node(self.db, link.key()) + fetch_existing_node(self.db, &self.prefix, link.key()) } } @@ -437,8 +428,10 @@ impl Commit for MerkCommitter { } } -fn fetch_node(db: &rocksdb::DB, key: &[u8]) -> Result> { - let bytes = db.get_pinned(key)?; +fn fetch_node(db: &rocksdb::DB, prefix: &[u8], key: &[u8]) -> Result> { + let mut prefixed_key = prefix.to_vec(); + prefixed_key.extend_from_slice(key); + let bytes = db.get_pinned(&prefixed_key)?; if let Some(bytes) = bytes { Ok(Some(Tree::decode(key.to_vec(), &bytes))) } else { @@ -446,8 +439,8 @@ fn fetch_node(db: &rocksdb::DB, key: &[u8]) -> Result> { } } -fn fetch_existing_node(db: &rocksdb::DB, key: &[u8]) -> Result { - match fetch_node(db, key)? { +fn fetch_existing_node(db: &rocksdb::DB, prefix: &[u8], key: &[u8]) -> Result { + match fetch_node(db, prefix, key)? 
{ None => bail!("key not found: {:?}", key), Some(node) => Ok(node), } @@ -455,7 +448,7 @@ fn fetch_existing_node(db: &rocksdb::DB, key: &[u8]) -> Result { #[cfg(test)] mod test { - use std::thread; + use tempdir::TempDir; use super::{Merk, MerkSource, RefWalker}; use crate::{test_utils::*, Op}; @@ -472,10 +465,7 @@ mod test { #[test] fn simple_insert_apply() { let batch_size = 20; - - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); - + let mut merk = TempMerk::new(); let batch = make_batch_seq(0..batch_size); merk.apply(&batch, &[]).expect("apply failed"); @@ -492,9 +482,7 @@ mod test { #[test] fn insert_uncached() { let batch_size = 20; - - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); + let mut merk = TempMerk::new(); let batch = make_batch_seq(0..batch_size); merk.apply(&batch, &[]).expect("apply failed"); @@ -509,9 +497,7 @@ mod test { fn insert_rand() { let tree_size = 40; let batch_size = 4; - - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); + let mut merk = TempMerk::new(); for i in 0..(tree_size / batch_size) { println!("i:{}", i); @@ -522,8 +508,7 @@ mod test { #[test] fn actual_deletes() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); + let mut merk = TempMerk::new(); let batch = make_batch_rand(10, 1); merk.apply(&batch, &[]).expect("apply failed"); @@ -537,8 +522,7 @@ mod test { #[test] fn aux_data() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); + let mut merk = TempMerk::new(); merk.apply(&[], &[(vec![1, 2, 3], Op::Put(vec![4, 5, 6]))]) .expect("apply failed"); let val = merk.get_aux(&[1, 2, 3]).unwrap(); @@ -547,8 +531,7 @@ mod test { #[test] fn 
simulated_crash() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = CrashMerk::open(path).expect("failed to open merk"); + let mut merk = CrashMerk::open().expect("failed to open merk"); merk.apply( &[(vec![0], Op::Put(vec![1]))], @@ -561,19 +544,14 @@ mod test { merk.apply(&make_batch_seq(i * 2_000..(i + 1) * 2_000), &[]) .expect("apply failed"); } - - unsafe { - merk.crash().unwrap(); - } + merk.crash(); assert_eq!(merk.get_aux(&[2]).unwrap(), Some(vec![3])); - merk.destroy().unwrap(); } #[test] fn get_not_found() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(path).expect("failed to open merk"); + let mut merk = TempMerk::new(); // no root assert!(merk.get(&[1, 2, 3]).unwrap().is_none()); @@ -604,14 +582,11 @@ mod test { node.walk(false).unwrap().map(|c| collect(c, nodes)); } - let time = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos(); - let path = format!("merk_reopen_{}.db", time); + let tmp_dir = TempDir::new("test_reopen").expect("cannot open tempdir"); let original_nodes = { - let mut merk = Merk::open(&path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let mut merk = Merk::open(db, Vec::new()).unwrap(); let batch = make_batch_seq(1..10_000); merk.apply(batch.as_slice(), &[]).unwrap(); let mut tree = merk.tree.take().unwrap(); @@ -622,7 +597,8 @@ mod test { nodes }; - let merk = TempMerk::open(&path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let merk = Merk::open(db, Vec::new()).unwrap(); let mut tree = merk.tree.take().unwrap(); let walker = RefWalker::new(&mut tree, merk.source()); @@ -640,15 +616,11 @@ mod test { iter.next(); } } - - let time = std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos(); - let path = format!("merk_reopen_{}.db", time); + let tmp_dir = TempDir::new("reopen_iter_test").expect("cannot open tempdir"); let original_nodes 
= { - let mut merk = Merk::open(&path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let mut merk = Merk::open(db, Vec::new()).unwrap(); let batch = make_batch_seq(1..10_000); merk.apply(batch.as_slice(), &[]).unwrap(); @@ -656,8 +628,8 @@ mod test { collect(&mut merk.raw_iter(), &mut nodes); nodes }; - - let merk = TempMerk::open(&path).unwrap(); + let db = default_rocksdb(tmp_dir.path()); + let mut merk = Merk::open(db, Vec::new()).unwrap(); let mut reopen_nodes = vec![]; collect(&mut merk.raw_iter(), &mut reopen_nodes); @@ -665,75 +637,75 @@ mod test { assert_eq!(reopen_nodes, original_nodes); } - #[test] - fn checkpoint() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(&path).expect("failed to open merk"); + // #[test] + // fn checkpoint() { + // let mut merk = TempMerk::new(); - merk.apply(&[(vec![1], Op::Put(vec![0]))], &[]) - .expect("apply failed"); + // merk.apply(&[(vec![1], Op::Put(vec![0]))], &[]) + // .expect("apply failed"); - let mut checkpoint = merk.checkpoint(path + ".checkpoint").unwrap(); + // let mut checkpoint = + // merk.inner.checkpoint(merk.path.path().join("checkpoint")).unwrap(); - assert_eq!(merk.get(&[1]).unwrap(), Some(vec![0])); - assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); + // assert_eq!(merk.get(&[1]).unwrap(), Some(vec![0])); + // assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); - merk.apply( - &[(vec![1], Op::Put(vec![1])), (vec![2], Op::Put(vec![0]))], - &[], - ) - .expect("apply failed"); + // merk.apply( + // &[(vec![1], Op::Put(vec![1])), (vec![2], Op::Put(vec![0]))], + // &[], + // ) + // .expect("apply failed"); - assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); - assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); - assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); - assert_eq!(checkpoint.get(&[2]).unwrap(), None); + // assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); + // assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); + // 
assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); + // assert_eq!(checkpoint.get(&[2]).unwrap(), None); - checkpoint - .apply(&[(vec![2], Op::Put(vec![123]))], &[]) - .expect("apply failed"); + // checkpoint + // .apply(&[(vec![2], Op::Put(vec![123]))], &[]) + // .expect("apply failed"); - assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); - assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); - assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); - assert_eq!(checkpoint.get(&[2]).unwrap(), Some(vec![123])); + // assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); + // assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); + // assert_eq!(checkpoint.get(&[1]).unwrap(), Some(vec![0])); + // assert_eq!(checkpoint.get(&[2]).unwrap(), Some(vec![123])); - checkpoint.destroy().unwrap(); + // checkpoint.destroy().unwrap(); - assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); - assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); - } + // assert_eq!(merk.get(&[1]).unwrap(), Some(vec![1])); + // assert_eq!(merk.get(&[2]).unwrap(), Some(vec![0])); + // } - #[test] - fn checkpoint_iterator() { - let path = thread::current().name().unwrap().to_owned(); - let mut merk = TempMerk::open(&path).expect("failed to open merk"); + // #[test] + // fn checkpoint_iterator() { + // let path = thread::current().name().unwrap().to_owned(); + // let mut merk = TempMerk::open(&path).expect("failed to open merk"); - merk.apply(&make_batch_seq(1..100), &[]) - .expect("apply failed"); + // merk.apply(&make_batch_seq(1..100), &[]) + // .expect("apply failed"); - let path: std::path::PathBuf = (path + ".checkpoint").into(); - if path.exists() { - std::fs::remove_dir_all(&path).unwrap(); - } - let checkpoint = merk.checkpoint(&path).unwrap(); + // let path: std::path::PathBuf = (path + ".checkpoint").into(); + // if path.exists() { + // std::fs::remove_dir_all(&path).unwrap(); + // } + // let checkpoint = merk.checkpoint(&path).unwrap(); - let mut merk_iter = merk.raw_iter(); - let mut 
checkpoint_iter = checkpoint.raw_iter(); + // let mut merk_iter = merk.raw_iter(); + // let mut checkpoint_iter = checkpoint.raw_iter(); - loop { - assert_eq!(merk_iter.valid(), checkpoint_iter.valid()); - if !merk_iter.valid() { - break; - } + // loop { + // assert_eq!(merk_iter.valid(), checkpoint_iter.valid()); + // if !merk_iter.valid() { + // break; + // } - assert_eq!(merk_iter.key(), checkpoint_iter.key()); - assert_eq!(merk_iter.value(), checkpoint_iter.value()); + // assert_eq!(merk_iter.key(), checkpoint_iter.key()); + // assert_eq!(merk_iter.value(), checkpoint_iter.value()); - merk_iter.next(); - checkpoint_iter.next(); - } + // merk_iter.next(); + // checkpoint_iter.next(); + // } - std::fs::remove_dir_all(&path).unwrap(); - } + // std::fs::remove_dir_all(&path).unwrap(); + // } } diff --git a/merk/src/merk/restore.rs b/merk/src/merk/restore.rs index 96b3be037..93b0dc4cc 100644 --- a/merk/src/merk/restore.rs +++ b/merk/src/merk/restore.rs @@ -20,16 +20,16 @@ use std::{path::Path, u8}; /// A `Restorer` handles decoding, verifying, and storing chunk proofs to /// replicate an entire Merk tree. It expects the chunks to be processed in /// order, retrying the last chunk if verification fails. -pub struct Restorer { +pub struct Restorer<'a> { leaf_hashes: Option>>, parent_keys: Option>>>, trunk_height: Option, - merk: Merk, + merk: Merk<'a>, expected_root_hash: Hash, stated_length: usize, } -impl Restorer { +impl<'a> Restorer<'a> { /// Creates a new `Restorer`, which will initialize a new Merk at the given /// file path. The first chunk (the "trunk") will be compared against /// `expected_root_hash`, then each subsequent chunk will be compared @@ -42,6 +42,7 @@ impl Restorer { /// length. 
pub fn new>( db_path: P, + prefix: &[u8], expected_root_hash: Hash, stated_length: usize, ) -> Result { @@ -53,7 +54,7 @@ impl Restorer { expected_root_hash, stated_length, trunk_height: None, - merk: Merk::open(db_path)?, + merk: Merk::open(db_path, prefix)?, leaf_hashes: None, parent_keys: None, }) @@ -78,7 +79,7 @@ impl Restorer { /// Merk instance. This method will return an error if called before /// processing all chunks (e.g. `restorer.remaining_chunks()` is not equal /// to 0). - pub fn finalize(mut self) -> Result { + pub fn finalize(mut self) -> Result> { if self.remaining_chunks().is_none() || self.remaining_chunks().unwrap() != 0 { bail!("Called finalize before all chunks were processed"); } @@ -299,10 +300,11 @@ impl Merk { /// verified during the restoration process. pub fn restore>( path: P, + prefix: &[u8], expected_root_hash: Hash, stated_length: usize, ) -> Result { - Restorer::new(path, expected_root_hash, stated_length) + Restorer::new(path, prefix, expected_root_hash, stated_length) } } diff --git a/merk/src/proofs/chunk.rs b/merk/src/proofs/chunk.rs index 6766f200b..40f088e78 100644 --- a/merk/src/proofs/chunk.rs +++ b/merk/src/proofs/chunk.rs @@ -8,8 +8,10 @@ use { }; use super::{Node, Op}; -use crate::error::Result; -use crate::tree::{Fetch, RefWalker}; +use crate::{ + error::Result, + tree::{Fetch, RefWalker}, +}; /// The minimum number of layers the trunk will be guaranteed to have before /// splitting into multiple chunks. If the tree's height is less than double @@ -178,7 +180,7 @@ pub(crate) fn verify_leaf>>( expected_hash: Hash, ) -> Result { let tree = execute(ops, false, |node| match node { - Node::KV(_, _) => Ok(()), + Node::KV(..) => Ok(()), _ => bail!("Leaf chunks must contain full subtree"), })?; @@ -221,7 +223,7 @@ pub(crate) fn verify_trunk>>(ops: I) -> Result<(Pr if remaining_depth > 0 { match tree.node { - Node::KV(_, _) => {} + Node::KV(..) 
=> {} _ => bail!("Expected trunk inner nodes to contain keys and values"), } recurse(true, leftmost)?; @@ -263,10 +265,11 @@ pub(crate) fn verify_trunk>>(ops: I) -> Result<(Pr mod tests { use std::usize; - use super::super::tree::Tree; - use super::*; - use crate::test_utils::*; - use crate::tree::{NoopCommit, PanicSource, Tree as BaseTree}; + use super::{super::tree::Tree, *}; + use crate::{ + test_utils::*, + tree::{NoopCommit, PanicSource, Tree as BaseTree}, + }; #[derive(Default)] struct NodeCounts { @@ -282,7 +285,7 @@ mod tests { match node { Node::Hash(_) => counts.hash += 1, Node::KVHash(_) => counts.kvhash += 1, - Node::KV(_, _) => counts.kv += 1, + Node::KV(..) => counts.kv += 1, }; }); @@ -402,7 +405,7 @@ mod tests { #[test] fn leaf_chunk_roundtrip() { - let mut merk = TempMerk::new().unwrap(); + let mut merk = TempMerk::new(); let batch = make_batch_seq(0..31); merk.apply(batch.as_slice(), &[]).unwrap(); diff --git a/merk/src/proofs/encoding.rs b/merk/src/proofs/encoding.rs index 36418b7d8..e7ef87599 100644 --- a/merk/src/proofs/encoding.rs +++ b/merk/src/proofs/encoding.rs @@ -4,8 +4,7 @@ use ed::{Decode, Encode, Terminated}; use failure::bail; use super::{Node, Op}; -use crate::error::Result; -use crate::tree::HASH_LENGTH; +use crate::{error::Result, tree::HASH_LENGTH}; impl Encode for Op { fn encode_into(&self, dest: &mut W) -> ed::Result<()> { diff --git a/merk/src/proofs/mod.rs b/merk/src/proofs/mod.rs index d391adb46..0cd784e16 100644 --- a/merk/src/proofs/mod.rs +++ b/merk/src/proofs/mod.rs @@ -3,12 +3,12 @@ pub mod encoding; pub mod query; pub mod tree; -use crate::tree::Hash; - pub use encoding::{encode_into, Decoder}; pub use query::Query; pub use tree::Tree; +use crate::tree::Hash; + /// A proof operator, executed to verify the data in a Merkle proof. 
#[derive(Debug, PartialEq)] pub enum Op { diff --git a/merk/src/proofs/query/map.rs b/merk/src/proofs/query/map.rs index 76bc606d8..cac838cb8 100644 --- a/merk/src/proofs/query/map.rs +++ b/merk/src/proofs/query/map.rs @@ -1,10 +1,12 @@ +use std::{ + collections::{btree_map, btree_map::Iter, BTreeMap}, + ops::{Bound, RangeBounds}, +}; + +use failure::{bail, ensure, format_err}; + use super::super::Node; use crate::Result; -use failure::{bail, ensure, format_err}; -use std::collections::btree_map; -use std::collections::btree_map::Iter; -use std::collections::BTreeMap; -use std::ops::{Bound, RangeBounds}; /// `MapBuilder` allows a consumer to construct a `Map` by inserting the nodes /// contained in a proof, in key-order. diff --git a/merk/src/proofs/query/mod.rs b/merk/src/proofs/query/mod.rs index 39a21ec92..c4ae2c675 100644 --- a/merk/src/proofs/query/mod.rs +++ b/merk/src/proofs/query/mod.rs @@ -1,18 +1,21 @@ mod map; -#[cfg(feature = "full")] -use {super::Op, std::collections::LinkedList}; +use std::{ + cmp::{max, min, Ordering}, + collections::BTreeSet, + ops::{Range, RangeInclusive}, +}; -use super::tree::execute; -use super::{Decoder, Node}; -use crate::error::Result; -use crate::tree::{Fetch, Hash, Link, RefWalker}; use failure::bail; -use std::cmp::{max, min, Ordering}; -use std::collections::BTreeSet; -use std::ops::{Range, RangeInclusive}; - pub use map::*; +#[cfg(feature = "full")] +use {super::Op, std::collections::LinkedList}; + +use super::{tree::execute, Decoder, Node}; +use crate::{ + error::Result, + tree::{Fetch, Hash, Link, RefWalker}, +}; /// `Query` represents one or more keys or ranges of keys, which can be used to /// resolve a proof which will include all of the requested values. 
@@ -100,8 +103,8 @@ impl From for Vec { } impl IntoIterator for Query { - type Item = QueryItem; type IntoIter = as IntoIterator>::IntoIter; + type Item = QueryItem; fn into_iter(self) -> Self::IntoIter { self.items.into_iter() @@ -411,7 +414,7 @@ pub fn verify_query( // lower bound is proven - the preceding tree node // is lower than the bound - Some(Node::KV(_, _)) => {} + Some(Node::KV(..)) => {} // cannot verify lower bound - we have an abridged // tree so we cannot tell what the preceding key was @@ -461,7 +464,7 @@ pub fn verify_query( if query.peek().is_some() { match last_push { // last node in tree was less than queried item - Some(Node::KV(_, _)) => {} + Some(Node::KV(..)) => {} // proof contains abridged data so we cannot verify absence of // remaining query items @@ -483,11 +486,14 @@ pub fn verify_query( #[allow(deprecated)] #[cfg(test)] mod test { - use super::super::encoding::encode_into; - use super::super::*; - use super::*; - use crate::test_utils::make_tree_seq; - use crate::tree::{NoopCommit, PanicSource, RefWalker, Tree}; + use super::{ + super::{encoding::encode_into, *}, + *, + }; + use crate::{ + test_utils::make_tree_seq, + tree::{NoopCommit, PanicSource, RefWalker, Tree}, + }; fn make_3_node_tree() -> Tree { let mut tree = Tree::new(vec![5], vec![5]) diff --git a/merk/src/proofs/tree.rs b/merk/src/proofs/tree.rs index a712e1e1e..24da1e13f 100644 --- a/merk/src/proofs/tree.rs +++ b/merk/src/proofs/tree.rs @@ -1,8 +1,11 @@ -use super::{Node, Op}; -use crate::error::Result; -use crate::tree::{kv_hash, node_hash, Hash, NULL_HASH}; use failure::bail; +use super::{Node, Op}; +use crate::{ + error::Result, + tree::{kv_hash, node_hash, Hash, NULL_HASH}, +}; + /// Contains a tree's child node and its hash. The hash can always be assumed to /// be up-to-date. #[derive(Debug)] @@ -168,7 +171,8 @@ impl<'a> LayerIter<'a> { iter } - /// Builds up the stack by traversing through left children to the desired depth. 
+ /// Builds up the stack by traversing through left children to the desired + /// depth. fn traverse_to_start(&mut self, tree: &'a Tree, remaining_depth: usize) { self.stack.push(tree); @@ -285,9 +289,7 @@ where #[cfg(test)] mod test { - use super::super::*; - use super::Tree as ProofTree; - use super::*; + use super::{super::*, Tree as ProofTree, *}; fn make_7_node_prooftree() -> ProofTree { let make_node = |i| -> super::super::tree::Tree { Node::KV(vec![i], vec![]).into() }; diff --git a/merk/src/test_utils/crash_merk.rs b/merk/src/test_utils/crash_merk.rs index 220242739..f1f57db55 100644 --- a/merk/src/test_utils/crash_merk.rs +++ b/merk/src/test_utils/crash_merk.rs @@ -1,52 +1,35 @@ +use std::{ + fs, + mem::ManuallyDrop, + ops::{Deref, DerefMut}, +}; + +use tempdir::TempDir; + use crate::{Merk, Result}; -use std::fs; -use std::mem::ManuallyDrop; -use std::ops::{Deref, DerefMut}; -use std::path::Path; /// Wraps a Merk instance and drops it without flushing once it goes out of /// scope. pub struct CrashMerk { - inner: Option>, - path: Box, + merk: Merk, + path: Option, } impl CrashMerk { - /// Opens a `CrashMerk` at the given file path, creating a new one if it does - /// not exist. - pub fn open>(path: P) -> Result { - let merk = Merk::open(&path)?; - let inner = Some(ManuallyDrop::new(merk)); + /// Opens a `CrashMerk` backed by a fresh temporary directory, creating a + /// new database.
+ pub fn open() -> Result { + let path = TempDir::new("db").expect("cannot create tempdir"); + let db = super::default_rocksdb(path.path()); + let merk = Merk::open(db, Vec::new())?; Ok(CrashMerk { - inner, - path: path.as_ref().into(), + merk, + path: Some(path), }) } - #[allow(clippy::missing_safety_doc)] - pub unsafe fn crash(&mut self) -> Result<()> { - ManuallyDrop::drop(&mut self.inner.take().unwrap()); - - // rename to invalidate rocksdb's lock - let file_name = format!( - "{}_crashed", - self.path.file_name().unwrap().to_str().unwrap() - ); - let new_path = self.path.with_file_name(file_name); - fs::rename(&self.path, &new_path)?; - - let mut new_merk = CrashMerk::open(&new_path)?; - self.inner = new_merk.inner.take(); - self.path = new_merk.path; - Ok(()) - } - - pub fn into_inner(self) -> Merk { - ManuallyDrop::into_inner(self.inner.unwrap()) - } - - pub fn destroy(self) -> Result<()> { - self.into_inner().destroy() + pub fn crash(&mut self) { + self.path.take().map(|x| drop(x)); } } @@ -54,13 +37,13 @@ impl Deref for CrashMerk { type Target = Merk; fn deref(&self) -> &Merk { - self.inner.as_ref().unwrap() + &self.merk } } impl DerefMut for CrashMerk { fn deref_mut(&mut self) -> &mut Merk { - self.inner.as_mut().unwrap() + &mut self.merk } } @@ -74,13 +57,12 @@ mod tests { fn crash() { let path = std::thread::current().name().unwrap().to_owned(); - let mut merk = CrashMerk::open(&path).expect("failed to open merk"); + let mut merk = CrashMerk::open().expect("failed to open merk"); merk.apply(&[(vec![1, 2, 3], Op::Put(vec![4, 5, 6]))], &[]) .expect("apply failed"); - unsafe { - merk.crash().unwrap(); - } + + merk.crash(); + assert_eq!(merk.get(&[1, 2, 3]).expect("failed to get"), None); - merk.into_inner().destroy().unwrap(); } } diff --git a/merk/src/test_utils/mod.rs b/merk/src/test_utils/mod.rs index 7cbdc92de..1956997a5 100644 --- a/merk/src/test_utils/mod.rs +++ b/merk/src/test_utils/mod.rs @@ -1,14 +1,14 @@ mod crash_merk; mod temp_merk; -use 
crate::tree::{Batch, BatchEntry, NoopCommit, Op, PanicSource, Tree, Walker}; +use std::{convert::TryInto, ops::Range}; + use byteorder::{BigEndian, WriteBytesExt}; +pub use crash_merk::CrashMerk; use rand::prelude::*; -use std::convert::TryInto; -use std::ops::Range; +pub use temp_merk::{default_rocksdb, TempMerk}; -pub use crash_merk::CrashMerk; -pub use temp_merk::TempMerk; +use crate::tree::{Batch, BatchEntry, NoopCommit, Op, PanicSource, Tree, Walker}; pub fn assert_tree_invariants(tree: &Tree) { assert!(tree.balance_factor().abs() < 2); diff --git a/merk/src/test_utils/temp_merk.rs b/merk/src/test_utils/temp_merk.rs index 890ffd6ae..c1fc2c06b 100644 --- a/merk/src/test_utils/temp_merk.rs +++ b/merk/src/test_utils/temp_merk.rs @@ -1,54 +1,48 @@ -use crate::{Merk, Result}; -use std::env::temp_dir; -use std::ops::{Deref, DerefMut}; -use std::path::Path; -use std::time::SystemTime; +use std::{ + ops::{Deref, DerefMut}, + path::Path, + rc::Rc, +}; + +use tempdir::TempDir; + +use crate::Merk; /// Wraps a Merk instance and deletes it from disk it once it goes out of scope. pub struct TempMerk { - inner: Option, + pub inner: Merk, + pub path: TempDir, } impl TempMerk { - /// Opens a `TempMerk` at the given file path, creating a new one if it does - /// not exist. - pub fn open>(path: P) -> Result { - let inner = Some(Merk::open(path)?); - Ok(TempMerk { inner }) - } - /// Opens a `TempMerk` at an autogenerated, temporary file path. 
- pub fn new() -> Result { - let time = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_nanos(); - let mut path = temp_dir(); - path.push(format!("merk-temp–{}", time)); - TempMerk::open(path) + pub fn new() -> TempMerk { + let path = TempDir::new("db").expect("cannot create tempdir"); + let db = default_rocksdb(path.path()); + TempMerk { + inner: Merk::open(db, Vec::new()).expect("cannot open Merk"), + path, + } } } -impl Drop for TempMerk { - fn drop(&mut self) { - self.inner - .take() - .unwrap() - .destroy() - .expect("failed to delete db"); - } +pub fn default_rocksdb(path: &Path) -> Rc { + Rc::new( + rocksdb::DB::open_cf_descriptors(&Merk::default_db_opts(), &path, crate::column_families()) + .expect("cannot create rocksdb"), + ) } impl Deref for TempMerk { type Target = Merk; fn deref(&self) -> &Merk { - self.inner.as_ref().unwrap() + &self.inner } } impl DerefMut for TempMerk { fn deref_mut(&mut self) -> &mut Merk { - self.inner.as_mut().unwrap() + &mut self.inner } } diff --git a/merk/src/tree/debug.rs b/merk/src/tree/debug.rs index cb46091f6..f3d29bce0 100644 --- a/merk/src/tree/debug.rs +++ b/merk/src/tree/debug.rs @@ -1,7 +1,9 @@ -use super::{Link, Tree}; -use colored::Colorize; use std::fmt::{Debug, Formatter, Result}; +use colored::Colorize; + +use super::{Link, Tree}; + impl Debug for Tree { // TODO: unwraps should be results that bubble up fn fmt(&self, f: &mut Formatter) -> Result { diff --git a/merk/src/tree/encoding.rs b/merk/src/tree/encoding.rs index 84c8d3f1c..84f848376 100644 --- a/merk/src/tree/encoding.rs +++ b/merk/src/tree/encoding.rs @@ -1,6 +1,7 @@ -use super::Tree; use ed::{Decode, Encode}; +use super::Tree; + impl Tree { #[inline] pub fn encode(&self) -> Vec { @@ -38,8 +39,7 @@ impl Tree { #[cfg(test)] mod tests { - use super::super::Link; - use super::*; + use super::{super::Link, *}; #[test] fn encode_leaf_tree() { diff --git a/merk/src/tree/fuzz_tests.rs b/merk/src/tree/fuzz_tests.rs index 
a58b8f2c2..62b9b2067 100644 --- a/merk/src/tree/fuzz_tests.rs +++ b/merk/src/tree/fuzz_tests.rs @@ -1,10 +1,10 @@ #![cfg(tests)] -use crate::test_utils::*; -use crate::tree::*; +use std::{cell::RefCell, collections::BTreeMap}; + use rand::prelude::*; -use std::cell::RefCell; -use std::collections::BTreeMap; + +use crate::{test_utils::*, tree::*}; const ITERATIONS: usize = 2_000; type Map = BTreeMap, Vec>; diff --git a/merk/src/tree/kv.rs b/merk/src/tree/kv.rs index 20fad86f3..8307d5b03 100644 --- a/merk/src/tree/kv.rs +++ b/merk/src/tree/kv.rs @@ -1,7 +1,9 @@ -use super::hash::{kv_hash, Hash, HASH_LENGTH, NULL_HASH}; -use ed::{Decode, Encode, Result}; use std::io::{Read, Write}; +use ed::{Decode, Encode, Result}; + +use super::hash::{kv_hash, Hash, HASH_LENGTH, NULL_HASH}; + // TODO: maybe use something similar to Vec but without capacity field, // (should save 16 bytes per entry). also, maybe a shorter length // field to save even more. also might be possible to combine key diff --git a/merk/src/tree/link.rs b/merk/src/tree/link.rs index 41684c914..eb8b4f22c 100644 --- a/merk/src/tree/link.rs +++ b/merk/src/tree/link.rs @@ -1,10 +1,11 @@ -use std::cmp::max; -use std::io::{Read, Write}; +use std::{ + cmp::max, + io::{Read, Write}, +}; use ed::{Decode, Encode, Result, Terminated}; -use super::hash::Hash; -use super::Tree; +use super::{hash::Hash, Tree}; // TODO: optimize memory footprint @@ -154,8 +155,8 @@ impl Link { right_height as i8 - left_height as i8 } - /// Consumes the link and converts to variant `Link::Reference`. Panics if the - /// link is of variant `Link::Modified` or `Link::Uncommitted`. + /// Consumes the link and converts to variant `Link::Reference`. Panics if + /// the link is of variant `Link::Modified` or `Link::Uncommitted`. 
#[inline] pub fn into_reference(self) -> Self { match self { @@ -307,9 +308,10 @@ fn read_u8(mut input: R) -> Result { #[cfg(test)] mod test { - use super::super::hash::NULL_HASH; - use super::super::Tree; - use super::*; + use super::{ + super::{hash::NULL_HASH, Tree}, + *, + }; #[test] fn from_modified_tree() { diff --git a/merk/src/tree/mod.rs b/merk/src/tree/mod.rs index c5bcdab1b..7c433f093 100644 --- a/merk/src/tree/mod.rs +++ b/merk/src/tree/mod.rs @@ -12,16 +12,16 @@ mod walk; use std::cmp::max; -use ed::{Decode, Encode}; - -use super::error::Result; pub use commit::{Commit, NoopCommit}; +use ed::{Decode, Encode}; pub use hash::{kv_hash, node_hash, Hash, HASH_LENGTH, NULL_HASH}; use kv::KV; pub use link::Link; pub use ops::{Batch, BatchEntry, Op, PanicSource}; pub use walk::{Fetch, RefWalker, Walker}; +use super::error::Result; + // TODO: remove need for `TreeInner`, and just use `Box` receiver for // relevant methods @@ -376,8 +376,8 @@ impl Tree { } /// Fetches the child on the given side using the given data source, and - /// places it in the child slot (upgrading the link from `Link::Reference` to - /// `Link::Loaded`). + /// places it in the child slot (upgrading the link from `Link::Reference` + /// to `Link::Loaded`). #[inline] pub fn load(&mut self, left: bool, source: &S) -> Result<()> { // TODO: return Err instead of panic? 
@@ -413,9 +413,7 @@ pub fn side_to_str(left: bool) -> &'static str { #[cfg(test)] mod test { - use super::commit::NoopCommit; - use super::hash::NULL_HASH; - use super::Tree; + use super::{commit::NoopCommit, hash::NULL_HASH, Tree}; #[test] fn build_tree() { diff --git a/merk/src/tree/ops.rs b/merk/src/tree/ops.rs index d35135d69..2b3d1cbec 100644 --- a/merk/src/tree/ops.rs +++ b/merk/src/tree/ops.rs @@ -1,8 +1,9 @@ +use std::{collections::LinkedList, fmt}; + +use Op::*; + use super::{Fetch, Link, Tree, Walker}; use crate::error::Result; -use std::collections::LinkedList; -use std::fmt; -use Op::*; /// An operation to be applied to a key in the store. pub enum Op { @@ -294,10 +295,10 @@ where #[cfg(test)] mod test { use super::*; - use crate::test_utils::{ - apply_memonly, assert_tree_invariants, del_entry, make_tree_seq, seq_key, + use crate::{ + test_utils::{apply_memonly, assert_tree_invariants, del_entry, make_tree_seq, seq_key}, + tree::*, }; - use crate::tree::*; #[test] fn simple_insert() { diff --git a/merk/src/tree/walk/mod.rs b/merk/src/tree/walk/mod.rs index 6a0487e10..184d0adc5 100644 --- a/merk/src/tree/walk/mod.rs +++ b/merk/src/tree/walk/mod.rs @@ -1,12 +1,12 @@ mod fetch; mod ref_walker; -use super::{Link, Tree}; -use crate::error::Result; -use crate::owner::Owner; pub use fetch::Fetch; pub use ref_walker::RefWalker; +use super::{Link, Tree}; +use crate::{error::Result, owner::Owner}; + /// Allows traversal of a `Tree`, fetching from the given source when traversing /// to a pruned node, detaching children as they are traversed. pub struct Walker @@ -30,7 +30,8 @@ where } /// Similar to `Tree#detach`, but yields a `Walker` which fetches from the - /// same source as `self`. Returned tuple is `(updated_self, maybe_child_walker)`. + /// same source as `self`. Returned tuple is `(updated_self, + /// maybe_child_walker)`. 
pub fn detach(mut self, left: bool) -> Result<(Self, Option)> { let link = match self.tree.link(left) { None => return Ok((self, None)), @@ -56,7 +57,8 @@ where } /// Similar to `Tree#detach_expect`, but yields a `Walker` which fetches - /// from the same source as `self`. Returned tuple is `(updated_self, child_walker)`. + /// from the same source as `self`. Returned tuple is `(updated_self, + /// child_walker)`. pub fn detach_expect(self, left: bool) -> Result<(Self, Self)> { let (walker, maybe_child) = self.detach(left)?; if let Some(child) = maybe_child { @@ -145,8 +147,7 @@ where #[cfg(test)] mod test { - use super::super::NoopCommit; - use super::*; + use super::{super::NoopCommit, *}; use crate::tree::Tree; #[derive(Clone)] diff --git a/merk/src/tree/walk/ref_walker.rs b/merk/src/tree/walk/ref_walker.rs index 95602693c..c33f2d960 100644 --- a/merk/src/tree/walk/ref_walker.rs +++ b/merk/src/tree/walk/ref_walker.rs @@ -1,5 +1,7 @@ -use super::super::{Link, Tree}; -use super::Fetch; +use super::{ + super::{Link, Tree}, + Fetch, +}; use crate::error::Result; /// Allows read-only traversal of a `Tree`, fetching from the given source when