From 6a93109820334b8db9dc21675061655778459015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Sun, 4 Jun 2023 03:12:42 +0800 Subject: [PATCH 01/24] [local_chain] Implement `LocalChain` with linked list This allows the data-source thread to hold a reference to checkpoints without a lock on `LocalChain` itself. Introduce `LocalChain::update` that replaces `determine_changeset` and `apply_update`. This returns a closure that actually updates `self` when called. This method allows for efficient and elegant updating while being able to "preview" the update before applying. The `LocalChain` update/determine_changeset tests have been updated to also check for the final state after applying the update (not just looking at the changeset). Update `keychain::LocalUpdate` struct to use `CheckPoint` Instead of containing a complete `LocalChain`, the update uses `CheckPoint`. This simplifies the API since updating a `LocalChain` only requires a `CheckPoint` now. The examples and chain source `..Ext` implementations have all been updated to use the new API. Additionally, `..Ext` implementations didn't 100% guarantee consistency of the updates, the logic has been changed to enforce better guarantees. 
--- crates/bdk/Cargo.toml | 2 +- crates/bdk/src/wallet/mod.rs | 59 +- crates/bdk/tests/wallet.rs | 9 +- crates/chain/src/keychain.rs | 24 +- crates/chain/src/local_chain.rs | 533 +++++++++++++----- crates/chain/tests/common/mod.rs | 20 +- crates/chain/tests/test_indexed_tx_graph.rs | 16 +- crates/chain/tests/test_local_chain.rs | 370 ++++++------ crates/chain/tests/test_tx_graph.rs | 22 +- crates/electrum/src/electrum_ext.rs | 129 +++-- crates/electrum/src/lib.rs | 13 +- crates/esplora/src/async_ext.rs | 99 +++- crates/esplora/src/blocking_ext.rs | 92 +-- crates/esplora/src/lib.rs | 6 +- example-crates/example_electrum/src/main.rs | 39 +- example-crates/wallet_electrum/src/main.rs | 5 +- example-crates/wallet_esplora/src/main.rs | 4 +- .../wallet_esplora_async/src/main.rs | 11 +- 18 files changed, 901 insertions(+), 552 deletions(-) diff --git a/crates/bdk/Cargo.toml b/crates/bdk/Cargo.toml index 29b478d13..d917595f3 100644 --- a/crates/bdk/Cargo.toml +++ b/crates/bdk/Cargo.toml @@ -47,7 +47,7 @@ dev-getrandom-wasm = ["getrandom/js"] lazy_static = "1.4" env_logger = "0.7" # Move back to importing from rust-bitcoin once https://github.com/rust-bitcoin/rust-bitcoin/pull/1342 is released -base64 = "^0.13" +base64 = "^0.21" assert_matches = "1.5.0" [package.metadata.docs.rs] diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index f2f717d9f..af818e1ff 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -23,7 +23,7 @@ pub use bdk_chain::keychain::Balance; use bdk_chain::{ indexed_tx_graph::IndexedAdditions, keychain::{KeychainTxOutIndex, LocalChangeSet, LocalUpdate}, - local_chain::{self, LocalChain, UpdateNotConnectedError}, + local_chain::{self, CannotConnectError, CheckPoint, CheckPointIter, LocalChain}, tx_graph::{CanonicalTx, TxGraph}, Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeAnchor, FullTxOut, IndexedTxGraph, Persist, PersistBackend, @@ -32,8 +32,8 @@ use 
bitcoin::consensus::encode::serialize; use bitcoin::secp256k1::Secp256k1; use bitcoin::util::psbt; use bitcoin::{ - Address, BlockHash, EcdsaSighashType, LockTime, Network, OutPoint, SchnorrSighashType, Script, - Sequence, Transaction, TxOut, Txid, Witness, + Address, EcdsaSighashType, LockTime, Network, OutPoint, SchnorrSighashType, Script, Sequence, + Transaction, TxOut, Txid, Witness, }; use core::fmt; use core::ops::Deref; @@ -245,7 +245,7 @@ impl Wallet { }; let changeset = db.load_from_persistence().map_err(NewError::Persist)?; - chain.apply_changeset(changeset.chain_changeset); + chain.apply_changeset(&changeset.chain_changeset); indexed_graph.apply_additions(changeset.indexed_additions); let persist = Persist::new(db); @@ -370,19 +370,19 @@ impl Wallet { .graph() .filter_chain_unspents( &self.chain, - self.chain.tip().unwrap_or_default(), + self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(), self.indexed_graph.index.outpoints().iter().cloned(), ) .map(|((k, i), full_txo)| new_local_utxo(k, i, full_txo)) } /// Get all the checkpoints the wallet is currently storing indexed by height. - pub fn checkpoints(&self) -> &BTreeMap { - self.chain.blocks() + pub fn checkpoints(&self) -> CheckPointIter { + self.chain.iter_checkpoints(None) } /// Returns the latest checkpoint. 
- pub fn latest_checkpoint(&self) -> Option { + pub fn latest_checkpoint(&self) -> Option { self.chain.tip() } @@ -420,7 +420,7 @@ impl Wallet { .graph() .filter_chain_unspents( &self.chain, - self.chain.tip().unwrap_or_default(), + self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(), core::iter::once((spk_i, op)), ) .map(|((k, i), full_txo)| new_local_utxo(k, i, full_txo)) @@ -437,7 +437,7 @@ impl Wallet { let canonical_tx = CanonicalTx { observed_as: graph.get_chain_position( &self.chain, - self.chain.tip().unwrap_or_default(), + self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(), txid, )?, node: graph.get_tx_node(txid)?, @@ -460,11 +460,11 @@ impl Wallet { pub fn insert_checkpoint( &mut self, block_id: BlockId, - ) -> Result + ) -> Result where D: PersistBackend, { - let changeset = self.chain.insert_block(block_id)?; + let (_, changeset) = self.chain.get_or_insert(block_id)?; let changed = !changeset.is_empty(); self.persist.stage(changeset.into()); Ok(changed) @@ -500,18 +500,15 @@ impl Wallet { // anchor tx to checkpoint with lowest height that is >= position's height let anchor = self .chain - .blocks() + .checkpoints() .range(height..) 
.next() .ok_or(InsertTxError::ConfirmationHeightCannotBeGreaterThanTip { - tip_height: self.chain.tip().map(|b| b.height), + tip_height: self.chain.tip().map(|b| b.height()), tx_height: height, }) - .map(|(&anchor_height, &anchor_hash)| ConfirmationTimeAnchor { - anchor_block: BlockId { - height: anchor_height, - hash: anchor_hash, - }, + .map(|(&_, cp)| ConfirmationTimeAnchor { + anchor_block: cp.block_id(), confirmation_height: height, confirmation_time: time, })?; @@ -531,9 +528,10 @@ impl Wallet { pub fn transactions( &self, ) -> impl Iterator> + '_ { - self.indexed_graph - .graph() - .list_chain_txs(&self.chain, self.chain.tip().unwrap_or_default()) + self.indexed_graph.graph().list_chain_txs( + &self.chain, + self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(), + ) } /// Return the balance, separated into available, trusted-pending, untrusted-pending and immature @@ -541,7 +539,7 @@ impl Wallet { pub fn get_balance(&self) -> Balance { self.indexed_graph.graph().balance( &self.chain, - self.chain.tip().unwrap_or_default(), + self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(), self.indexed_graph.index.outpoints().iter().cloned(), |&(k, _), _| k == KeychainKind::Internal, ) @@ -715,8 +713,7 @@ impl Wallet { None => self .chain .tip() - .and_then(|cp| cp.height.into()) - .map(|height| LockTime::from_height(height).expect("Invalid height")), + .map(|cp| LockTime::from_height(cp.height()).expect("Invalid height")), h => h, }; @@ -1030,7 +1027,7 @@ impl Wallet { ) -> Result, Error> { let graph = self.indexed_graph.graph(); let txout_index = &self.indexed_graph.index; - let chain_tip = self.chain.tip().unwrap_or_default(); + let chain_tip = self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(); let mut tx = graph .get_tx(txid) @@ -1265,7 +1262,7 @@ impl Wallet { psbt: &mut psbt::PartiallySignedTransaction, sign_options: SignOptions, ) -> Result { - let chain_tip = self.chain.tip().unwrap_or_default(); + let chain_tip = 
self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(); let tx = &psbt.unsigned_tx; let mut finished = true; @@ -1288,7 +1285,7 @@ impl Wallet { }); let current_height = sign_options .assume_height - .or(self.chain.tip().map(|b| b.height)); + .or(self.chain.tip().map(|b| b.height())); debug!( "Input #{} - {}, using `confirmation_height` = {:?}, `current_height` = {:?}", @@ -1433,7 +1430,7 @@ impl Wallet { must_only_use_confirmed_tx: bool, current_height: Option, ) -> (Vec, Vec) { - let chain_tip = self.chain.tip().unwrap_or_default(); + let chain_tip = self.chain.tip().map(|cp| cp.block_id()).unwrap_or_default(); // must_spend <- manually selected utxos // may_spend <- all other available utxos let mut may_spend = self.get_available_utxos(); @@ -1704,11 +1701,11 @@ impl Wallet { /// transactions related to your wallet into it. /// /// [`commit`]: Self::commit - pub fn apply_update(&mut self, update: Update) -> Result + pub fn apply_update(&mut self, update: Update) -> Result where D: PersistBackend, { - let mut changeset: ChangeSet = self.chain.apply_update(update.chain)?.into(); + let mut changeset = ChangeSet::from(self.chain.apply_update(update.tip)?); let (_, index_additions) = self .indexed_graph .index diff --git a/crates/bdk/tests/wallet.rs b/crates/bdk/tests/wallet.rs index 282a74fcb..ed014f70a 100644 --- a/crates/bdk/tests/wallet.rs +++ b/crates/bdk/tests/wallet.rs @@ -44,7 +44,10 @@ fn receive_output(wallet: &mut Wallet, value: u64, height: ConfirmationTime) -> fn receive_output_in_latest_block(wallet: &mut Wallet, value: u64) -> OutPoint { let height = match wallet.latest_checkpoint() { - Some(BlockId { height, .. 
}) => ConfirmationTime::Confirmed { height, time: 0 }, + Some(cp) => ConfirmationTime::Confirmed { + height: cp.height(), + time: 0, + }, None => ConfirmationTime::Unconfirmed { last_seen: 0 }, }; receive_output(wallet, value, height) @@ -222,7 +225,7 @@ fn test_create_tx_fee_sniping_locktime_last_sync() { // If there's no current_height we're left with using the last sync height assert_eq!( psbt.unsigned_tx.lock_time.0, - wallet.latest_checkpoint().unwrap().height + wallet.latest_checkpoint().unwrap().height() ); } @@ -1482,7 +1485,7 @@ fn test_bump_fee_drain_wallet() { .insert_tx( tx.clone(), ConfirmationTime::Confirmed { - height: wallet.latest_checkpoint().unwrap().height, + height: wallet.latest_checkpoint().unwrap().height(), time: 42_000, }, ) diff --git a/crates/chain/src/keychain.rs b/crates/chain/src/keychain.rs index f9b2436f2..d83868890 100644 --- a/crates/chain/src/keychain.rs +++ b/crates/chain/src/keychain.rs @@ -13,7 +13,7 @@ use crate::{ collections::BTreeMap, indexed_tx_graph::IndexedAdditions, - local_chain::{self, LocalChain}, + local_chain::{self, CheckPoint}, tx_graph::TxGraph, Anchor, Append, }; @@ -89,8 +89,9 @@ impl AsRef> for DerivationAdditions { } } -/// A structure to update [`KeychainTxOutIndex`], [`TxGraph`] and [`LocalChain`] -/// atomically. +/// A structure to update [`KeychainTxOutIndex`], [`TxGraph`] and [`LocalChain`] atomically. +/// +/// [`LocalChain`]: local_chain::LocalChain #[derive(Debug, Clone, PartialEq)] pub struct LocalUpdate { /// Last active derivation index per keychain (`K`). @@ -98,15 +99,18 @@ pub struct LocalUpdate { /// Update for the [`TxGraph`]. pub graph: TxGraph, /// Update for the [`LocalChain`]. - pub chain: LocalChain, + /// + /// [`LocalChain`]: local_chain::LocalChain + pub tip: CheckPoint, } -impl Default for LocalUpdate { - fn default() -> Self { +impl LocalUpdate { + /// Construct a [`LocalUpdate`] with a given [`CheckPoint`] tip. 
+ pub fn new(tip: CheckPoint) -> Self { Self { - keychain: Default::default(), - graph: Default::default(), - chain: Default::default(), + keychain: BTreeMap::new(), + graph: TxGraph::default(), + tip, } } } @@ -126,6 +130,8 @@ impl Default for LocalUpdate { )] pub struct LocalChangeSet { /// Changes to the [`LocalChain`]. + /// + /// [`LocalChain`]: local_chain::LocalChain pub chain_changeset: local_chain::ChangeSet, /// Additions to [`IndexedTxGraph`]. diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index fe97e3f27..361dcff01 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -2,15 +2,148 @@ use core::convert::Infallible; -use alloc::collections::BTreeMap; +use crate::collections::BTreeMap; +use crate::{BlockId, ChainOracle}; +use alloc::sync::Arc; use bitcoin::BlockHash; -use crate::{BlockId, ChainOracle}; +/// A structure that represents changes to [`LocalChain`]. +pub type ChangeSet = BTreeMap>; + +/// Represents a block of [`LocalChain`]. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct CheckPoint(Arc); + +/// The internal contents of [`CheckPoint`]. +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +struct CPInner { + /// Block id (hash and height). + block: BlockId, + /// Previous checkpoint (if any). + prev: Option>, +} + +/// Occurs when the caller contructs a [`CheckPoint`] with a height that is not higher than the +/// previous checkpoint it points to. +#[derive(Debug, Clone, PartialEq)] +pub struct NewCheckPointError { + /// The height of the new checkpoint. + pub new_height: u32, + /// The height of the previous checkpoint. 
+ pub prev_height: u32, +} + +impl core::fmt::Display for NewCheckPointError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "cannot construct checkpoint with a height ({}) that is not higher than the previous checkpoint ({})", self.new_height, self.prev_height) + } +} + +impl std::error::Error for NewCheckPointError {} + +impl CheckPoint { + /// Construct a [`CheckPoint`] from a [`BlockId`]. + pub fn new(block: BlockId) -> Self { + Self(Arc::new(CPInner { block, prev: None })) + } + + /// Construct a [`CheckPoint`] of `block` with a previous checkpoint. + pub fn new_with_prev( + block: BlockId, + prev: Option, + ) -> Result { + if let Some(prev_cp) = &prev { + if prev_cp.height() >= block.height { + return Err(NewCheckPointError { + new_height: block.height, + prev_height: prev_cp.height(), + }); + } + } + + Ok(Self(Arc::new(CPInner { + block, + prev: prev.map(|cp| cp.0), + }))) + } + + /// Get the [`BlockId`] of the checkpoint. + pub fn block_id(&self) -> BlockId { + self.0.block + } + + /// Get the height of the checkpoint. + pub fn height(&self) -> u32 { + self.0.block.height + } + + /// Get the block hash of the checkpoint. + pub fn hash(&self) -> BlockHash { + self.0.block.hash + } + + /// Detach this checkpoint from the previous. + pub fn detach(self) -> Self { + Self(Arc::new(CPInner { + block: self.0.block, + prev: None, + })) + } + + /// Get previous checkpoint. + pub fn prev(&self) -> Option { + self.0.prev.clone().map(CheckPoint) + } + + /// Iterate + pub fn iter(&self) -> CheckPointIter { + CheckPointIter { + current: Some(Arc::clone(&self.0)), + } + } +} + +/// A structure that iterates over checkpoints backwards. 
+pub struct CheckPointIter { + current: Option>, +} + +impl Iterator for CheckPointIter { + type Item = CheckPoint; + + fn next(&mut self) -> Option { + let current = self.current.clone()?; + self.current = current.prev.clone(); + Some(CheckPoint(current)) + } +} /// This is a local implementation of [`ChainOracle`]. #[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct LocalChain { - blocks: BTreeMap, + checkpoints: BTreeMap, +} + +impl From for BTreeMap { + fn from(value: LocalChain) -> Self { + value + .checkpoints + .values() + .map(|cp| (cp.height(), cp.hash())) + .collect() + } +} + +impl From for LocalChain { + fn from(value: ChangeSet) -> Self { + Self::from_changeset(value) + } +} + +impl From> for LocalChain { + fn from(value: BTreeMap) -> Self { + Self::from_blocks(value) + } } impl ChainOracle for LocalChain { @@ -19,18 +152,18 @@ impl ChainOracle for LocalChain { fn is_block_in_chain( &self, block: BlockId, - static_block: BlockId, + chain_tip: BlockId, ) -> Result, Self::Error> { - if block.height > static_block.height { + if block.height > chain_tip.height { return Ok(None); } Ok( match ( - self.blocks.get(&block.height), - self.blocks.get(&static_block.height), + self.checkpoints.get(&block.height), + self.checkpoints.get(&chain_tip.height), ) { - (Some(&hash), Some(&static_hash)) => { - Some(hash == block.hash && static_hash == static_block.hash) + (Some(cp), Some(tip_cp)) => { + Some(cp.hash() == block.hash && tip_cp.hash() == chain_tip.hash) } _ => None, }, @@ -38,196 +171,282 @@ impl ChainOracle for LocalChain { } fn get_chain_tip(&self) -> Result, Self::Error> { - Ok(self.tip()) + Ok(self.checkpoints.values().last().map(CheckPoint::block_id)) } } -impl AsRef> for LocalChain { - fn as_ref(&self) -> &BTreeMap { - &self.blocks - } -} - -impl From for BTreeMap { - fn from(value: LocalChain) -> Self { - value.blocks +impl LocalChain { + /// Construct a [`LocalChain`] from an initial `changeset`. 
+ pub fn from_changeset(changeset: ChangeSet) -> Self { + let mut chain = Self::default(); + chain.apply_changeset(&changeset); + chain } -} -impl From> for LocalChain { - fn from(value: BTreeMap) -> Self { - Self { blocks: value } + /// Construct a [`LocalChain`] from a given `checkpoint` tip. + pub fn from_checkpoint(checkpoint: CheckPoint) -> Self { + Self { + checkpoints: checkpoint.iter().map(|cp| (cp.height(), cp)).collect(), + } } -} -impl LocalChain { - /// Contruct a [`LocalChain`] from a list of [`BlockId`]s. - pub fn from_blocks(blocks: B) -> Self - where - B: IntoIterator, - { + /// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`]. + /// + /// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are + /// all of the same chain. + pub fn from_blocks(blocks: BTreeMap) -> Self { Self { - blocks: blocks.into_iter().map(|b| (b.height, b.hash)).collect(), + checkpoints: blocks + .into_iter() + .map({ + let mut prev = None; + move |(height, hash)| { + let cp = CheckPoint::new_with_prev(BlockId { height, hash }, prev.clone()) + .expect("must not fail"); + prev = Some(cp.clone()); + (height, cp) + } + }) + .collect(), } } - /// Get a reference to a map of block height to hash. - pub fn blocks(&self) -> &BTreeMap { - &self.blocks + /// Get the highest checkpoint. + pub fn tip(&self) -> Option { + self.checkpoints.values().last().cloned() } - /// Get the chain tip. - pub fn tip(&self) -> Option { - self.blocks - .iter() - .last() - .map(|(&height, &hash)| BlockId { height, hash }) + /// Returns whether the [`LocalChain`] is empty (has no checkpoints). + pub fn is_empty(&self) -> bool { + self.checkpoints.is_empty() } - /// This is like the sparsechain's logic, expect we must guarantee that all invalidated heights - /// are to be re-filled. 
- pub fn determine_changeset(&self, update: &Self) -> Result { - let update = update.as_ref(); - let update_tip = match update.keys().last().cloned() { - Some(tip) => tip, - None => return Ok(ChangeSet::default()), - }; + /// Previews, and optionally applies updates to [`Self`] with the given `new_tip`. + /// + /// The method returns `(apply_update, changeset)` if [`Ok`]. `apply_update` is a closure that + /// can be called to apply the changes represented in `changeset. + /// + /// To update, the `new_tip` must *connect* with `self`. If `self` and `new_tip` has a mutual + /// checkpoint (same height and hash), it can connect if: + /// * The mutual checkpoint is the tip of `self`. + /// * An ancestor of `new_tip` has a height which is of the checkpoint one higher than the + /// mutual checkpoint from `self`. + /// + /// Additionally: + /// * If `self` is empty, `new_tip` will always connect. + /// * If `self` only has one checkpoint, `new_tip` must have an ancestor checkpoint with the + /// same height as it. + /// + /// To invalidate from a given checkpoint, `new_tip` must contain an ancestor checkpoint with + /// the same height but different hash. + /// + /// # Errors + /// + /// An error will occur if the update does not correctly connect with `self`. + /// + /// Refer to [module-level documentation] for more. 
+ /// + /// [module-level documentation]: crate::local_chain + pub fn update( + &mut self, + new_tip: CheckPoint, + ) -> Result<(impl FnOnce() + '_, ChangeSet), CannotConnectError> { + let mut updated_cps = BTreeMap::::new(); + let mut agreement_height = Option::::None; + let mut complete_match = false; + + for cp in new_tip.iter() { + let block = cp.block_id(); + let original_cp = self.checkpoints.get(&block.height); + + // if original block of height does not exist, or if the hash does not match we will + // need to update the original checkpoint at that height + if original_cp.map(CheckPoint::block_id) != Some(block) { + updated_cps.insert(block.height, cp.clone()); + } - // this is the latest height where both the update and local chain has the same block hash - let agreement_height = update - .iter() - .rev() - .find(|&(u_height, u_hash)| self.blocks.get(u_height) == Some(u_hash)) - .map(|(&height, _)| height); + if let Some(original_cp) = original_cp { + // record the first agreement height + if agreement_height.is_none() && original_cp.block_id() == block { + agreement_height = Some(block.height); + } + // break if the internal pointers of the checkpoints are the same + if Arc::as_ptr(&original_cp.0) == Arc::as_ptr(&cp.0) { + complete_match = true; + break; + } + } + } - // the lower bound of the range to invalidate + // Lower bound of the range to invalidate in `self`. 
let invalidate_lb = match agreement_height { - Some(height) if height == update_tip => u32::MAX, + // if there is no agreement, we invalidate all of the original chain + None => u32::MIN, + // if the agreement is at the update's tip, we don't need to invalidate + Some(height) if height == new_tip.height() => u32::MAX, Some(height) => height + 1, - None => 0, }; - // the first block's height to invalidate in the local chain - let invalidate_from_height = self.blocks.range(invalidate_lb..).next().map(|(&h, _)| h); - - // the first block of height to invalidate (if any) should be represented in the update - if let Some(first_invalid_height) = invalidate_from_height { - if !update.contains_key(&first_invalid_height) { - return Err(UpdateNotConnectedError(first_invalid_height)); + let changeset = { + // Construct initial changeset of heights to invalidate in `self`. + let mut changeset = self + .checkpoints + .range(invalidate_lb..) + .map(|(&height, _)| (height, None)) + .collect::(); + + // The height of the first block to invalidate (if any) must be represented in the `update`. 
+ if let Some(first_invalidated_height) = changeset.keys().next() { + if !updated_cps.contains_key(first_invalidated_height) { + return Err(CannotConnectError { + try_include: self + .checkpoints + .get(first_invalidated_height) + .expect("checkpoint already exists") + .block_id(), + }); + } } - } - let mut changeset: BTreeMap> = match invalidate_from_height { - Some(first_invalid_height) => { - // the first block of height to invalidate should be represented in the update - if !update.contains_key(&first_invalid_height) { - return Err(UpdateNotConnectedError(first_invalid_height)); + changeset.extend( + updated_cps + .iter() + .map(|(height, cp)| (*height, Some(cp.hash()))), + ); + changeset + }; + + let apply_update = move || { + if let Some(&start_height) = updated_cps.keys().next() { + self.checkpoints.split_off(&invalidate_lb); + self.checkpoints.append(&mut updated_cps); + if !self.is_empty() && !complete_match { + self.fix_links(start_height); } - self.blocks - .range(first_invalid_height..) - .map(|(height, _)| (*height, None)) - .collect() } - None => BTreeMap::new(), }; - for (height, update_hash) in update { - let original_hash = self.blocks.get(height); - if Some(update_hash) != original_hash { - changeset.insert(*height, Some(*update_hash)); - } - } - Ok(changeset) + Ok((apply_update, changeset)) } - /// Applies the given `changeset`. - pub fn apply_changeset(&mut self, changeset: ChangeSet) { - for (height, blockhash) in changeset { - match blockhash { - Some(blockhash) => self.blocks.insert(height, blockhash), - None => self.blocks.remove(&height), - }; + /// Apply the given `changeset`. 
+ pub fn apply_changeset(&mut self, changeset: &ChangeSet) { + if let Some(start_height) = changeset.keys().next().cloned() { + for (&height, &hash) in changeset { + match hash { + Some(hash) => self + .checkpoints + .insert(height, CheckPoint::new(BlockId { height, hash })), + None => self.checkpoints.remove(&height), + }; + } + self.fix_links(start_height); } } - /// Updates [`LocalChain`] with an update [`LocalChain`]. + /// Update [`LocalChain`]. /// - /// This is equivalent to calling [`determine_changeset`] and [`apply_changeset`] in sequence. + /// This is equivalent to calling [`update`] and applying the update in sequence. /// - /// [`determine_changeset`]: Self::determine_changeset - /// [`apply_changeset`]: Self::apply_changeset - pub fn apply_update(&mut self, update: Self) -> Result { - let changeset = self.determine_changeset(&update)?; - self.apply_changeset(changeset.clone()); + /// [`update`]: Self::update + pub fn apply_update(&mut self, new_tip: CheckPoint) -> Result { + let (apply, changeset) = self.update(new_tip)?; + apply(); Ok(changeset) } - /// Derives a [`ChangeSet`] that assumes that there are no preceding changesets. + /// Get or insert a `block_id`. /// - /// The changeset returned will record additions of all blocks included in [`Self`]. - pub fn initial_changeset(&self) -> ChangeSet { - self.blocks - .iter() - .map(|(&height, &hash)| (height, Some(hash))) - .collect() - } - - /// Insert a block of [`BlockId`] into the [`LocalChain`]. + /// # Errors /// - /// # Error - /// - /// If the insertion height already contains a block, and the block has a different blockhash, - /// this will result in an [`InsertBlockNotMatchingError`]. - pub fn insert_block( + /// Replacing the block hash of an existing checkpoint will result in an error. 
+ pub fn get_or_insert( &mut self, block_id: BlockId, - ) -> Result { - let mut update = Self::from_blocks(self.tip()); - - if let Some(original_hash) = update.blocks.insert(block_id.height, block_id.hash) { - if original_hash != block_id.hash { - return Err(InsertBlockNotMatchingError { - height: block_id.height, - original_hash, - update_hash: block_id.hash, + ) -> Result<(CheckPoint, ChangeSet), InsertBlockError> { + use crate::collections::btree_map::Entry; + + match self.checkpoints.entry(block_id.height) { + Entry::Vacant(entry) => { + entry.insert(CheckPoint::new(block_id)); + self.fix_links(block_id.height); + let cp = self.checkpoint(block_id.height).expect("must be inserted"); + let changeset = + core::iter::once((block_id.height, Some(block_id.hash))).collect::(); + Ok((cp, changeset)) + } + Entry::Occupied(entry) => { + let cp = entry.get(); + if cp.block_id() == block_id { + Ok((cp.clone(), ChangeSet::default())) + } else { + Err(InsertBlockError { + height: block_id.height, + original_hash: cp.hash(), + update_hash: block_id.hash, + }) + } + } + } + } + + fn fix_links(&mut self, start_height: u32) { + let mut prev = self + .checkpoints + .range(..start_height) + .last() + .map(|(_, cp)| cp.clone()); + + for (_, cp) in self.checkpoints.range_mut(start_height..) { + if cp.0.prev.as_ref().map(Arc::as_ptr) != prev.as_ref().map(|cp| Arc::as_ptr(&cp.0)) { + cp.0 = Arc::new(CPInner { + block: cp.block_id(), + prev: prev.clone().map(|cp| cp.0), }); } + prev = Some(cp.clone()); } + } - Ok(self.apply_update(update).expect("should always connect")) + /// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to + /// recover the current chain. + pub fn initial_changeset(&self) -> ChangeSet { + self.iter_checkpoints(None) + .map(|cp| (cp.height(), Some(cp.hash()))) + .collect() } -} -/// This is the return value of [`determine_changeset`] and represents changes to [`LocalChain`]. 
-/// -/// [`determine_changeset`]: LocalChain::determine_changeset -pub type ChangeSet = BTreeMap>; + /// Get checkpoint of `height` (if any). + pub fn checkpoint(&self, height: u32) -> Option { + self.checkpoints.get(&height).cloned() + } -/// Represents an update failure of [`LocalChain`] due to the update not connecting to the original -/// chain. -/// -/// The update cannot be applied to the chain because the chain suffix it represents did not -/// connect to the existing chain. This error case contains the checkpoint height to include so -/// that the chains can connect. -#[derive(Clone, Debug, PartialEq)] -pub struct UpdateNotConnectedError(pub u32); + /// Iterate over checkpoints in decending height order. + /// + /// `height_upper_bound` is inclusive. A value of `None` means there is no bound, so all + /// checkpoints will be traversed. + pub fn iter_checkpoints(&self, height_upper_bound: Option) -> CheckPointIter { + CheckPointIter { + current: match height_upper_bound { + Some(height) => self + .checkpoints + .range(..=height) + .last() + .map(|(_, cp)| cp.0.clone()), + None => self.checkpoints.values().last().map(|cp| cp.0.clone()), + }, + } + } -impl core::fmt::Display for UpdateNotConnectedError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "the update cannot connect with the chain, try include block at height {}", - self.0 - ) + /// Get a reference to the internal checkpoint map. + pub fn checkpoints(&self) -> &BTreeMap { + &self.checkpoints } } -#[cfg(feature = "std")] -impl std::error::Error for UpdateNotConnectedError {} - /// Represents a failure when trying to insert a checkpoint into [`LocalChain`]. #[derive(Clone, Debug, PartialEq)] -pub struct InsertBlockNotMatchingError { +pub struct InsertBlockError { /// The checkpoints' height. pub height: u32, /// Original checkpoint's block hash. 
@@ -236,7 +455,7 @@ pub struct InsertBlockNotMatchingError { pub update_hash: BlockHash, } -impl core::fmt::Display for InsertBlockNotMatchingError { +impl core::fmt::Display for InsertBlockError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, @@ -247,4 +466,24 @@ impl core::fmt::Display for InsertBlockNotMatchingError { } #[cfg(feature = "std")] -impl std::error::Error for InsertBlockNotMatchingError {} +impl std::error::Error for InsertBlockError {} + +/// Occurs when an update does not have a common checkpoint with the original chain. +#[derive(Clone, Debug, PartialEq)] +pub struct CannotConnectError { + /// The suggested checkpoint to include to connect the two chains. + pub try_include: BlockId, +} + +impl core::fmt::Display for CannotConnectError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "introduced chain cannot connect with the original chain, try include {}:{}", + self.try_include.height, self.try_include.hash, + ) + } +} + +#[cfg(feature = "std")] +impl std::error::Error for CannotConnectError {} diff --git a/crates/chain/tests/common/mod.rs b/crates/chain/tests/common/mod.rs index 7d7288bdf..cd799c03a 100644 --- a/crates/chain/tests/common/mod.rs +++ b/crates/chain/tests/common/mod.rs @@ -9,25 +9,17 @@ macro_rules! h { macro_rules! local_chain { [ $(($height:expr, $block_hash:expr)), * ] => {{ #[allow(unused_mut)] - bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*]) + bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $block_hash).into()),*].into_iter().collect()) }}; } #[allow(unused_macros)] -macro_rules! chain { - ($([$($tt:tt)*]),*) => { chain!( checkpoints: [$([$($tt)*]),*] ) }; - (checkpoints: $($tail:tt)*) => { chain!( index: TxHeight, checkpoints: $($tail)*) }; - (index: $ind:ty, checkpoints: [ $([$height:expr, $block_hash:expr]),* ] $(,txids: [$(($txid:expr, $tx_height:expr)),*])?) => {{ +macro_rules! 
chain_update { + [ $(($height:expr, $hash:expr)), * ] => {{ #[allow(unused_mut)] - let mut chain = bdk_chain::sparse_chain::SparseChain::<$ind>::from_checkpoints([$(($height, $block_hash).into()),*]); - - $( - $( - let _ = chain.insert_tx($txid, $tx_height).expect("should succeed"); - )* - )? - - chain + bdk_chain::local_chain::LocalChain::from_blocks([$(($height, $hash).into()),*].into_iter().collect()) + .tip() + .expect("must have tip") }}; } diff --git a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs index 2ebd913c2..3319b2594 100644 --- a/crates/chain/tests/test_indexed_tx_graph.rs +++ b/crates/chain/tests/test_indexed_tx_graph.rs @@ -8,7 +8,7 @@ use bdk_chain::{ keychain::{Balance, DerivationAdditions, KeychainTxOutIndex}, local_chain::LocalChain, tx_graph::Additions, - BlockId, ChainPosition, ConfirmationHeightAnchor, + ChainPosition, ConfirmationHeightAnchor, }; use bitcoin::{secp256k1::Secp256k1, BlockHash, OutPoint, Script, Transaction, TxIn, TxOut}; use miniscript::Descriptor; @@ -109,8 +109,8 @@ fn test_list_owned_txouts() { // Create Local chains let local_chain = (0..150) - .map(|i| (i as u32, h!("random"))) - .collect::>(); + .map(|i| (i as u32, Some(h!("random")))) + .collect::>>(); let local_chain = LocalChain::from(local_chain); // Initiate IndexedTxGraph @@ -212,9 +212,8 @@ fn test_list_owned_txouts() { ( *tx, local_chain - .blocks() - .get(&height) - .map(|&hash| BlockId { height, hash }) + .checkpoint(height) + .map(|cp| cp.block_id()) .map(|anchor_block| ConfirmationHeightAnchor { anchor_block, confirmation_height: anchor_block.height, @@ -231,9 +230,8 @@ fn test_list_owned_txouts() { |height: u32, graph: &IndexedTxGraph>| { let chain_tip = local_chain - .blocks() - .get(&height) - .map(|&hash| BlockId { height, hash }) + .checkpoint(height) + .map(|cp| cp.block_id()) .expect("block must exist"); let txouts = graph .graph() diff --git a/crates/chain/tests/test_local_chain.rs 
b/crates/chain/tests/test_local_chain.rs index 55d8af113..fc0a9a4ab 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -1,172 +1,224 @@ -use bdk_chain::local_chain::{ - ChangeSet, InsertBlockNotMatchingError, LocalChain, UpdateNotConnectedError, +use bdk_chain::{ + local_chain::{CannotConnectError, ChangeSet, CheckPoint, InsertBlockError, LocalChain}, + BlockId, }; use bitcoin::BlockHash; #[macro_use] mod common; -#[test] -fn add_first_tip() { - let chain = LocalChain::default(); - assert_eq!( - chain.determine_changeset(&local_chain![(0, h!("A"))]), - Ok([(0, Some(h!("A")))].into()), - "add first tip" - ); -} - -#[test] -fn add_second_tip() { - let chain = local_chain![(0, h!("A"))]; - assert_eq!( - chain.determine_changeset(&local_chain![(0, h!("A")), (1, h!("B"))]), - Ok([(1, Some(h!("B")))].into()) - ); -} - -#[test] -fn two_disjoint_chains_cannot_merge() { - let chain1 = local_chain![(0, h!("A"))]; - let chain2 = local_chain![(1, h!("B"))]; - assert_eq!( - chain1.determine_changeset(&chain2), - Err(UpdateNotConnectedError(0)) - ); -} - -#[test] -fn duplicate_chains_should_merge() { - let chain1 = local_chain![(0, h!("A"))]; - let chain2 = local_chain![(0, h!("A"))]; - assert_eq!(chain1.determine_changeset(&chain2), Ok(Default::default())); -} - -#[test] -fn can_introduce_older_checkpoints() { - let chain1 = local_chain![(2, h!("C")), (3, h!("D"))]; - let chain2 = local_chain![(1, h!("B")), (2, h!("C"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Ok([(1, Some(h!("B")))].into()) - ); -} - -#[test] -fn fix_blockhash_before_agreement_point() { - let chain1 = local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))]; - let chain2 = local_chain![(0, h!("fix")), (1, h!("we-agree"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Ok([(0, Some(h!("fix")))].into()) - ) -} - -/// B and C are in both chain and update -/// ``` -/// | 0 | 1 | 2 | 3 | 4 -/// chain | B C -/// update | A B C D -/// ``` 
-/// This should succeed with the point of agreement being C and A should be added in addition. -#[test] -fn two_points_of_agreement() { - let chain1 = local_chain![(1, h!("B")), (2, h!("C"))]; - let chain2 = local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (3, h!("D"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Ok([(0, Some(h!("A"))), (3, Some(h!("D")))].into()), - ); +#[derive(Debug)] +struct TestLocalChain<'a> { + name: &'static str, + chain: LocalChain, + new_tip: CheckPoint, + exp: ExpectedResult<'a>, } -/// Update and chain does not connect: -/// ``` -/// | 0 | 1 | 2 | 3 | 4 -/// chain | B C -/// update | A B D -/// ``` -/// This should fail as we cannot figure out whether C & D are on the same chain -#[test] -fn update_and_chain_does_not_connect() { - let chain1 = local_chain![(1, h!("B")), (2, h!("C"))]; - let chain2 = local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Err(UpdateNotConnectedError(2)), - ); +#[derive(Debug, PartialEq)] +enum ExpectedResult<'a> { + Ok { + changeset: &'a [(u32, Option)], + init_changeset: &'a [(u32, Option)], + }, + Err(CannotConnectError), } -/// Transient invalidation: -/// ``` -/// | 0 | 1 | 2 | 3 | 4 | 5 -/// chain | A B C E -/// update | A B' C' D -/// ``` -/// This should succeed and invalidate B,C and E with point of agreement being A. 
-#[test] -fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation() { - let chain1 = local_chain![(0, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))]; - let chain2 = local_chain![(0, h!("A")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Ok([ - (2, Some(h!("B'"))), - (3, Some(h!("C'"))), - (4, Some(h!("D"))), - (5, None), - ] - .into()) - ); -} +impl<'a> TestLocalChain<'a> { + fn run(mut self) { + let got_changeset = match self.chain.update(self.new_tip) { + Ok((apply, changeset)) => { + apply(); + changeset + } + Err(err) => { + assert_eq!(ExpectedResult::Err(err), self.exp); + return; + } + }; -/// Transient invalidation: -/// ``` -/// | 0 | 1 | 2 | 3 | 4 -/// chain | B C E -/// update | B' C' D -/// ``` -/// -/// This should succeed and invalidate B, C and E with no point of agreement -#[test] -fn transitive_invalidation_applies_to_checkpoints_higher_than_invalidation_no_point_of_agreement() { - let chain1 = local_chain![(1, h!("B")), (2, h!("C")), (4, h!("E"))]; - let chain2 = local_chain![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Ok([ - (1, Some(h!("B'"))), - (2, Some(h!("C'"))), - (3, Some(h!("D"))), - (4, None) - ] - .into()) - ) + match self.exp { + ExpectedResult::Ok { + changeset, + init_changeset, + } => { + assert_eq!( + got_changeset, + changeset.iter().cloned().collect(), + "{}: unexpected changeset", + self.name + ); + assert_eq!( + self.chain.initial_changeset(), + init_changeset.iter().cloned().collect(), + "{}: unexpected initial changeset", + self.name + ); + } + ExpectedResult::Err(err) => panic!( + "expected error ({}), got non-error result: {:?}", + err, got_changeset + ), + } + } } -/// Transient invalidation: -/// ``` -/// | 0 | 1 | 2 | 3 | 4 -/// chain | A B C E -/// update | B' C' D -/// ``` -/// -/// This should fail since although it tells us that B and C are invalid it doesn't tell us whether -/// A 
was invalid. #[test] -fn invalidation_but_no_connection() { - let chain1 = local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))]; - let chain2 = local_chain![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))]; - - assert_eq!( - chain1.determine_changeset(&chain2), - Err(UpdateNotConnectedError(0)) - ) +fn update() { + [ + TestLocalChain { + name: "add first tip", + chain: local_chain![], + new_tip: chain_update![(0, h!("A"))], + exp: ExpectedResult::Ok { + changeset: &[(0, Some(h!("A")))], + init_changeset: &[(0, Some(h!("A")))], + }, + }, + TestLocalChain { + name: "add second tip", + chain: local_chain![(0, h!("A"))], + new_tip: chain_update![(0, h!("A")), (1, h!("B"))], + exp: ExpectedResult::Ok { + changeset: &[(1, Some(h!("B")))], + init_changeset: &[(0, Some(h!("A"))), (1, Some(h!("B")))], + }, + }, + TestLocalChain { + name: "two disjoint chains cannot merge", + chain: local_chain![(0, h!("A"))], + new_tip: chain_update![(1, h!("B"))], + exp: ExpectedResult::Err(CannotConnectError { + try_include: BlockId { + height: 0, + hash: h!("A"), + }, + }), + }, + TestLocalChain { + name: "duplicate chains should merge", + chain: local_chain![(0, h!("A"))], + new_tip: chain_update![(0, h!("A"))], + exp: ExpectedResult::Ok { + changeset: &[], + init_changeset: &[(0, Some(h!("A")))], + }, + }, + TestLocalChain { + name: "can introduce older checkpoints", + chain: local_chain![(2, h!("C")), (3, h!("D"))], + new_tip: chain_update![(1, h!("B")), (2, h!("C"))], + exp: ExpectedResult::Ok { + changeset: &[(1, Some(h!("B")))], + init_changeset: &[(1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))], + }, + }, + TestLocalChain { + name: "fix blockhash before agreement point", + chain: local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))], + new_tip: chain_update![(0, h!("fix")), (1, h!("we-agree"))], + exp: ExpectedResult::Ok { + changeset: &[(0, Some(h!("fix")))], + init_changeset: &[(0, Some(h!("fix"))), (1, Some(h!("we-agree")))], + }, + }, + // B and C are 
in both chain and update + // | 0 | 1 | 2 | 3 | 4 + // chain | B C + // update | A B C D + // This should succeed with the point of agreement being C and A should be added in addition. + TestLocalChain { + name: "two points of agreement", + chain: local_chain![(1, h!("B")), (2, h!("C"))], + new_tip: chain_update![(0, h!("A")), (1, h!("B")), (2, h!("C")), (3, h!("D"))], + exp: ExpectedResult::Ok { + changeset: &[(0, Some(h!("A"))), (3, Some(h!("D")))], + init_changeset: &[ + (0, Some(h!("A"))), + (1, Some(h!("B"))), + (2, Some(h!("C"))), + (3, Some(h!("D"))), + ], + }, + }, + // Update and chain does not connect: + // | 0 | 1 | 2 | 3 | 4 + // chain | B C + // update | A B D + // This should fail as we cannot figure out whether C & D are on the same chain + TestLocalChain { + name: "update and chain does not connect", + chain: local_chain![(1, h!("B")), (2, h!("C"))], + new_tip: chain_update![(0, h!("A")), (1, h!("B")), (3, h!("D"))], + exp: ExpectedResult::Err(CannotConnectError { + try_include: BlockId { + height: 2, + hash: h!("C"), + }, + }), + }, + // Transient invalidation: + // | 0 | 1 | 2 | 3 | 4 | 5 + // chain | A B C E + // update | A B' C' D + // This should succeed and invalidate B,C and E with point of agreement being A. 
+ TestLocalChain { + name: "transitive invalidation applies to checkpoints higher than invalidation", + chain: local_chain![(0, h!("A")), (2, h!("B")), (3, h!("C")), (5, h!("E"))], + new_tip: chain_update![(0, h!("A")), (2, h!("B'")), (3, h!("C'")), (4, h!("D"))], + exp: ExpectedResult::Ok { + changeset: &[ + (2, Some(h!("B'"))), + (3, Some(h!("C'"))), + (4, Some(h!("D"))), + (5, None), + ], + init_changeset: &[ + (0, Some(h!("A"))), + (2, Some(h!("B'"))), + (3, Some(h!("C'"))), + (4, Some(h!("D"))), + ], + }, + }, + // Transient invalidation: + // | 0 | 1 | 2 | 3 | 4 + // chain | B C E + // update | B' C' D + // This should succeed and invalidate B, C and E with no point of agreement + TestLocalChain { + name: "transitive invalidation applies to checkpoints higher than invalidation no point of agreement", + chain: local_chain![(1, h!("B")), (2, h!("C")), (4, h!("E"))], + new_tip: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))], + exp: ExpectedResult::Ok { + changeset: &[ + (1, Some(h!("B'"))), + (2, Some(h!("C'"))), + (3, Some(h!("D"))), + (4, None) + ], + init_changeset: &[ + (1, Some(h!("B'"))), + (2, Some(h!("C'"))), + (3, Some(h!("D"))), + ], + }, + }, + // Transient invalidation: + // | 0 | 1 | 2 | 3 | 4 + // chain | A B C E + // update | B' C' D + // This should fail since although it tells us that B and C are invalid it doesn't tell us whether + // A was invalid. 
+ TestLocalChain { + name: "invalidation but no connection", + chain: local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))], + new_tip: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))], + exp: ExpectedResult::Err(CannotConnectError { try_include: BlockId { height: 0, hash: h!("A") } }), + }, + ] + .into_iter() + .for_each(TestLocalChain::run); } #[test] @@ -174,7 +226,7 @@ fn insert_block() { struct TestCase { original: LocalChain, insert: (u32, BlockHash), - expected_result: Result, + expected_result: Result, expected_final: LocalChain, } @@ -206,7 +258,7 @@ fn insert_block() { TestCase { original: local_chain![(2, h!("K"))], insert: (2, h!("J")), - expected_result: Err(InsertBlockNotMatchingError { + expected_result: Err(InsertBlockError { height: 2, original_hash: h!("K"), update_hash: h!("J"), @@ -218,7 +270,9 @@ fn insert_block() { for (i, t) in test_cases.into_iter().enumerate() { let mut chain = t.original; assert_eq!( - chain.insert_block(t.insert.into()), + chain + .get_or_insert(t.insert.into()) + .map(|(_, changeset)| changeset), t.expected_result, "[{}] unexpected result when inserting block", i, diff --git a/crates/chain/tests/test_tx_graph.rs b/crates/chain/tests/test_tx_graph.rs index c272f97aa..bbffdaf31 100644 --- a/crates/chain/tests/test_tx_graph.rs +++ b/crates/chain/tests/test_tx_graph.rs @@ -697,7 +697,7 @@ fn test_chain_spends() { let _ = graph.insert_anchor( tx.txid(), ConfirmationHeightAnchor { - anchor_block: tip, + anchor_block: tip.block_id(), confirmation_height: *ht, }, ); @@ -705,10 +705,10 @@ fn test_chain_spends() { // Assert that confirmed spends are returned correctly. 
assert_eq!( - graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 0)), + graph.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 0)), Some(( ChainPosition::Confirmed(&ConfirmationHeightAnchor { - anchor_block: tip, + anchor_block: tip.block_id(), confirmation_height: 98 }), tx_1.txid(), @@ -717,17 +717,17 @@ fn test_chain_spends() { // Check if chain position is returned correctly. assert_eq!( - graph.get_chain_position(&local_chain, tip, tx_0.txid()), + graph.get_chain_position(&local_chain, tip.block_id(), tx_0.txid()), // Some(ObservedAs::Confirmed(&local_chain.get_block(95).expect("block expected"))), Some(ChainPosition::Confirmed(&ConfirmationHeightAnchor { - anchor_block: tip, + anchor_block: tip.block_id(), confirmation_height: 95 })) ); // Even if unconfirmed tx has a last_seen of 0, it can still be part of a chain spend. assert_eq!( - graph.get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)), + graph.get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1)), Some((ChainPosition::Unconfirmed(0), tx_2.txid())), ); @@ -737,7 +737,7 @@ fn test_chain_spends() { // Check chain spend returned correctly. assert_eq!( graph - .get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)) + .get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1)) .unwrap(), (ChainPosition::Unconfirmed(1234567), tx_2.txid()) ); @@ -754,7 +754,7 @@ fn test_chain_spends() { // Because this tx conflicts with an already confirmed transaction, chain position should return none. assert!(graph - .get_chain_position(&local_chain, tip, tx_1_conflict.txid()) + .get_chain_position(&local_chain, tip.block_id(), tx_1_conflict.txid()) .is_none()); // Another conflicting tx that conflicts with tx_2. @@ -773,7 +773,7 @@ fn test_chain_spends() { // This should return a valid observation with correct last seen. 
assert_eq!( graph - .get_chain_position(&local_chain, tip, tx_2_conflict.txid()) + .get_chain_position(&local_chain, tip.block_id(), tx_2_conflict.txid()) .expect("position expected"), ChainPosition::Unconfirmed(1234568) ); @@ -781,14 +781,14 @@ fn test_chain_spends() { // Chain_spend now catches the new transaction as the spend. assert_eq!( graph - .get_chain_spend(&local_chain, tip, OutPoint::new(tx_0.txid(), 1)) + .get_chain_spend(&local_chain, tip.block_id(), OutPoint::new(tx_0.txid(), 1)) .expect("expect observation"), (ChainPosition::Unconfirmed(1234568), tx_2_conflict.txid()) ); // Chain position of the `tx_2` is now none, as it is older than `tx_2_conflict` assert!(graph - .get_chain_position(&local_chain, tip, tx_2.txid()) + .get_chain_position(&local_chain, tip.block_id(), tx_2.txid()) .is_none()); } diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index 1ec44d85c..5dc6a8b35 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -1,7 +1,7 @@ use bdk_chain::{ bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid}, keychain::LocalUpdate, - local_chain::LocalChain, + local_chain::CheckPoint, tx_graph::{self, TxGraph}, Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor, }; @@ -14,21 +14,19 @@ use std::{ #[derive(Debug, Clone)] pub struct ElectrumUpdate { pub graph_update: HashMap>, - pub chain_update: LocalChain, + pub chain_update: CheckPoint, pub keychain_update: BTreeMap, } -impl Default for ElectrumUpdate { - fn default() -> Self { +impl ElectrumUpdate { + pub fn new(cp: CheckPoint) -> Self { Self { - graph_update: Default::default(), - chain_update: Default::default(), - keychain_update: Default::default(), + graph_update: HashMap::new(), + chain_update: cp, + keychain_update: BTreeMap::new(), } } -} -impl ElectrumUpdate { pub fn missing_full_txs(&self, graph: &TxGraph) -> Vec { self.graph_update .keys() @@ -56,7 +54,7 @@ impl 
ElectrumUpdate { Ok(LocalUpdate { keychain: self.keychain_update, graph: graph_update, - chain: self.chain_update, + tip: self.chain_update, }) } } @@ -128,7 +126,7 @@ impl ElectrumUpdate { graph.apply_additions(graph_additions); graph }, - chain: update.chain, + tip: update.tip, }) } } @@ -138,7 +136,7 @@ pub trait ElectrumExt { fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, @@ -148,7 +146,7 @@ pub trait ElectrumExt { fn scan_without_keychain( &self, - local_chain: &BTreeMap, + prev_tip: Option, misc_spks: impl IntoIterator, txids: impl IntoIterator, outpoints: impl IntoIterator, @@ -160,7 +158,7 @@ pub trait ElectrumExt { .map(|(i, spk)| (i as u32, spk)); self.scan( - local_chain, + prev_tip, [((), spk_iter)].into(), txids, outpoints, @@ -179,7 +177,7 @@ impl ElectrumExt for Client { fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, @@ -196,14 +194,10 @@ impl ElectrumExt for Client { let outpoints = outpoints.into_iter().collect::>(); let update = loop { - let mut update = ElectrumUpdate:: { - chain_update: prepare_chain_update(self, local_chain)?, - ..Default::default() - }; - let anchor_block = update - .chain_update - .tip() - .expect("must have atleast one block"); + let mut update = ElectrumUpdate::::new( + prepare_chain_update(self, prev_tip.clone())?, + ); + let anchor_block = update.chain_update.block_id(); if !request_spks.is_empty() { if !scanned_spks.is_empty() { @@ -271,39 +265,72 @@ impl ElectrumExt for Client { /// Prepare an update "template" based on the checkpoints of the `local_chain`. fn prepare_chain_update( client: &Client, - local_chain: &BTreeMap, -) -> Result { - let mut update = LocalChain::default(); - - // Find the local chain block that is still there so our update can connect to the local chain. 
- for (&existing_height, &existing_hash) in local_chain.iter().rev() { - // TODO: a batch request may be safer, as a reorg that happens when we are obtaining - // `block_header`s will result in inconsistencies - let current_hash = client.block_header(existing_height as usize)?.block_hash(); - let _ = update - .insert_block(BlockId { - height: existing_height, - hash: current_hash, - }) - .expect("This never errors because we are working with a fresh chain"); - - if current_hash == existing_hash { - break; + prev_tip: Option, +) -> Result { + let mut header_notification = client.block_headers_subscribe()?; + + let (new_blocks, mut last_cp) = 'retry: loop { + let tip = BlockId { + height: header_notification.height as _, + hash: header_notification.header.block_hash(), + }; + let tip_parent = BlockId { + height: (header_notification.height - 1) as _, + hash: header_notification.header.prev_blockhash, + }; + + // this records new blocks, including blocks that are to be replaced + let mut new_blocks = [tip_parent, tip] + .into_iter() + .map(|b| (b.height, b.hash)) + .collect::>(); + let mut agreement_cp = Option::::None; + + for cp in prev_tip.iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + // TODO: a batch request may be safer, as a reorg that happens when we are obtaining + // `block_header`s will result in inconsistencies + let hash = client.block_header(cp_block.height as _)?.block_hash(); + if hash == cp_block.hash { + agreement_cp = Some(cp); + break; + } + new_blocks.insert(cp_block.height, hash); } - } - // Insert the new tip so new transactions will be accepted into the sparsechain. - let tip = { - let (height, hash) = crate::get_tip(client)?; - BlockId { height, hash } + // check for tip changes + loop { + match client.block_headers_pop()? 
{ + Some(new_notification) => { + let new_height = new_notification.height; + header_notification = new_notification; + if new_height as u32 <= tip.height { + // we may have a reorg + // reorg-detection logic can be improved (false positives are possible) + continue 'retry; + } + } + None => { + let new_blocks = match &agreement_cp { + // `new_blocks` should only include blocks that are actually new + Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), + None => new_blocks, + }; + + break 'retry (new_blocks, agreement_cp); + } + }; + } }; - if update.insert_block(tip).is_err() { - // There has been a re-org before we even begin scanning addresses. - // Just recursively call (this should never happen). - return prepare_chain_update(client, local_chain); + + // construct checkpoints + for (height, hash) in new_blocks { + let cp = CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) + .expect("heights should not conflict"); + last_cp = Some(cp); } - Ok(update) + Ok(last_cp.expect("must have atleast one checkpoint")) } fn determine_tx_anchor( diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs index 4826c6dda..ec693fda9 100644 --- a/crates/electrum/src/lib.rs +++ b/crates/electrum/src/lib.rs @@ -15,21 +15,12 @@ //! //! Refer to [`bdk_electrum_example`] for a complete example. //! -//! [`ElectrumClient::scan`]: ElectrumClient::scan +//! [`ElectrumClient::scan`]: electrum_client::ElectrumClient::scan //! [`missing_full_txs`]: ElectrumUpdate::missing_full_txs -//! [`batch_transaction_get`]: ElectrumApi::batch_transaction_get +//! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get //! 
[`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example -use bdk_chain::bitcoin::BlockHash; -use electrum_client::{Client, ElectrumApi, Error}; mod electrum_ext; pub use bdk_chain; pub use electrum_client; pub use electrum_ext::*; - -fn get_tip(client: &Client) -> Result<(u32, BlockHash), Error> { - // TODO: unsubscribe when added to the client, or is there a better call to use here? - client - .block_headers_subscribe() - .map(|data| (data.height as u32, data.header.block_hash())) -} diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index e496e415c..8023b7422 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -3,6 +3,7 @@ use bdk_chain::{ bitcoin::{BlockHash, OutPoint, Script, Txid}, collections::BTreeMap, keychain::LocalUpdate, + local_chain::CheckPoint, BlockId, ConfirmationTimeAnchor, }; use esplora_client::{Error, OutputStatus, TxStatus}; @@ -35,7 +36,7 @@ pub trait EsploraAsyncExt { #[allow(clippy::result_large_err)] // FIXME async fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap< K, impl IntoIterator + Send> + Send, @@ -52,14 +53,14 @@ pub trait EsploraAsyncExt { #[allow(clippy::result_large_err)] // FIXME async fn scan_without_keychain( &self, - local_chain: &BTreeMap, + prev_tip: Option, misc_spks: impl IntoIterator + Send> + Send, txids: impl IntoIterator + Send> + Send, outpoints: impl IntoIterator + Send> + Send, parallel_requests: usize, ) -> Result, Error> { self.scan( - local_chain, + prev_tip, [( (), misc_spks @@ -83,7 +84,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { #[allow(clippy::result_large_err)] // FIXME async fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap< K, impl IntoIterator + Send> + Send, @@ -95,33 +96,60 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { ) -> Result, Error> { let parallel_requests = Ord::max(parallel_requests, 1); - 
let (mut update, tip_at_start) = loop { - let mut update = LocalUpdate::::default(); + let (new_blocks, mut last_cp) = 'retry: loop { + let new_tip = loop { + let hash = self.get_tip_hash().await?; + let status = self.get_block_status(&hash).await?; + if status.in_best_chain && status.next_best.is_none() { + break BlockId { + height: status.height.expect("must have height"), + hash, + }; + } + }; + + let mut new_blocks = core::iter::once((new_tip.height, new_tip.hash)) + .collect::>(); - for (&height, &original_hash) in local_chain.iter().rev() { - let update_block_id = BlockId { - height, - hash: self.get_block_hash(height).await?, - }; - let _ = update - .chain - .insert_block(update_block_id) - .expect("cannot repeat height here"); - if update_block_id.hash == original_hash { + let mut agreement_cp = Option::::None; + + for cp in prev_tip.iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + let hash = self.get_block_hash(cp_block.height).await?; + if hash == cp_block.hash { + agreement_cp = Some(cp); break; } + new_blocks.insert(cp_block.height, hash); } - let tip_at_start = BlockId { - height: self.get_height().await?, - hash: self.get_tip_hash().await?, - }; + // check for tip changes + // retry if there are changes to the tip + let status = self.get_block_status(&new_tip.hash).await?; - if update.chain.insert_block(tip_at_start).is_ok() { - break (update, tip_at_start); + if !status.in_best_chain || status.next_best.is_some() { + continue 'retry; } + + // `new_blocks` should only include blocks that are actually new + let new_blocks = match &agreement_cp { + Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), + None => new_blocks, + }; + break 'retry (new_blocks, agreement_cp); }; + // construct checkpoints + for (&height, &hash) in new_blocks.iter() { + last_cp = Some( + CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) + .expect("heights should not conflict"), + ); + } + + let tip = last_cp.expect("must have 
atleast one checkpoint"); + let mut update = LocalUpdate::::new(tip.clone()); + for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); let mut last_active_index = None; @@ -172,7 +200,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { empty_scripts = 0; } for tx in related_txs { - let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start); + let anchor = map_confirmation_time_anchor(&tx.status, &tip); let _ = update.graph.insert_tx(tx.to_tx()); if let Some(anchor) = anchor { @@ -202,7 +230,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } match self.get_tx_status(&txid).await? { tx_status if tx_status.confirmed => { - if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) { + if let Some(anchor) = map_confirmation_time_anchor(&tx_status, &tip) { let _ = update.graph.insert_anchor(txid, anchor); } } @@ -236,7 +264,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { for (tx, status) in op_txs { let txid = tx.txid(); - let anchor = map_confirmation_time_anchor(&status, tip_at_start); + let anchor = map_confirmation_time_anchor(&status, &tip); let _ = update.graph.insert_tx(tx); if let Some(anchor) = anchor { @@ -245,23 +273,34 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } } - if tip_at_start.hash != self.get_block_hash(tip_at_start.height).await? { + if tip.hash() != self.get_block_hash(tip.height()).await? { // A reorg occurred, so let's find out where all the txids we found are now in the chain let txids_found = update .graph .full_txs() .map(|tx_node| tx_node.txid) .collect::>(); - update.chain = EsploraAsyncExt::scan_without_keychain( + let new_update = EsploraAsyncExt::scan_without_keychain( self, - local_chain, + Some(tip), [], txids_found, [], parallel_requests, ) - .await? 
- .chain; + .await?; + update.tip = new_update.tip; + update.graph = new_update.graph; + // update.chain = EsploraAsyncExt::scan_without_keychain( + // self, + // local_chain, + // [], + // txids_found, + // [], + // parallel_requests, + // ) + // .await? + // .chain; } Ok(update) diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 6e1c61993..2c5ddc6e9 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -1,5 +1,6 @@ use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid}; use bdk_chain::collections::BTreeMap; +use bdk_chain::local_chain::CheckPoint; use bdk_chain::BlockId; use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor}; use esplora_client::{Error, OutputStatus, TxStatus}; @@ -27,7 +28,7 @@ pub trait EsploraExt { #[allow(clippy::result_large_err)] // FIXME fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, @@ -41,14 +42,14 @@ pub trait EsploraExt { #[allow(clippy::result_large_err)] // FIXME fn scan_without_keychain( &self, - local_chain: &BTreeMap, + prev_tip: Option, misc_spks: impl IntoIterator, txids: impl IntoIterator, outpoints: impl IntoIterator, parallel_requests: usize, ) -> Result, Error> { self.scan( - local_chain, + prev_tip, [( (), misc_spks @@ -68,7 +69,7 @@ pub trait EsploraExt { impl EsploraExt for esplora_client::BlockingClient { fn scan( &self, - local_chain: &BTreeMap, + prev_tip: Option, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, @@ -77,33 +78,59 @@ impl EsploraExt for esplora_client::BlockingClient { ) -> Result, Error> { let parallel_requests = Ord::max(parallel_requests, 1); - let (mut update, tip_at_start) = loop { - let mut update = LocalUpdate::::default(); - - for (&height, &original_hash) in local_chain.iter().rev() { - let update_block_id = BlockId { - height, - hash: self.get_block_hash(height)?, - }; - let _ 
= update - .chain - .insert_block(update_block_id) - .expect("cannot repeat height here"); - if update_block_id.hash == original_hash { + let (new_blocks, mut last_cp) = 'retry: loop { + let new_tip = loop { + let hash = self.get_tip_hash()?; + let status = self.get_block_status(&hash)?; + if status.in_best_chain && status.next_best.is_none() { + break BlockId { + height: status.height.expect("must have height"), + hash, + }; + } + }; + + let mut new_blocks = core::iter::once((new_tip.height, new_tip.hash)) + .collect::>(); + + let mut agreement_cp = Option::::None; + + for cp in prev_tip.iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + let hash = self.get_block_hash(cp_block.height)?; + if hash == cp_block.hash { + agreement_cp = Some(cp); break; } + new_blocks.insert(cp_block.height, hash); } - let tip_at_start = BlockId { - height: self.get_height()?, - hash: self.get_tip_hash()?, - }; - - if update.chain.insert_block(tip_at_start).is_ok() { - break (update, tip_at_start); + // check for tip changes + // retry if there are changes to the tip + let status = self.get_block_status(&new_tip.hash)?; + if !status.in_best_chain || status.next_best.is_some() { + continue 'retry; } + + // `new_blocks` should only include blocks that are actually new + let new_blocks = match &agreement_cp { + Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), + None => new_blocks, + }; + break 'retry (new_blocks, agreement_cp); }; + // construct checkpoints + for (&height, &hash) in new_blocks.iter() { + last_cp = Some( + CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) + .expect("heights should not conflict"), + ); + } + + let tip = last_cp.expect("must have atleast one checkpoint"); + let mut update = LocalUpdate::::new(tip.clone()); + for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); let mut last_active_index = None; @@ -155,7 +182,7 @@ impl EsploraExt for esplora_client::BlockingClient { empty_scripts = 0; 
} for tx in related_txs { - let anchor = map_confirmation_time_anchor(&tx.status, tip_at_start); + let anchor = map_confirmation_time_anchor(&tx.status, &tip); let _ = update.graph.insert_tx(tx.to_tx()); if let Some(anchor) = anchor { @@ -187,7 +214,7 @@ impl EsploraExt for esplora_client::BlockingClient { tx_status @ TxStatus { confirmed: true, .. } => { - if let Some(anchor) = map_confirmation_time_anchor(&tx_status, tip_at_start) { + if let Some(anchor) = map_confirmation_time_anchor(&tx_status, &tip) { let _ = update.graph.insert_anchor(txid, anchor); } } @@ -219,7 +246,7 @@ impl EsploraExt for esplora_client::BlockingClient { for (tx, status) in op_txs { let txid = tx.txid(); - let anchor = map_confirmation_time_anchor(&status, tip_at_start); + let anchor = map_confirmation_time_anchor(&status, &tip); let _ = update.graph.insert_tx(tx); if let Some(anchor) = anchor { @@ -228,22 +255,23 @@ impl EsploraExt for esplora_client::BlockingClient { } } - if tip_at_start.hash != self.get_block_hash(tip_at_start.height)? { + if tip.hash() != self.get_block_hash(tip.height())? { // A reorg occurred, so let's find out where all the txids we found are now in the chain let txids_found = update .graph .full_txs() .map(|tx_node| tx_node.txid) .collect::>(); - update.chain = EsploraExt::scan_without_keychain( + let new_update = EsploraExt::scan_without_keychain( self, - local_chain, + Some(tip), [], txids_found, [], parallel_requests, - )? 
- .chain; + )?; + update.tip = new_update.tip; + update.graph = new_update.graph; } Ok(update) diff --git a/crates/esplora/src/lib.rs b/crates/esplora/src/lib.rs index d5f8d8af6..d1c68e81f 100644 --- a/crates/esplora/src/lib.rs +++ b/crates/esplora/src/lib.rs @@ -1,5 +1,5 @@ #![doc = include_str!("../README.md")] -use bdk_chain::{BlockId, ConfirmationTimeAnchor}; +use bdk_chain::{local_chain::CheckPoint, ConfirmationTimeAnchor}; use esplora_client::TxStatus; pub use esplora_client; @@ -16,11 +16,11 @@ pub use async_ext::*; pub(crate) fn map_confirmation_time_anchor( tx_status: &TxStatus, - tip_at_start: BlockId, + tip: &CheckPoint, ) -> Option { match (tx_status.block_time, tx_status.block_height) { (Some(confirmation_time), Some(confirmation_height)) => Some(ConfirmationTimeAnchor { - anchor_block: tip_at_start, + anchor_block: tip.block_id(), confirmation_height, confirmation_time, }), diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index 41d394234..b5ca8c2a7 100644 --- a/example-crates/example_electrum/src/main.rs +++ b/example-crates/example_electrum/src/main.rs @@ -5,7 +5,7 @@ use std::{ }; use bdk_chain::{ - bitcoin::{Address, BlockHash, Network, OutPoint, Txid}, + bitcoin::{Address, Network, OutPoint, Txid}, indexed_tx_graph::{IndexedAdditions, IndexedTxGraph}, keychain::LocalChangeSet, local_chain::LocalChain, @@ -23,7 +23,7 @@ use example_cli::{ const DB_MAGIC: &[u8] = b"bdk_example_electrum"; const DB_PATH: &str = ".bdk_electrum_example.db"; -const ASSUME_FINAL_DEPTH: usize = 10; +// const ASSUME_FINAL_DEPTH: usize = 10; #[derive(Subcommand, Debug, Clone)] enum ElectrumCommands { @@ -73,11 +73,7 @@ fn main() -> anyhow::Result<()> { graph }); - let chain = Mutex::new({ - let mut chain = LocalChain::default(); - chain.apply_changeset(init_changeset.chain_changeset); - chain - }); + let chain = Mutex::new(LocalChain::from_changeset(init_changeset.chain_changeset)); let electrum_url = match args.network { 
Network::Bitcoin => "ssl://electrum.blockstream.info:50002", @@ -119,7 +115,7 @@ fn main() -> anyhow::Result<()> { stop_gap, scan_options, } => { - let (keychain_spks, local_chain) = { + let (keychain_spks, tip) = { let graph = &*graph.lock().unwrap(); let chain = &*chain.lock().unwrap(); @@ -142,20 +138,13 @@ fn main() -> anyhow::Result<()> { }) .collect::>(); - let c = chain - .blocks() - .iter() - .rev() - .take(ASSUME_FINAL_DEPTH) - .map(|(k, v)| (*k, *v)) - .collect::>(); - - (keychain_spks, c) + let tip = chain.tip(); + (keychain_spks, tip) }; client .scan( - &local_chain, + tip, keychain_spks, core::iter::empty(), core::iter::empty(), @@ -174,7 +163,7 @@ fn main() -> anyhow::Result<()> { // Get a short lock on the tracker to get the spks we're interested in let graph = graph.lock().unwrap(); let chain = chain.lock().unwrap(); - let chain_tip = chain.tip().unwrap_or_default(); + let chain_tip = chain.tip().map(|cp| cp.block_id()).unwrap_or_default(); if !(all_spks || unused_spks || utxos || unconfirmed) { unused_spks = true; @@ -254,19 +243,13 @@ fn main() -> anyhow::Result<()> { })); } - let c = chain - .blocks() - .iter() - .rev() - .take(ASSUME_FINAL_DEPTH) - .map(|(k, v)| (*k, *v)) - .collect::>(); + let tip = chain.tip(); // drop lock on graph and chain drop((graph, chain)); let update = client - .scan_without_keychain(&c, spks, txids, outpoints, scan_options.batch_size) + .scan_without_keychain(tip, spks, txids, outpoints, scan_options.batch_size) .context("scanning the blockchain")?; ElectrumUpdate { graph_update: update.graph_update, @@ -292,7 +275,7 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.apply_update(final_update.chain)?; + let chain_changeset = chain.apply_update(final_update.tip)?; let indexed_additions = { let mut additions = IndexedAdditions::::default(); diff --git a/example-crates/wallet_electrum/src/main.rs 
b/example-crates/wallet_electrum/src/main.rs index db80f106d..2355a6fb0 100644 --- a/example-crates/wallet_electrum/src/main.rs +++ b/example-crates/wallet_electrum/src/main.rs @@ -35,7 +35,7 @@ fn main() -> Result<(), Box> { print!("Syncing..."); let client = electrum_client::Client::new("ssl://electrum.blockstream.info:60002")?; - let local_chain = wallet.checkpoints(); + let prev_tip = wallet.latest_checkpoint(); let keychain_spks = wallet .spks_of_all_keychains() .into_iter() @@ -52,8 +52,7 @@ fn main() -> Result<(), Box> { }) .collect(); - let electrum_update = - client.scan(local_chain, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?; + let electrum_update = client.scan(prev_tip, keychain_spks, None, None, STOP_GAP, BATCH_SIZE)?; println!(); diff --git a/example-crates/wallet_esplora/src/main.rs b/example-crates/wallet_esplora/src/main.rs index 119d9cbd7..8ae042a96 100644 --- a/example-crates/wallet_esplora/src/main.rs +++ b/example-crates/wallet_esplora/src/main.rs @@ -36,7 +36,7 @@ fn main() -> Result<(), Box> { let client = esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking()?; - let local_chain = wallet.checkpoints(); + let prev_tip = wallet.latest_checkpoint(); let keychain_spks = wallet .spks_of_all_keychains() .into_iter() @@ -53,7 +53,7 @@ fn main() -> Result<(), Box> { }) .collect(); let update = client.scan( - local_chain, + prev_tip, keychain_spks, None, None, diff --git a/example-crates/wallet_esplora_async/src/main.rs b/example-crates/wallet_esplora_async/src/main.rs index 7cb218ec2..afe751b73 100644 --- a/example-crates/wallet_esplora_async/src/main.rs +++ b/example-crates/wallet_esplora_async/src/main.rs @@ -37,7 +37,7 @@ async fn main() -> Result<(), Box> { let client = esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?; - let local_chain = wallet.checkpoints(); + let prev_cp = wallet.latest_checkpoint(); let keychain_spks = wallet .spks_of_all_keychains() .into_iter() @@ 
-54,14 +54,7 @@ async fn main() -> Result<(), Box> { }) .collect(); let update = client - .scan( - local_chain, - keychain_spks, - [], - [], - STOP_GAP, - PARALLEL_REQUESTS, - ) + .scan(prev_cp, keychain_spks, [], [], STOP_GAP, PARALLEL_REQUESTS) .await?; println!(); wallet.apply_update(update)?; From adf8f8f7f7cd8611ff6d282ef87b460fc0a15fe3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 19 Jun 2023 05:22:49 +0200 Subject: [PATCH 02/24] [bitcoind_rpc] Initial work on `BitcoindRpcIter` --- Cargo.toml | 1 + crates/bitcoind_rpc/Cargo.toml | 11 +++ crates/bitcoind_rpc/src/lib.rs | 164 +++++++++++++++++++++++++++++++++ 3 files changed, 176 insertions(+) create mode 100644 crates/bitcoind_rpc/Cargo.toml create mode 100644 crates/bitcoind_rpc/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index c5f2692da..adfc16c5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,7 @@ members = [ "crates/file_store", "crates/electrum", "crates/esplora", + "crates/bitcoind_rpc", "example-crates/example_cli", "example-crates/example_electrum", "example-crates/wallet_electrum", diff --git a/crates/bitcoind_rpc/Cargo.toml b/crates/bitcoind_rpc/Cargo.toml new file mode 100644 index 000000000..22c6514e2 --- /dev/null +++ b/crates/bitcoind_rpc/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "bitcoind_rpc" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bdk_chain = { path = "../chain", version = "0.4.0", features = ["serde", "miniscript"] } +bitcoincore-rpc = { version = "0.16" } +anyhow = { version = "1" } diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs new file mode 100644 index 000000000..c8357fc56 --- /dev/null +++ b/crates/bitcoind_rpc/src/lib.rs @@ -0,0 +1,164 @@ +use std::collections::HashSet; + +use bdk_chain::{ + bitcoin::{Transaction, Txid}, + local_chain::CheckPoint, + BlockId, +}; +use 
bitcoincore_rpc::{bitcoincore_rpc_json::GetBlockResult, Client, RpcApi}; + +#[derive(Debug, Clone)] +pub enum BitcoindRpcItem { + Block { + cp: CheckPoint, + info: Box, + }, + Mempool { + cp: CheckPoint, + txs: Vec<(Transaction, u64)>, + }, +} + +pub struct BitcoindRpcIter<'a> { + client: &'a Client, + fallback_height: u32, + + last_cp: Option, + last_info: Option, + + seen_txids: HashSet, +} + +impl<'a> Iterator for BitcoindRpcIter<'a> { + type Item = Result; + + fn next(&mut self) -> Option { + self.next_emission().transpose() + } +} + +impl<'a> BitcoindRpcIter<'a> { + pub fn new(client: &'a Client, fallback_height: u32, last_cp: Option) -> Self { + Self { + client, + fallback_height, + last_cp, + last_info: None, + seen_txids: HashSet::new(), + } + } + + fn next_emission(&mut self) -> Result, bitcoincore_rpc::Error> { + let client = self.client; + + 'main_loop: loop { + match (&mut self.last_cp, &mut self.last_info) { + (last_cp @ None, last_info @ None) => { + // get first item at fallback_height + let info = client + .get_block_info(&client.get_block_hash(self.fallback_height as _)?)?; + let cp = CheckPoint::new(BlockId { + height: info.height as _, + hash: info.hash, + }); + *last_info = Some(info.clone()); + *last_cp = Some(cp.clone()); + return Ok(Some(BitcoindRpcItem::Block { + cp, + info: Box::new(info), + })); + } + (last_cp @ Some(_), last_info @ None) => { + 'cp_loop: for cp in last_cp.clone().iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + + let info = client.get_block_info(&cp_block.hash)?; + if info.confirmations < 0 { + // block is not in the main chain + continue 'cp_loop; + } + // agreement + // next loop + *last_cp = Some(cp); + *last_info = Some(info); + } + + // no point of agreement found + // next loop will emit block @ fallback height + *last_cp = None; + } + (Some(last_cp), last_info @ Some(_)) => { + // find next block + match last_info.as_ref().unwrap().nextblockhash { + Some(next_hash) => { + let info = 
self.client.get_block_info(&next_hash)?; + + if info.confirmations < 0 { + *last_info = None; + continue 'main_loop; + } + + let cp = CheckPoint::new_with_prev( + BlockId { + height: info.height as _, + hash: info.hash, + }, + Some(last_cp.clone()), + ) + .expect("must create valid checkpoint"); + + *last_cp = cp.clone(); + *last_info = Some(info.clone()); + + return Ok(Some(BitcoindRpcItem::Block { + cp, + info: Box::new(info), + })); + } + None => { + // emit from mempool! + let mempool_txs = client + .get_raw_mempool()? + .into_iter() + .filter(|&txid| self.seen_txids.insert(txid)) + .map( + |txid| -> Result<(Transaction, u64), bitcoincore_rpc::Error> { + let first_seen = client + .get_mempool_entry(&txid) + .map(|entry| entry.time)?; + let tx = client.get_raw_transaction(&txid, None)?; + Ok((tx, first_seen)) + }, + ) + .collect::, _>>()?; + + // remove last info... + *last_info = None; + + return Ok(Some(BitcoindRpcItem::Mempool { + txs: mempool_txs, + cp: last_cp.clone(), + })); + } + } + } + (None, Some(_)) => unreachable!(), + } + } + } +} + +pub trait BitcoindRpcErrorExt { + fn is_not_found_error(&self) -> bool; +} + +impl BitcoindRpcErrorExt for bitcoincore_rpc::Error { + fn is_not_found_error(&self) -> bool { + if let bitcoincore_rpc::Error::JsonRpc(bitcoincore_rpc::jsonrpc::Error::Rpc(rpc_err)) = self + { + rpc_err.code == -5 + } else { + false + } + } +} From 8f2cf9fd620d10f5097016b7db480f57a104282b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 19 Jun 2023 15:58:04 +0800 Subject: [PATCH 03/24] [bitcoind_rpc] Introduce `prune_and_apply_update` for `IndexedTxGraph` `prune_and_apply_update` first scans all txs contained in `update` through the index, then filters out txs using `I::is_tx_relevant` before applying the update. This is useful for block-by-block syncing. `Wallet::apply_update` now has a second input; `prune: bool`. If `prune` is set, irrelevant transactions of `update` will not be included. 
--- crates/bdk/src/wallet/mod.rs | 15 +++- crates/bitcoind_rpc/src/lib.rs | 74 ++++++++++++++++++- crates/chain/src/indexed_tx_graph.rs | 44 +++++++++-- example-crates/wallet_electrum/src/main.rs | 2 +- example-crates/wallet_esplora/src/main.rs | 2 +- .../wallet_esplora_async/src/main.rs | 2 +- 6 files changed, 124 insertions(+), 15 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index af818e1ff..15e5fad5c 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -1694,14 +1694,16 @@ impl Wallet { } /// Applies an update to the wallet and stages the changes (but does not [`commit`] them). + /// Returns whether the `update` resulted in any changes. /// - /// This returns whether the `update` resulted in any changes. + /// If `prune` is set, irrelevant transactions are pruned. Relevant transactions change the UTXO + /// set of tracked script pubkeys (script pubkeys derived from tracked descriptors). /// /// Usually you create an `update` by interacting with some blockchain data source and inserting /// transactions related to your wallet into it. 
/// /// [`commit`]: Self::commit - pub fn apply_update(&mut self, update: Update) -> Result + pub fn apply_update(&mut self, update: Update, prune: bool) -> Result where D: PersistBackend, { @@ -1711,7 +1713,14 @@ impl Wallet { .index .reveal_to_target_multi(&update.keychain); changeset.append(ChangeSet::from(IndexedAdditions::from(index_additions))); - changeset.append(self.indexed_graph.apply_update(update.graph).into()); + changeset.append( + if prune { + self.indexed_graph.prune_and_apply_update(update.graph) + } else { + self.indexed_graph.apply_update(update.graph) + } + .into(), + ); let changed = !changeset.is_empty(); self.persist.stage(changeset); diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index c8357fc56..70abd7a0f 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -1,9 +1,10 @@ use std::collections::HashSet; use bdk_chain::{ - bitcoin::{Transaction, Txid}, + bitcoin::{Block, Transaction, Txid}, + keychain::LocalUpdate, local_chain::CheckPoint, - BlockId, + BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor, TxGraph, }; use bitcoincore_rpc::{bitcoincore_rpc_json::GetBlockResult, Client, RpcApi}; @@ -12,6 +13,7 @@ pub enum BitcoindRpcItem { Block { cp: CheckPoint, info: Box, + block: Box, }, Mempool { cp: CheckPoint, @@ -19,6 +21,70 @@ pub enum BitcoindRpcItem { }, } +pub fn confirmation_height_anchor( + info: &GetBlockResult, + _txid: Txid, + _tx_pos: usize, +) -> ConfirmationHeightAnchor { + ConfirmationHeightAnchor { + anchor_block: BlockId { + height: info.height as _, + hash: info.hash, + }, + confirmation_height: info.height as _, + } +} + +pub fn confirmation_time_anchor( + info: &GetBlockResult, + _txid: Txid, + _tx_pos: usize, +) -> ConfirmationTimeAnchor { + ConfirmationTimeAnchor { + anchor_block: BlockId { + height: info.height as _, + hash: info.hash, + }, + confirmation_height: info.height as _, + confirmation_time: info.time as _, + } +} + +impl BitcoindRpcItem { + 
pub fn into_update(self, anchor: F) -> LocalUpdate + where + A: Clone + Ord + PartialOrd, + F: Fn(&GetBlockResult, Txid, usize) -> A, + { + match self { + BitcoindRpcItem::Block { cp, info, block } => LocalUpdate { + graph: { + let mut g = TxGraph::::new(block.txdata); + for (tx_pos, &txid) in info.tx.iter().enumerate() { + let _ = g.insert_anchor(txid, anchor(&info, txid, tx_pos)); + } + g + }, + ..LocalUpdate::new(cp) + }, + BitcoindRpcItem::Mempool { cp, txs } => LocalUpdate { + graph: { + let mut last_seens = Vec::<(Txid, u64)>::with_capacity(txs.len()); + let mut g = TxGraph::::new(txs.into_iter().map(|(tx, last_seen)| { + last_seens.push((tx.txid(), last_seen)); + tx + })); + for (txid, seen_at) in last_seens { + let _ = g.insert_seen_at(txid, seen_at); + } + g + }, + ..LocalUpdate::new(cp) + }, + } + } +} + pub struct BitcoindRpcIter<'a> { client: &'a Client, fallback_height: u32, @@ -57,6 +123,7 @@ impl<'a> BitcoindRpcIter<'a> { // get first item at fallback_height let info = client .get_block_info(&client.get_block_hash(self.fallback_height as _)?)?; + let block = self.client.get_block(&info.hash)?; let cp = CheckPoint::new(BlockId { height: info.height as _, hash: info.hash, @@ -66,6 +133,7 @@ impl<'a> BitcoindRpcIter<'a> { return Ok(Some(BitcoindRpcItem::Block { cp, info: Box::new(info), + block: Box::new(block), })); } (last_cp @ Some(_), last_info @ None) => { @@ -98,6 +166,7 @@ impl<'a> BitcoindRpcIter<'a> { continue 'main_loop; } + let block = self.client.get_block(&info.hash)?; let cp = CheckPoint::new_with_prev( BlockId { height: info.height as _, @@ -113,6 +182,7 @@ impl<'a> BitcoindRpcIter<'a> { return Ok(Some(BitcoindRpcItem::Block { cp, info: Box::new(info), + block: Box::new(block), })); } None => { diff --git a/crates/chain/src/indexed_tx_graph.rs b/crates/chain/src/indexed_tx_graph.rs index 730b04340..25f193275 100644 --- a/crates/chain/src/indexed_tx_graph.rs +++ b/crates/chain/src/indexed_tx_graph.rs @@ -90,6 +90,38 @@ where } } + /// 
Apply `update`, but filters out irrelevant transactions. + /// + /// Relevancy is determined by the [`Indexer::is_tx_relevant`] implementation of `I`. + pub fn prune_and_apply_update( + &mut self, + update: TxGraph, + ) -> IndexedAdditions { + let mut additions = IndexedAdditions::::default(); + + // index all transactions first + for tx_node in update.full_txs() { + additions + .index_additions + .append(self.index.index_tx(&tx_node)); + } + + let update = update + .full_txs() + .filter(|tx_node| self.index.is_tx_relevant(tx_node)) + .fold(TxGraph::default(), |mut g, tx_node| -> TxGraph { + let _ = g.insert_tx(tx_node.tx.clone()); + for anchor in tx_node.anchors { + let _ = g.insert_anchor(tx_node.txid, anchor.clone()); + } + let _ = g.insert_seen_at(tx_node.txid, tx_node.last_seen_unconfirmed); + g + }); + + additions.append(self.apply_update(update)); + additions + } + /// Insert a floating `txout` of given `outpoint`. pub fn insert_txout( &mut self, @@ -146,14 +178,12 @@ where // 2. decide whether to insert them into the graph depending on whether `is_tx_relevant` // returns true or not. (in a second loop). 
let mut additions = IndexedAdditions::::default(); - let mut transactions = Vec::new(); - for (tx, anchors) in txs.into_iter() { - additions.index_additions.append(self.index.index_tx(tx)); - transactions.push((tx, anchors)); - } + let txs = txs + .into_iter() + .inspect(|(tx, _)| additions.index_additions.append(self.index.index_tx(tx))) + .collect::>(); additions.append( - transactions - .into_iter() + txs.into_iter() .filter_map(|(tx, anchors)| match self.index.is_tx_relevant(tx) { true => Some(self.insert_tx(tx, anchors, seen_at)), false => None, diff --git a/example-crates/wallet_electrum/src/main.rs b/example-crates/wallet_electrum/src/main.rs index 2355a6fb0..32663b2b5 100644 --- a/example-crates/wallet_electrum/src/main.rs +++ b/example-crates/wallet_electrum/src/main.rs @@ -59,7 +59,7 @@ fn main() -> Result<(), Box> { let missing = electrum_update.missing_full_txs(wallet.as_ref()); let update = electrum_update.finalize_as_confirmation_time(&client, None, missing)?; - wallet.apply_update(update)?; + wallet.apply_update(update, false)?; wallet.commit()?; let balance = wallet.get_balance(); diff --git a/example-crates/wallet_esplora/src/main.rs b/example-crates/wallet_esplora/src/main.rs index 8ae042a96..4e0476398 100644 --- a/example-crates/wallet_esplora/src/main.rs +++ b/example-crates/wallet_esplora/src/main.rs @@ -61,7 +61,7 @@ fn main() -> Result<(), Box> { PARALLEL_REQUESTS, )?; println!(); - wallet.apply_update(update)?; + wallet.apply_update(update, false)?; wallet.commit()?; let balance = wallet.get_balance(); diff --git a/example-crates/wallet_esplora_async/src/main.rs b/example-crates/wallet_esplora_async/src/main.rs index afe751b73..a3a3399e1 100644 --- a/example-crates/wallet_esplora_async/src/main.rs +++ b/example-crates/wallet_esplora_async/src/main.rs @@ -57,7 +57,7 @@ async fn main() -> Result<(), Box> { .scan(prev_cp, keychain_spks, [], [], STOP_GAP, PARALLEL_REQUESTS) .await?; println!(); - wallet.apply_update(update)?; + 
wallet.apply_update(update, false)?; wallet.commit()?; let balance = wallet.get_balance(); From e34a6e05fca806214bd6accb1351c165826c87b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 19 Jun 2023 20:20:09 +0800 Subject: [PATCH 04/24] [bitcoind_rpc] Initial work on RPC example --- Cargo.toml | 1 + crates/bitcoind_rpc/Cargo.toml | 2 +- crates/bitcoind_rpc/src/lib.rs | 11 +- example-crates/example_electrum/src/main.rs | 3 +- example-crates/example_rpc/Cargo.toml | 12 + example-crates/example_rpc/src/main.rs | 299 ++++++++++++++++++++ 6 files changed, 323 insertions(+), 5 deletions(-) create mode 100644 example-crates/example_rpc/Cargo.toml create mode 100644 example-crates/example_rpc/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index adfc16c5d..8798269e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/bitcoind_rpc", "example-crates/example_cli", "example-crates/example_electrum", + "example-crates/example_rpc", "example-crates/wallet_electrum", "example-crates/wallet_esplora", "example-crates/wallet_esplora_async", diff --git a/crates/bitcoind_rpc/Cargo.toml b/crates/bitcoind_rpc/Cargo.toml index 22c6514e2..f849be627 100644 --- a/crates/bitcoind_rpc/Cargo.toml +++ b/crates/bitcoind_rpc/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "bitcoind_rpc" +name = "bdk_bitcoind_rpc" version = "0.1.0" edition = "2021" diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index 70abd7a0f..3786bb148 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -6,6 +6,7 @@ use bdk_chain::{ local_chain::CheckPoint, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor, TxGraph, }; +pub use bitcoincore_rpc; use bitcoincore_rpc::{bitcoincore_rpc_json::GetBlockResult, Client, RpcApi}; #[derive(Debug, Clone)] @@ -51,6 +52,10 @@ pub fn confirmation_time_anchor( } impl BitcoindRpcItem { + pub fn is_mempool(&self) -> bool { + matches!(self, Self::Mempool { .. 
}) + } + pub fn into_update(self, anchor: F) -> LocalUpdate where A: Clone + Ord + PartialOrd, @@ -145,15 +150,17 @@ impl<'a> BitcoindRpcIter<'a> { // block is not in the main chain continue 'cp_loop; } + // agreement - // next loop *last_cp = Some(cp); *last_info = Some(info); + continue 'main_loop; } // no point of agreement found // next loop will emit block @ fallback height *last_cp = None; + *last_info = None; } (Some(last_cp), last_info @ Some(_)) => { // find next block @@ -212,7 +219,7 @@ impl<'a> BitcoindRpcIter<'a> { } } } - (None, Some(_)) => unreachable!(), + (None, Some(info)) => unreachable!("got info with no checkpoint? info={:#?}", info), } } } diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index b5ca8c2a7..243141d7c 100644 --- a/example-crates/example_electrum/src/main.rs +++ b/example-crates/example_electrum/src/main.rs @@ -22,8 +22,7 @@ use example_cli::{ }; const DB_MAGIC: &[u8] = b"bdk_example_electrum"; -const DB_PATH: &str = ".bdk_electrum_example.db"; -// const ASSUME_FINAL_DEPTH: usize = 10; +const DB_PATH: &str = ".bdk_example_electrum.db"; #[derive(Subcommand, Debug, Clone)] enum ElectrumCommands { diff --git a/example-crates/example_rpc/Cargo.toml b/example-crates/example_rpc/Cargo.toml new file mode 100644 index 000000000..c107c49b6 --- /dev/null +++ b/example-crates/example_rpc/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "example_rpc" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bdk_chain = { path = "../../crates/chain", features = ["serde"] } +bdk_bitcoind_rpc = { path = "../../crates/bitcoind_rpc" } +example_cli = { path = "../example_cli" } +ctrlc = { version = "^2" } diff --git a/example-crates/example_rpc/src/main.rs b/example-crates/example_rpc/src/main.rs new file mode 100644 index 000000000..92064882b --- /dev/null +++ b/example-crates/example_rpc/src/main.rs 
@@ -0,0 +1,299 @@ +use std::{ + path::PathBuf, + sync::{ + atomic::{AtomicBool, Ordering}, + mpsc::sync_channel, + Arc, Mutex, + }, + time::{Duration, Instant, SystemTime}, +}; + +use bdk_bitcoind_rpc::{ + bitcoincore_rpc::{Auth, Client, RpcApi}, + confirmation_time_anchor, BitcoindRpcItem, BitcoindRpcIter, +}; +use bdk_chain::{ + bitcoin::{Address, Transaction}, + indexed_tx_graph::IndexedAdditions, + keychain::{LocalChangeSet, LocalUpdate}, + local_chain::LocalChain, + Append, BlockId, ConfirmationTimeAnchor, IndexedTxGraph, +}; +use example_cli::{ + anyhow, + clap::{self, Args, Subcommand}, + CoinSelectionAlgo, Keychain, +}; + +const DB_MAGIC: &[u8] = b"bdk_example_rpc"; +const DB_PATH: &str = ".bdk_example_rpc.db"; +const CHANNEL_BOUND: usize = 10; +const LIVE_POLL_DUR_SECS: Duration = Duration::from_secs(15); + +type ChangeSet = LocalChangeSet; + +#[derive(Args, Debug, Clone)] +struct RpcArgs { + /// RPC URL + #[clap(env = "RPC_URL", long, default_value = "127.0.0.1:8332")] + url: String, + /// RPC auth cookie file + #[clap(env = "RPC_COOKIE", long)] + rpc_cookie: Option, + /// RPC auth username + #[clap(env = "RPC_USER", long)] + rpc_user: Option, + /// RPC auth password + #[clap(env = "RPC_PASS", long)] + rpc_password: Option, +} + +impl From for Auth { + fn from(args: RpcArgs) -> Self { + match (args.rpc_cookie, args.rpc_user, args.rpc_password) { + (None, None, None) => Self::None, + (Some(path), _, _) => Self::CookieFile(path), + (_, Some(user), Some(pass)) => Self::UserPass(user, pass), + (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"), + (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"), + } + } +} + +#[derive(Subcommand, Debug, Clone)] +enum RpcCommands { + /// Scans blocks via RPC (starting from last point of agreement) and stores/indexes relevant + /// transactions + Scan { + /// Starting block height to fallback to if no point of agreement if found + #[clap(env = "FALLBACK_HEIGHT", long, default_value = "0")] + fallback_height: 
u32, + /// The unused-scripts lookahead will be kept at this size + #[clap(long, default_value = "10")] + lookahead: u32, + /// Whether to be live! + #[clap(long, default_value = "false")] + live: bool, + #[clap(flatten)] + rpc_args: RpcArgs, + }, + /// Create and broadcast a transaction. + Tx { + value: u64, + address: Address, + #[clap(short, default_value = "bnb")] + coin_select: CoinSelectionAlgo, + #[clap(flatten)] + rpc_args: RpcArgs, + }, +} + +impl RpcCommands { + fn rpc_args(&self) -> &RpcArgs { + match self { + RpcCommands::Scan { rpc_args, .. } => rpc_args, + RpcCommands::Tx { rpc_args, .. } => rpc_args, + } + } +} + +fn main() -> anyhow::Result<()> { + let sigterm_flag = start_ctrlc_handler(); + + let (args, keymap, index, db, init_changeset) = + example_cli::init::(DB_MAGIC, DB_PATH)?; + + let graph = Mutex::new({ + let mut graph = IndexedTxGraph::new(index); + graph.apply_additions(init_changeset.indexed_additions); + graph + }); + + let chain = Mutex::new(LocalChain::from_changeset(init_changeset.chain_changeset)); + + let rpc_cmd = match args.command { + example_cli::Commands::ChainSpecific(rpc_cmd) => rpc_cmd, + general_cmd => { + let res = example_cli::handle_commands( + &graph, + &db, + &chain, + &keymap, + args.network, + |_| Err(anyhow::anyhow!("use `tx` instead")), + general_cmd, + ); + db.lock().unwrap().commit()?; + return res; + } + }; + + let rpc_client = { + let a = rpc_cmd.rpc_args(); + Client::new( + &a.url, + match (&a.rpc_cookie, &a.rpc_user, &a.rpc_password) { + (None, None, None) => Auth::None, + (Some(path), _, _) => Auth::CookieFile(path.clone()), + (_, Some(user), Some(pass)) => Auth::UserPass(user.clone(), pass.clone()), + (_, Some(_), None) => panic!("rpc auth: missing rpc_pass"), + (_, None, Some(_)) => panic!("rpc auth: missing rpc_user"), + }, + )? + }; + + match rpc_cmd { + RpcCommands::Scan { + fallback_height, + lookahead, + live, + .. 
+ } => { + graph.lock().unwrap().index.set_lookahead_for_all(lookahead); + + let (chan, recv) = sync_channel::<(BitcoindRpcItem, u32)>(CHANNEL_BOUND); + let prev_cp = chain.lock().unwrap().tip(); + + let join_handle = std::thread::spawn(move || -> anyhow::Result<()> { + let mut tip_height = Option::::None; + + for item in BitcoindRpcIter::new(&rpc_client, fallback_height, prev_cp) { + let item = item?; + let is_block = !item.is_mempool(); + let is_mempool = item.is_mempool(); + + if tip_height.is_none() || !is_block { + tip_height = Some(rpc_client.get_block_count()? as u32); + } + chan.send((item, tip_height.expect("must have tip height")))?; + + if sigterm_flag.load(Ordering::Acquire) { + return Ok(()); + } + if is_mempool { + if !live { + return Ok(()); + } + if await_flag(&sigterm_flag, LIVE_POLL_DUR_SECS) { + return Ok(()); + } + } + } + unreachable!() + }); + + let mut start = Instant::now(); + + for (item, tip_height) in recv { + let is_mempool = item.is_mempool(); + let update: LocalUpdate = + item.into_update(confirmation_time_anchor); + let current_height = update.tip.height(); + + let db_changeset = { + let mut chain = chain.lock().unwrap(); + let mut graph = graph.lock().unwrap(); + + let chain_changeset = chain.apply_update(update.tip)?; + + let mut indexed_additions = + IndexedAdditions::::default(); + let (_, index_additions) = graph.index.reveal_to_target_multi(&update.keychain); + indexed_additions.append(index_additions.into()); + indexed_additions.append(graph.prune_and_apply_update(update.graph)); + + ChangeSet { + indexed_additions, + chain_changeset, + } + }; + + let mut db = db.lock().unwrap(); + db.stage(db_changeset); + + // print stuff every 3 seconds + if start.elapsed() >= Duration::from_secs(3) { + start = Instant::now(); + let balance = { + let chain = chain.lock().unwrap(); + let graph = graph.lock().unwrap(); + graph.graph().balance( + &*chain, + chain.tip().map_or(BlockId::default(), |cp| cp.block_id()), + 
graph.index.outpoints().iter().cloned(), + |(k, _), _| k == &Keychain::Internal, + ) + }; + println!( + "* scanned_to: {} / {} tip | total: {} sats", + if is_mempool { + "mempool".to_string() + } else { + current_height.to_string() + }, + tip_height, + balance.confirmed + + balance.immature + + balance.trusted_pending + + balance.untrusted_pending + ); + } + } + + db.lock().unwrap().commit()?; + println!("commited to database!"); + + join_handle + .join() + .expect("failed to join chain source thread") + } + RpcCommands::Tx { + value, + address, + coin_select, + .. + } => { + let chain = chain.lock().unwrap(); + let broadcast = move |tx: &Transaction| -> anyhow::Result<()> { + rpc_client.send_raw_transaction(tx)?; + Ok(()) + }; + example_cli::run_send_cmd( + &graph, + &db, + &*chain, + &keymap, + coin_select, + address, + value, + broadcast, + ) + } + } +} + +fn start_ctrlc_handler() -> Arc { + let flag = Arc::new(AtomicBool::new(false)); + let cloned_flag = flag.clone(); + + ctrlc::set_handler(move || cloned_flag.store(true, Ordering::Release)); + + flag +} + +fn await_flag(flag: &AtomicBool, duration: Duration) -> bool { + let start = SystemTime::now(); + loop { + if flag.load(Ordering::Acquire) { + return true; + } + if SystemTime::now() + .duration_since(start) + .expect("should succeed") + >= duration + { + return false; + } + std::thread::sleep(Duration::from_secs(1)); + } +} From 1c39e6c9d78a23e6f69d51d2d8d5709cb7fd38a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Tue, 20 Jun 2023 14:59:52 +0800 Subject: [PATCH 05/24] [local_chain] API simplifications Revert `get_or_insert` back as `insert_block`. Method `update` now mutates `LocalChain` directly, instead of mutating via a second call. `CheckPoint::new_with_prev` is replaced with `CheckPoint::extend`. 
--- crates/bdk/src/wallet/mod.rs | 4 +- crates/bitcoind_rpc/src/lib.rs | 11 ++- crates/chain/src/local_chain.rs | 91 ++++++++------------- crates/chain/tests/test_local_chain.rs | 9 +- crates/electrum/src/electrum_ext.rs | 8 +- crates/esplora/src/async_ext.rs | 10 ++- crates/esplora/src/blocking_ext.rs | 10 ++- example-crates/example_electrum/src/main.rs | 2 +- example-crates/example_rpc/src/main.rs | 2 +- 9 files changed, 63 insertions(+), 84 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index 15e5fad5c..39c9fa37d 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -464,7 +464,7 @@ impl Wallet { where D: PersistBackend, { - let (_, changeset) = self.chain.get_or_insert(block_id)?; + let changeset = self.chain.insert_block(block_id)?; let changed = !changeset.is_empty(); self.persist.stage(changeset.into()); Ok(changed) @@ -1707,7 +1707,7 @@ impl Wallet { where D: PersistBackend, { - let mut changeset = ChangeSet::from(self.chain.apply_update(update.tip)?); + let mut changeset = ChangeSet::from(self.chain.update(update.tip)?); let (_, index_additions) = self .indexed_graph .index diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index 3786bb148..ccaf780b8 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -174,14 +174,13 @@ impl<'a> BitcoindRpcIter<'a> { } let block = self.client.get_block(&info.hash)?; - let cp = CheckPoint::new_with_prev( - BlockId { + let cp = last_cp + .clone() + .extend(BlockId { height: info.height as _, hash: info.hash, - }, - Some(last_cp.clone()), - ) - .expect("must create valid checkpoint"); + }) + .expect("must extend from checkpoint"); *last_cp = cp.clone(); *last_info = Some(info.clone()); diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 361dcff01..23286c93f 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -47,24 +47,18 @@ impl 
CheckPoint { Self(Arc::new(CPInner { block, prev: None })) } - /// Construct a [`CheckPoint`] of `block` with a previous checkpoint. - pub fn new_with_prev( - block: BlockId, - prev: Option, - ) -> Result { - if let Some(prev_cp) = &prev { - if prev_cp.height() >= block.height { - return Err(NewCheckPointError { - new_height: block.height, - prev_height: prev_cp.height(), - }); - } + /// Extends [`CheckPoint`] with `block` and returns the new checkpoint tip. + /// + /// Returns an `Err` of the initial checkpoint + pub fn extend(self, block: BlockId) -> Result { + if self.height() < block.height { + Ok(Self(Arc::new(CPInner { + block, + prev: Some(self.0), + }))) + } else { + Err(self) } - - Ok(Self(Arc::new(CPInner { - block, - prev: prev.map(|cp| cp.0), - }))) } /// Get the [`BlockId`] of the checkpoint. @@ -199,10 +193,14 @@ impl LocalChain { checkpoints: blocks .into_iter() .map({ - let mut prev = None; + let mut prev = Option::::None; move |(height, hash)| { - let cp = CheckPoint::new_with_prev(BlockId { height, hash }, prev.clone()) - .expect("must not fail"); + let cp = match prev.clone() { + Some(prev) => { + prev.extend(BlockId { height, hash }).expect("must extend") + } + None => CheckPoint::new(BlockId { height, hash }), + }; prev = Some(cp.clone()); (height, cp) } @@ -221,10 +219,10 @@ impl LocalChain { self.checkpoints.is_empty() } - /// Previews, and optionally applies updates to [`Self`] with the given `new_tip`. + /// Updates [`Self`] with the given `new_tip`. /// - /// The method returns `(apply_update, changeset)` if [`Ok`]. `apply_update` is a closure that - /// can be called to apply the changes represented in `changeset. + /// The method returns [`ChangeSet`] on success. This represents the applied changes to + /// [`Self`]. /// /// To update, the `new_tip` must *connect* with `self`. 
If `self` and `new_tip` has a mutual /// checkpoint (same height and hash), it can connect if: @@ -247,10 +245,7 @@ impl LocalChain { /// Refer to [module-level documentation] for more. /// /// [module-level documentation]: crate::local_chain - pub fn update( - &mut self, - new_tip: CheckPoint, - ) -> Result<(impl FnOnce() + '_, ChangeSet), CannotConnectError> { + pub fn update(&mut self, new_tip: CheckPoint) -> Result { let mut updated_cps = BTreeMap::::new(); let mut agreement_height = Option::::None; let mut complete_match = false; @@ -316,17 +311,16 @@ impl LocalChain { changeset }; - let apply_update = move || { - if let Some(&start_height) = updated_cps.keys().next() { - self.checkpoints.split_off(&invalidate_lb); - self.checkpoints.append(&mut updated_cps); - if !self.is_empty() && !complete_match { - self.fix_links(start_height); - } + // apply update if `update_cps` is non-empty + if let Some(&start_height) = updated_cps.keys().next() { + self.checkpoints.split_off(&invalidate_lb); + self.checkpoints.append(&mut updated_cps); + if !self.is_empty() && !complete_match { + self.fix_links(start_height); } - }; + } - Ok((apply_update, changeset)) + Ok(changeset) } /// Apply the given `changeset`. @@ -344,41 +338,24 @@ impl LocalChain { } } - /// Update [`LocalChain`]. - /// - /// This is equivalent to calling [`update`] and applying the update in sequence. - /// - /// [`update`]: Self::update - pub fn apply_update(&mut self, new_tip: CheckPoint) -> Result { - let (apply, changeset) = self.update(new_tip)?; - apply(); - Ok(changeset) - } - - /// Get or insert a `block_id`. + /// Insert a [`BlockId`]. /// /// # Errors /// /// Replacing the block hash of an existing checkpoint will result in an error. 
- pub fn get_or_insert( - &mut self, - block_id: BlockId, - ) -> Result<(CheckPoint, ChangeSet), InsertBlockError> { + pub fn insert_block(&mut self, block_id: BlockId) -> Result { use crate::collections::btree_map::Entry; match self.checkpoints.entry(block_id.height) { Entry::Vacant(entry) => { entry.insert(CheckPoint::new(block_id)); self.fix_links(block_id.height); - let cp = self.checkpoint(block_id.height).expect("must be inserted"); - let changeset = - core::iter::once((block_id.height, Some(block_id.hash))).collect::(); - Ok((cp, changeset)) + Ok(core::iter::once((block_id.height, Some(block_id.hash))).collect()) } Entry::Occupied(entry) => { let cp = entry.get(); if cp.block_id() == block_id { - Ok((cp.clone(), ChangeSet::default())) + Ok(ChangeSet::default()) } else { Err(InsertBlockError { height: block_id.height, diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs index fc0a9a4ab..101a7cf32 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -27,10 +27,7 @@ enum ExpectedResult<'a> { impl<'a> TestLocalChain<'a> { fn run(mut self) { let got_changeset = match self.chain.update(self.new_tip) { - Ok((apply, changeset)) => { - apply(); - changeset - } + Ok(changeset) => changeset, Err(err) => { assert_eq!(ExpectedResult::Err(err), self.exp); return; @@ -270,9 +267,7 @@ fn insert_block() { for (i, t) in test_cases.into_iter().enumerate() { let mut chain = t.original; assert_eq!( - chain - .get_or_insert(t.insert.into()) - .map(|(_, changeset)| changeset), + chain.insert_block(t.insert.into()), t.expected_result, "[{}] unexpected result when inserting block", i, diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index 5dc6a8b35..49fe236cb 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -325,8 +325,12 @@ fn prepare_chain_update( // construct checkpoints for (height, hash) in new_blocks { - let 
cp = CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) - .expect("heights should not conflict"); + let cp = match last_cp.clone() { + Some(last_cp) => last_cp + .extend(BlockId { height, hash }) + .expect("must extend checkpoint"), + None => CheckPoint::new(BlockId { height, hash }), + }; last_cp = Some(cp); } diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index 8023b7422..7fc08e822 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -141,10 +141,12 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { // construct checkpoints for (&height, &hash) in new_blocks.iter() { - last_cp = Some( - CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) - .expect("heights should not conflict"), - ); + last_cp = Some(match last_cp { + Some(last_cp) => last_cp + .extend(BlockId { height, hash }) + .expect("must extend checkpoint"), + None => CheckPoint::new(BlockId { height, hash }), + }); } let tip = last_cp.expect("must have atleast one checkpoint"); diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 2c5ddc6e9..3649d7b21 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -122,10 +122,12 @@ impl EsploraExt for esplora_client::BlockingClient { // construct checkpoints for (&height, &hash) in new_blocks.iter() { - last_cp = Some( - CheckPoint::new_with_prev(BlockId { height, hash }, last_cp) - .expect("heights should not conflict"), - ); + last_cp = Some(match last_cp { + Some(last_cp) => last_cp + .extend(BlockId { height, hash }) + .expect("must extend checkpoint"), + None => CheckPoint::new(BlockId { height, hash }), + }); } let tip = last_cp.expect("must have atleast one checkpoint"); diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index 243141d7c..89a54b7ef 100644 --- a/example-crates/example_electrum/src/main.rs +++ 
b/example-crates/example_electrum/src/main.rs @@ -274,7 +274,7 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.apply_update(final_update.tip)?; + let chain_changeset = chain.update(final_update.tip)?; let indexed_additions = { let mut additions = IndexedAdditions::::default(); diff --git a/example-crates/example_rpc/src/main.rs b/example-crates/example_rpc/src/main.rs index 92064882b..c6e148958 100644 --- a/example-crates/example_rpc/src/main.rs +++ b/example-crates/example_rpc/src/main.rs @@ -194,7 +194,7 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.apply_update(update.tip)?; + let chain_changeset = chain.update(update.tip)?; let mut indexed_additions = IndexedAdditions::::default(); From 6be254139da54def91ab22a5bcfe2a5da336e31c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Fri, 23 Jun 2023 13:37:09 +0800 Subject: [PATCH 06/24] [local_chain] Fix incorrect optimisation logic in `update()` Within `update()`, it is not always necessary to call `fix_links()`. The logic to detect this was wrong previously. Add test that would fail with the previous logic. --- crates/chain/src/local_chain.rs | 57 +++++++++++--------------- crates/chain/tests/test_local_chain.rs | 23 +++++++++++ 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 23286c93f..d4d55dae0 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -23,24 +23,6 @@ struct CPInner { prev: Option>, } -/// Occurs when the caller contructs a [`CheckPoint`] with a height that is not higher than the -/// previous checkpoint it points to. -#[derive(Debug, Clone, PartialEq)] -pub struct NewCheckPointError { - /// The height of the new checkpoint. 
- pub new_height: u32, - /// The height of the previous checkpoint. - pub prev_height: u32, -} - -impl core::fmt::Display for NewCheckPointError { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "cannot construct checkpoint with a height ({}) that is not higher than the previous checkpoint ({})", self.new_height, self.prev_height) - } -} - -impl std::error::Error for NewCheckPointError {} - impl CheckPoint { /// Construct a [`CheckPoint`] from a [`BlockId`]. pub fn new(block: BlockId) -> Self { @@ -248,27 +230,30 @@ impl LocalChain { pub fn update(&mut self, new_tip: CheckPoint) -> Result { let mut updated_cps = BTreeMap::::new(); let mut agreement_height = Option::::None; - let mut complete_match = false; + let mut agreement_ptr_matches = false; for cp in new_tip.iter() { let block = cp.block_id(); - let original_cp = self.checkpoints.get(&block.height); - // if original block of height does not exist, or if the hash does not match we will - // need to update the original checkpoint at that height - if original_cp.map(CheckPoint::block_id) != Some(block) { - updated_cps.insert(block.height, cp.clone()); - } + match self.checkpoints.get(&block.height) { + Some(original_cp) if original_cp.block_id() == block => { + let ptr_matches = Arc::as_ptr(&original_cp.0) == Arc::as_ptr(&cp.0); + + // only record the first agreement height + if agreement_height.is_none() && original_cp.block_id() == block { + agreement_height = Some(block.height); + agreement_ptr_matches = ptr_matches; + } - if let Some(original_cp) = original_cp { - // record the first agreement height - if agreement_height.is_none() && original_cp.block_id() == block { - agreement_height = Some(block.height); + // break if the internal pointers of the checkpoints are the same + if ptr_matches { + break; + } } - // break if the internal pointers of the checkpoints are the same - if Arc::as_ptr(&original_cp.0) == Arc::as_ptr(&cp.0) { - complete_match = true; - break; + // 
only insert into `updated_cps` if cp is actually updated (original cp is `None`, + // or block ids do not match) + _ => { + updated_cps.insert(block.height, cp.clone()); } } } @@ -315,7 +300,11 @@ impl LocalChain { if let Some(&start_height) = updated_cps.keys().next() { self.checkpoints.split_off(&invalidate_lb); self.checkpoints.append(&mut updated_cps); - if !self.is_empty() && !complete_match { + + // we never need to fix links if either: + // 1. the original chain is empty + // 2. the pointers match at the first point of agreement (where the block ids are equal) + if !(self.is_empty() || agreement_ptr_matches) { self.fix_links(start_height); } } diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs index 101a7cf32..4d6697841 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -213,6 +213,29 @@ fn update() { new_tip: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))], exp: ExpectedResult::Err(CannotConnectError { try_include: BlockId { height: 0, hash: h!("A") } }), }, + // Introduce blocks between two points of agreement + // | 0 | 1 | 2 | 3 | 4 | 5 + // chain | A B D E + // update | A C E F + TestLocalChain { + name: "introduce blocks between two points of agreement", + chain: local_chain![(0, h!("A")), (1, h!("B")), (3, h!("D")), (4, h!("E"))], + new_tip: chain_update![(0, h!("A")), (2, h!("C")), (4, h!("E")), (5, h!("F"))], + exp: ExpectedResult::Ok { + changeset: &[ + (2, Some(h!("C"))), + (5, Some(h!("F"))), + ], + init_changeset: &[ + (0, Some(h!("A"))), + (1, Some(h!("B"))), + (2, Some(h!("C"))), + (3, Some(h!("D"))), + (4, Some(h!("E"))), + (5, Some(h!("F"))), + ], + }, + } ] .into_iter() .for_each(TestLocalChain::run); From 01cc782ca192616e03781e05355c9af0a185c43a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Sat, 24 Jun 2023 10:11:23 +0800 Subject: [PATCH 07/24] [esplora/electrum] Make updates more consistent For esplora: * Separate 
checkpoint-update logic into a separate method `construct_update_tip`. * We batch-get latest blocks to ensure consistency. * Reorg-mitigation logic is changed to only reconstruct checkpoints and anchors. For electrum: * Rename `prepare_chain_update` to `construct_update_tip`. Use `Client::block_headers` to get latest headers atomically (instead of fetching headers one by one). * `determine_tx_anchor` now uses the lowest anchor checkpoint possible. * Add comments for better documentation. --- crates/electrum/src/electrum_ext.rs | 214 ++++++++++++---------- crates/electrum/src/lib.rs | 2 + crates/esplora/src/async_ext.rs | 194 +++++++++++--------- crates/esplora/src/blocking_ext.rs | 180 ++++++++++-------- crates/esplora/src/lib.rs | 29 ++- example-crates/wallet_esplora/src/main.rs | 2 +- 6 files changed, 350 insertions(+), 271 deletions(-) diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index 49fe236cb..b96395eb5 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -1,25 +1,36 @@ use bdk_chain::{ - bitcoin::{hashes::hex::FromHex, BlockHash, OutPoint, Script, Transaction, Txid}, + bitcoin::{hashes::hex::FromHex, OutPoint, Script, Transaction, Txid}, keychain::LocalUpdate, local_chain::CheckPoint, tx_graph::{self, TxGraph}, Anchor, BlockId, ConfirmationHeightAnchor, ConfirmationTimeAnchor, }; -use electrum_client::{Client, ElectrumApi, Error}; +use electrum_client::{Client, ElectrumApi, Error, HeaderNotification}; use std::{ collections::{BTreeMap, BTreeSet, HashMap, HashSet}, fmt::Debug, }; +/// We assume that a block of this depth and deeper cannot be reorged. +const ASSUME_FINAL_DEPTH: u32 = 8; + +/// Represents an update fetched from an Electrum server, but excludes full transactions. +/// +/// To provide a complete update to [`TxGraph`], you'll need to call [`Self::missing_full_txs`] to +/// determine the full transactions missing from [`TxGraph`]. 
Then call [`Self::finalize`] to fetch +/// the full transactions from Electrum and finalize the update. #[derive(Debug, Clone)] pub struct ElectrumUpdate { + /// Map of [`Txid`]s to associated [`Anchor`]s. pub graph_update: HashMap>, + /// The latest chain tip, as seen by the Electrum server. pub chain_update: CheckPoint, + /// Last-used index update for [`KeychainTxOutIndex`](bdk_chain::keychain::KeychainTxOutIndex). pub keychain_update: BTreeMap, } impl ElectrumUpdate { - pub fn new(cp: CheckPoint) -> Self { + fn new(cp: CheckPoint) -> Self { Self { graph_update: HashMap::new(), chain_update: cp, @@ -27,6 +38,9 @@ impl ElectrumUpdate { } } + /// Determine the full transactions that are missing from `graph`. + /// + /// Refer to [`ElectrumUpdate`]. pub fn missing_full_txs(&self, graph: &TxGraph) -> Vec { self.graph_update .keys() @@ -35,6 +49,9 @@ impl ElectrumUpdate { .collect() } + /// Finalizes update with `missing` txids to fetch from `client`. + /// + /// Refer to [`ElectrumUpdate`]. pub fn finalize( self, client: &Client, @@ -73,6 +90,7 @@ impl ElectrumUpdate { missing: Vec, ) -> Result, Error> { let update = self.finalize(client, seen_at, missing)?; + // client.batch_transaction_get(txid) let relevant_heights = { let mut visited_heights = HashSet::new(); @@ -131,9 +149,19 @@ impl ElectrumUpdate { } } +/// Trait to extend [`Client`] functionality. pub trait ElectrumExt { - fn get_tip(&self) -> Result<(u32, BlockHash), Error>; - + /// Scan the blockchain (via electrum) for the data specified and returns a [`ElectrumUpdate`]. 
+ /// + /// - `prev_tip`: the most recent blockchain tip present locally + /// - `keychain_spks`: keychains that we want to scan transactions for + /// - `txids`: transactions for which we want updated [`Anchor`]s + /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we + /// want to included in the update + /// + /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated + /// transactions. `batch_size` specifies the max number of script pubkeys to request for in a + /// single batch request. fn scan( &self, prev_tip: Option, @@ -144,6 +172,9 @@ pub trait ElectrumExt { batch_size: usize, ) -> Result, Error>; + /// Convenience method to call [`scan`] without requiring a keychain. + /// + /// [`scan`]: ElectrumExt::scan fn scan_without_keychain( &self, prev_tip: Option, @@ -169,12 +200,6 @@ pub trait ElectrumExt { } impl ElectrumExt for Client { - fn get_tip(&self) -> Result<(u32, BlockHash), Error> { - // TODO: unsubscribe when added to the client, or is there a better call to use here? 
- self.block_headers_subscribe() - .map(|data| (data.height as u32, data.header.block_hash())) - } - fn scan( &self, prev_tip: Option, @@ -194,16 +219,20 @@ impl ElectrumExt for Client { let outpoints = outpoints.into_iter().collect::>(); let update = loop { - let mut update = ElectrumUpdate::::new( - prepare_chain_update(self, prev_tip.clone())?, - ); - let anchor_block = update.chain_update.block_id(); + let (tip, _) = construct_update_tip(self, prev_tip.clone())?; + let mut update = ElectrumUpdate::::new(tip.clone()); + let cps = update + .chain_update + .iter() + .take(10) + .map(|cp| (cp.height(), cp)) + .collect::>(); if !request_spks.is_empty() { if !scanned_spks.is_empty() { scanned_spks.append(&mut populate_with_spks( self, - anchor_block, + &cps, &mut update, &mut scanned_spks .iter() @@ -216,7 +245,7 @@ impl ElectrumExt for Client { scanned_spks.extend( populate_with_spks( self, - anchor_block, + &cps, &mut update, keychain_spks, stop_gap, @@ -228,20 +257,14 @@ impl ElectrumExt for Client { } } - populate_with_txids(self, anchor_block, &mut update, &mut txids.iter().cloned())?; + populate_with_txids(self, &cps, &mut update, &mut txids.iter().cloned())?; - let _txs = populate_with_outpoints( - self, - anchor_block, - &mut update, - &mut outpoints.iter().cloned(), - )?; + let _txs = + populate_with_outpoints(self, &cps, &mut update, &mut outpoints.iter().cloned())?; // check for reorgs during scan process - let server_blockhash = self - .block_header(anchor_block.height as usize)? - .block_hash(); - if anchor_block.hash != server_blockhash { + let server_blockhash = self.block_header(tip.height() as usize)?.block_hash(); + if tip.hash() != server_blockhash { continue; // reorg } @@ -262,83 +285,86 @@ impl ElectrumExt for Client { } } -/// Prepare an update "template" based on the checkpoints of the `local_chain`. -fn prepare_chain_update( +/// Return a [`CheckPoint`] of the latest tip, that connects with `prev_tip`. 
+fn construct_update_tip( client: &Client, prev_tip: Option, -) -> Result { - let mut header_notification = client.block_headers_subscribe()?; - - let (new_blocks, mut last_cp) = 'retry: loop { - let tip = BlockId { - height: header_notification.height as _, - hash: header_notification.header.block_hash(), - }; - let tip_parent = BlockId { - height: (header_notification.height - 1) as _, - hash: header_notification.header.prev_blockhash, - }; +) -> Result<(CheckPoint, Option), Error> { + let HeaderNotification { height, .. } = client.block_headers_subscribe()?; + let new_tip_height = height as u32; + + // If electrum returns a tip height that is lower than our previous tip, then checkpoints do + // not need updating. We just return the previous tip and use that as the point of agreement. + if let Some(prev_tip) = prev_tip.as_ref() { + if new_tip_height < prev_tip.height() { + return Ok((prev_tip.clone(), Some(prev_tip.height()))); + } + } - // this records new blocks, including blocks that are to be replaced - let mut new_blocks = [tip_parent, tip] + // Atomically fetch the latest `ASSUME_FINAL_DEPTH` count of blocks from Electrum. We use this + // to construct our checkpoint update. + let mut new_blocks = { + let start_height = new_tip_height.saturating_sub(ASSUME_FINAL_DEPTH); + let hashes = client + .block_headers(start_height as _, ASSUME_FINAL_DEPTH as _)? + .headers .into_iter() - .map(|b| (b.height, b.hash)) - .collect::>(); - let mut agreement_cp = Option::::None; + .map(|h| h.block_hash()); + (start_height..).zip(hashes).collect::>() + }; + // Find the "point of agreement" (if any). 
+ let agreement_cp = { + let mut agreement_cp = Option::::None; for cp in prev_tip.iter().flat_map(CheckPoint::iter) { let cp_block = cp.block_id(); - // TODO: a batch request may be safer, as a reorg that happens when we are obtaining - // `block_header`s will result in inconsistencies - let hash = client.block_header(cp_block.height as _)?.block_hash(); + let hash = match new_blocks.get(&cp_block.height) { + Some(&hash) => hash, + None => { + assert!( + new_tip_height >= cp_block.height, + "already checked that electrum's tip cannot be smaller" + ); + let hash = client.block_header(cp_block.height as _)?.block_hash(); + new_blocks.insert(cp_block.height, hash); + hash + } + }; if hash == cp_block.hash { agreement_cp = Some(cp); break; } - new_blocks.insert(cp_block.height, hash); - } - - // check for tip changes - loop { - match client.block_headers_pop()? { - Some(new_notification) => { - let new_height = new_notification.height; - header_notification = new_notification; - if new_height as u32 <= tip.height { - // we may have a reorg - // reorg-detection logic can be improved (false positives are possible) - continue 'retry; - } - } - None => { - let new_blocks = match &agreement_cp { - // `new_blocks` should only include blocks that are actually new - Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), - None => new_blocks, - }; - - break 'retry (new_blocks, agreement_cp); - } - }; } + agreement_cp }; - // construct checkpoints - for (height, hash) in new_blocks { - let cp = match last_cp.clone() { - Some(last_cp) => last_cp - .extend(BlockId { height, hash }) - .expect("must extend checkpoint"), - None => CheckPoint::new(BlockId { height, hash }), - }; - last_cp = Some(cp); - } + let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); + + let new_tip = new_blocks + .into_iter() + // Prune `new_blocks` to only include blocks that are actually new. 
+ .filter(|(height, _)| Some(*height) > agreement_height) + .map(|(height, hash)| BlockId { height, hash }) + .fold(agreement_cp, |prev_cp, block| { + Some(match prev_cp { + Some(cp) => cp.extend(block).expect("must extend checkpoint"), + None => CheckPoint::new(block), + }) + }) + .expect("must have at least one checkpoint"); - Ok(last_cp.expect("must have atleast one checkpoint")) + Ok((new_tip, agreement_height)) } +/// A [tx status] comprises of a concatenation of `tx_hash:height:`s. We transform a single one of +/// these concatenations into a [`ConfirmationHeightAnchor`] if possible. +/// +/// We use the lowest possible checkpoint as the anchor block (from `cps`). If an anchor block +/// cannot be found, or the transaction is unconfirmed, [`None`] is returned. +/// +/// [tx status](https://electrumx-spesmilo.readthedocs.io/en/latest/protocol-basics.html#status) fn determine_tx_anchor( - anchor_block: BlockId, + cps: &BTreeMap, raw_height: i32, txid: Txid, ) -> Option { @@ -350,6 +376,7 @@ fn determine_tx_anchor( == Txid::from_hex("4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b") .expect("must deserialize genesis coinbase txid") { + let anchor_block = cps.values().next()?.block_id(); return Some(ConfirmationHeightAnchor { anchor_block, confirmation_height: 0, @@ -362,6 +389,7 @@ fn determine_tx_anchor( } h => { let h = h as u32; + let anchor_block = cps.range(h..).next().map(|(_, cp)| cp.block_id())?; if h > anchor_block.height { None } else { @@ -376,7 +404,7 @@ fn determine_tx_anchor( fn populate_with_outpoints( client: &Client, - anchor_block: BlockId, + cps: &BTreeMap, update: &mut ElectrumUpdate, outpoints: &mut impl Iterator, ) -> Result, Error> { @@ -425,7 +453,7 @@ fn populate_with_outpoints( } }; - let anchor = determine_tx_anchor(anchor_block, res.height, res.tx_hash); + let anchor = determine_tx_anchor(cps, res.height, res.tx_hash); let tx_entry = update.graph_update.entry(res.tx_hash).or_default(); if let Some(anchor) = anchor { 
@@ -438,7 +466,7 @@ fn populate_with_outpoints( fn populate_with_txids( client: &Client, - anchor_block: BlockId, + cps: &BTreeMap, update: &mut ElectrumUpdate, txids: &mut impl Iterator, ) -> Result<(), Error> { @@ -460,7 +488,7 @@ fn populate_with_txids( .into_iter() .find(|r| r.tx_hash == txid) { - Some(r) => determine_tx_anchor(anchor_block, r.height, txid), + Some(r) => determine_tx_anchor(cps, r.height, txid), None => continue, }; @@ -474,7 +502,7 @@ fn populate_with_txids( fn populate_with_spks( client: &Client, - anchor_block: BlockId, + cps: &BTreeMap, update: &mut ElectrumUpdate, spks: &mut impl Iterator, stop_gap: usize, @@ -508,7 +536,7 @@ fn populate_with_spks( for tx in spk_history { let tx_entry = update.graph_update.entry(tx.tx_hash).or_default(); - if let Some(anchor) = determine_tx_anchor(anchor_block, tx.height, tx.tx_hash) { + if let Some(anchor) = determine_tx_anchor(cps, tx.height, tx.tx_hash) { tx_entry.insert(anchor); } } diff --git a/crates/electrum/src/lib.rs b/crates/electrum/src/lib.rs index ec693fda9..716c4d3f7 100644 --- a/crates/electrum/src/lib.rs +++ b/crates/electrum/src/lib.rs @@ -20,6 +20,8 @@ //! [`batch_transaction_get`]: electrum_client::ElectrumApi::batch_transaction_get //! 
[`bdk_electrum_example`]: https://github.com/LLFourn/bdk_core_staging/tree/master/bdk_electrum_example +#![warn(missing_docs)] + mod electrum_ext; pub use bdk_chain; pub use electrum_client; diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index 7fc08e822..0d07b1520 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -4,13 +4,11 @@ use bdk_chain::{ collections::BTreeMap, keychain::LocalUpdate, local_chain::CheckPoint, - BlockId, ConfirmationTimeAnchor, + BlockId, ConfirmationTimeAnchor, TxGraph, }; use esplora_client::{Error, OutputStatus, TxStatus}; use futures::{stream::FuturesOrdered, TryStreamExt}; -use crate::map_confirmation_time_anchor; - /// Trait to extend [`esplora_client::AsyncClient`] functionality. /// /// This is the async version of [`EsploraExt`]. Refer to @@ -96,61 +94,9 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { ) -> Result, Error> { let parallel_requests = Ord::max(parallel_requests, 1); - let (new_blocks, mut last_cp) = 'retry: loop { - let new_tip = loop { - let hash = self.get_tip_hash().await?; - let status = self.get_block_status(&hash).await?; - if status.in_best_chain && status.next_best.is_none() { - break BlockId { - height: status.height.expect("must have height"), - hash, - }; - } - }; - - let mut new_blocks = core::iter::once((new_tip.height, new_tip.hash)) - .collect::>(); - - let mut agreement_cp = Option::::None; - - for cp in prev_tip.iter().flat_map(CheckPoint::iter) { - let cp_block = cp.block_id(); - let hash = self.get_block_hash(cp_block.height).await?; - if hash == cp_block.hash { - agreement_cp = Some(cp); - break; - } - new_blocks.insert(cp_block.height, hash); - } - - // check for tip changes - // retry if there are changes to the tip - let status = self.get_block_status(&new_tip.hash).await?; - - if !status.in_best_chain || status.next_best.is_some() { - continue 'retry; - } - - // `new_blocks` should only include blocks that are actually new 
- let new_blocks = match &agreement_cp { - Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), - None => new_blocks, - }; - break 'retry (new_blocks, agreement_cp); - }; - - // construct checkpoints - for (&height, &hash) in new_blocks.iter() { - last_cp = Some(match last_cp { - Some(last_cp) => last_cp - .extend(BlockId { height, hash }) - .expect("must extend checkpoint"), - None => CheckPoint::new(BlockId { height, hash }), - }); - } - - let tip = last_cp.expect("must have atleast one checkpoint"); - let mut update = LocalUpdate::::new(tip.clone()); + let (tip, _) = construct_update_tip(self, prev_tip).await?; + let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); + let mut update = LocalUpdate::::new(tip); for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); @@ -202,7 +148,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { empty_scripts = 0; } for tx in related_txs { - let anchor = map_confirmation_time_anchor(&tx.status, &tip); + let anchor = make_anchor(&tx.status); let _ = update.graph.insert_tx(tx.to_tx()); if let Some(anchor) = anchor { @@ -232,7 +178,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } match self.get_tx_status(&txid).await? { tx_status if tx_status.confirmed => { - if let Some(anchor) = map_confirmation_time_anchor(&tx_status, &tip) { + if let Some(anchor) = make_anchor(&tx_status) { let _ = update.graph.insert_anchor(txid, anchor); } } @@ -266,7 +212,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { for (tx, status) in op_txs { let txid = tx.txid(); - let anchor = map_confirmation_time_anchor(&status, &tip); + let anchor = make_anchor(&status); let _ = update.graph.insert_tx(tx); if let Some(anchor) = anchor { @@ -275,36 +221,106 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { } } - if tip.hash() != self.get_block_hash(tip.height()).await? 
{ - // A reorg occurred, so let's find out where all the txids we found are now in the chain - let txids_found = update - .graph - .full_txs() - .map(|tx_node| tx_node.txid) - .collect::>(); - let new_update = EsploraAsyncExt::scan_without_keychain( - self, - Some(tip), - [], - txids_found, - [], - parallel_requests, - ) - .await?; - update.tip = new_update.tip; - update.graph = new_update.graph; - // update.chain = EsploraAsyncExt::scan_without_keychain( - // self, - // local_chain, - // [], - // txids_found, - // [], - // parallel_requests, - // ) - // .await? - // .chain; + // If a reorg occured during the update, anchors may be wrong. We handle this by scrapping + // all anchors, reconstructing checkpoints and reconstructing anchors. + while self.get_block_hash(update.tip.height()).await? != update.tip.hash() { + let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone())).await?; + make_anchor = crate::confirmation_time_anchor_maker(&new_tip); + + // Reconstruct graph with only transactions (no anchors). + update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); + update.tip = new_tip; + + // Re-fetch anchors. + let anchors = { + let mut a = Vec::new(); + for n in update.graph.full_txs() { + let status = self.get_tx_status(&n.txid).await?; + if !status.confirmed { + continue; + } + if let Some(anchor) = make_anchor(&status) { + a.push((n.txid, anchor)); + } + } + a + }; + for (txid, anchor) in anchors { + let _ = update.graph.insert_anchor(txid, anchor); + } } Ok(update) } } + +/// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return +/// the new checkpoint tip alongside the height of agreement between the two histories (if any). 
+#[allow(clippy::result_large_err)] +async fn construct_update_tip( + client: &esplora_client::AsyncClient, + prev_tip: Option, +) -> Result<(CheckPoint, Option), Error> { + let new_tip_height = client.get_height().await?; + + // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not + // need updating. We just return the previous tip and use that as the point of agreement. + if let Some(prev_tip) = prev_tip.as_ref() { + if new_tip_height < prev_tip.height() { + return Ok((prev_tip.clone(), Some(prev_tip.height()))); + } + } + + // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be + // reorged. This ensures that our checkpoint history is consistent. + let mut new_blocks = client + .get_blocks(Some(new_tip_height)) + .await? + .into_iter() + .zip((0..new_tip_height).rev()) + .map(|(b, height)| (height, b.id)) + .collect::>(); + + let mut agreement_cp = Option::::None; + + for cp in prev_tip.iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + + // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history + // consistent even during reorgs. + let hash = match new_blocks.get(&cp_block.height) { + Some(&hash) => hash, + None => { + assert!( + new_tip_height >= cp_block.height, + "already checked that esplora's tip cannot be smaller" + ); + let hash = client.get_block_hash(cp_block.height).await?; + new_blocks.insert(cp_block.height, hash); + hash + } + }; + + if hash == cp_block.hash { + agreement_cp = Some(cp); + break; + } + } + + let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); + + let new_tip = new_blocks + .into_iter() + // Prune `new_blocks` to only include blocks that are actually new. 
+ .filter(|(height, _)| Some(*height) > agreement_height) + .map(|(height, hash)| BlockId { height, hash }) + .fold(agreement_cp, |prev_cp, block| { + Some(match prev_cp { + Some(cp) => cp.extend(block).expect("must extend cp"), + None => CheckPoint::new(block), + }) + }) + .expect("must have at least one checkpoint"); + + Ok((new_tip, agreement_height)) +} diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 3649d7b21..27b9a4956 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -1,12 +1,10 @@ use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid}; use bdk_chain::collections::BTreeMap; use bdk_chain::local_chain::CheckPoint; -use bdk_chain::BlockId; use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor}; +use bdk_chain::{BlockId, TxGraph}; use esplora_client::{Error, OutputStatus, TxStatus}; -use crate::map_confirmation_time_anchor; - /// Trait to extend [`esplora_client::BlockingClient`] functionality. /// /// Refer to [crate-level documentation] for more. 
@@ -78,60 +76,9 @@ impl EsploraExt for esplora_client::BlockingClient { ) -> Result, Error> { let parallel_requests = Ord::max(parallel_requests, 1); - let (new_blocks, mut last_cp) = 'retry: loop { - let new_tip = loop { - let hash = self.get_tip_hash()?; - let status = self.get_block_status(&hash)?; - if status.in_best_chain && status.next_best.is_none() { - break BlockId { - height: status.height.expect("must have height"), - hash, - }; - } - }; - - let mut new_blocks = core::iter::once((new_tip.height, new_tip.hash)) - .collect::>(); - - let mut agreement_cp = Option::::None; - - for cp in prev_tip.iter().flat_map(CheckPoint::iter) { - let cp_block = cp.block_id(); - let hash = self.get_block_hash(cp_block.height)?; - if hash == cp_block.hash { - agreement_cp = Some(cp); - break; - } - new_blocks.insert(cp_block.height, hash); - } - - // check for tip changes - // retry if there are changes to the tip - let status = self.get_block_status(&new_tip.hash)?; - if !status.in_best_chain || status.next_best.is_some() { - continue 'retry; - } - - // `new_blocks` should only include blocks that are actually new - let new_blocks = match &agreement_cp { - Some(agreement_cp) => new_blocks.split_off(&(agreement_cp.height() + 1)), - None => new_blocks, - }; - break 'retry (new_blocks, agreement_cp); - }; - - // construct checkpoints - for (&height, &hash) in new_blocks.iter() { - last_cp = Some(match last_cp { - Some(last_cp) => last_cp - .extend(BlockId { height, hash }) - .expect("must extend checkpoint"), - None => CheckPoint::new(BlockId { height, hash }), - }); - } - - let tip = last_cp.expect("must have atleast one checkpoint"); - let mut update = LocalUpdate::::new(tip.clone()); + let (tip, _) = construct_update_tip(self, prev_tip)?; + let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); + let mut update = LocalUpdate::::new(tip); for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); @@ -184,8 +131,7 @@ impl EsploraExt for 
esplora_client::BlockingClient { empty_scripts = 0; } for tx in related_txs { - let anchor = map_confirmation_time_anchor(&tx.status, &tip); - + let anchor = make_anchor(&tx.status); let _ = update.graph.insert_tx(tx.to_tx()); if let Some(anchor) = anchor { let _ = update.graph.insert_anchor(tx.txid, anchor); @@ -213,10 +159,8 @@ impl EsploraExt for esplora_client::BlockingClient { } } match self.get_tx_status(&txid)? { - tx_status @ TxStatus { - confirmed: true, .. - } => { - if let Some(anchor) = map_confirmation_time_anchor(&tx_status, &tip) { + tx_status if tx_status.confirmed => { + if let Some(anchor) = make_anchor(&tx_status) { let _ = update.graph.insert_anchor(txid, anchor); } } @@ -248,7 +192,7 @@ impl EsploraExt for esplora_client::BlockingClient { for (tx, status) in op_txs { let txid = tx.txid(); - let anchor = map_confirmation_time_anchor(&status, &tip); + let anchor = make_anchor(&status); let _ = update.graph.insert_tx(tx); if let Some(anchor) = anchor { @@ -257,25 +201,103 @@ impl EsploraExt for esplora_client::BlockingClient { } } - if tip.hash() != self.get_block_hash(tip.height())? { - // A reorg occurred, so let's find out where all the txids we found are now in the chain - let txids_found = update + // If a reorg occured during the update, anchors may be wrong. We handle this by scrapping + // all anchors, reconstructing checkpoints and reconstructing anchors. + while self.get_block_hash(update.tip.height())? != update.tip.hash() { + let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone()))?; + make_anchor = crate::confirmation_time_anchor_maker(&new_tip); + + // Reconstruct graph with only transactions (no anchors). + update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); + update.tip = new_tip; + + // Re-fetch anchors. 
+ let anchors = update .graph .full_txs() - .map(|tx_node| tx_node.txid) - .collect::>(); - let new_update = EsploraExt::scan_without_keychain( - self, - Some(tip), - [], - txids_found, - [], - parallel_requests, - )?; - update.tip = new_update.tip; - update.graph = new_update.graph; + .filter_map(|n| match self.get_tx_status(&n.txid) { + Err(err) => Some(Err(err)), + Ok(status) if status.confirmed => make_anchor(&status).map(|a| Ok((n.txid, a))), + _ => None, + }) + .collect::, _>>()?; + for (txid, anchor) in anchors { + let _ = update.graph.insert_anchor(txid, anchor); + } } Ok(update) } } + +/// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return +/// the new checkpoint tip alongside the height of agreement between the two histories (if any). +#[allow(clippy::result_large_err)] +fn construct_update_tip( + client: &esplora_client::BlockingClient, + prev_tip: Option, +) -> Result<(CheckPoint, Option), Error> { + let new_tip_height = client.get_height()?; + + // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not + // need updating. We just return the previous tip and use that as the point of agreement. + if let Some(prev_tip) = prev_tip.as_ref() { + if new_tip_height < prev_tip.height() { + return Ok((prev_tip.clone(), Some(prev_tip.height()))); + } + } + + // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be + // reorged. This ensures that our checkpoint history is consistent. + let mut new_blocks = { + let heights = (0..new_tip_height).rev(); + let hashes = client + .get_blocks(Some(new_tip_height))? + .into_iter() + .map(|b| b.id); + heights.zip(hashes).collect::>() + }; + + let mut agreement_cp = Option::::None; + + for cp in prev_tip.iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + + // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history + // consistent even during reorgs. 
+ let hash = match new_blocks.get(&cp_block.height) { + Some(&hash) => hash, + None => { + assert!( + new_tip_height >= cp_block.height, + "already checked that esplora's tip cannot be smaller" + ); + let hash = client.get_block_hash(cp_block.height)?; + new_blocks.insert(cp_block.height, hash); + hash + } + }; + + if hash == cp_block.hash { + agreement_cp = Some(cp); + break; + } + } + + let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); + + let new_tip = new_blocks + .into_iter() + // Prune `new_blocks` to only include blocks that are actually new. + .filter(|(height, _)| Some(*height) > agreement_height) + .map(|(height, hash)| BlockId { height, hash }) + .fold(agreement_cp, |prev_cp, block| { + Some(match prev_cp { + Some(cp) => cp.extend(block).expect("must extend cp"), + None => CheckPoint::new(block), + }) + }) + .expect("must have at least one checkpoint"); + + Ok((new_tip, agreement_height)) +} diff --git a/crates/esplora/src/lib.rs b/crates/esplora/src/lib.rs index d1c68e81f..07ccdab8f 100644 --- a/crates/esplora/src/lib.rs +++ b/crates/esplora/src/lib.rs @@ -1,4 +1,6 @@ #![doc = include_str!("../README.md")] +use std::collections::BTreeMap; + use bdk_chain::{local_chain::CheckPoint, ConfirmationTimeAnchor}; use esplora_client::TxStatus; @@ -14,16 +16,25 @@ mod async_ext; #[cfg(feature = "async")] pub use async_ext::*; -pub(crate) fn map_confirmation_time_anchor( - tx_status: &TxStatus, +pub(crate) fn confirmation_time_anchor_maker( tip: &CheckPoint, -) -> Option { - match (tx_status.block_time, tx_status.block_height) { - (Some(confirmation_time), Some(confirmation_height)) => Some(ConfirmationTimeAnchor { - anchor_block: tip.block_id(), - confirmation_height, - confirmation_time, - }), +) -> impl FnMut(&TxStatus) -> Option { + let cache = tip + .iter() + .take(10) + .map(|cp| (cp.height(), cp)) + .collect::>(); + + move |status| match (status.block_time, status.block_height) { + (Some(confirmation_time), Some(confirmation_height)) => 
{ + let (_, anchor_cp) = cache.range(confirmation_height..).next()?; + + Some(ConfirmationTimeAnchor { + anchor_block: anchor_cp.block_id(), + confirmation_height, + confirmation_time, + }) + } _ => None, } } diff --git a/example-crates/wallet_esplora/src/main.rs b/example-crates/wallet_esplora/src/main.rs index 4e0476398..187091ff4 100644 --- a/example-crates/wallet_esplora/src/main.rs +++ b/example-crates/wallet_esplora/src/main.rs @@ -1,7 +1,7 @@ const DB_MAGIC: &str = "bdk_wallet_esplora_example"; const SEND_AMOUNT: u64 = 5000; const STOP_GAP: usize = 50; -const PARALLEL_REQUESTS: usize = 5; +const PARALLEL_REQUESTS: usize = 2; use std::{io::Write, str::FromStr}; From dd9f60c8eb6e51d6bf93841ba945d2de6924d0eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Wed, 28 Jun 2023 14:36:58 +0800 Subject: [PATCH 08/24] Add more documentation to `LocalChain` --- crates/chain/src/local_chain.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index d4d55dae0..a1ebd1a59 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -10,7 +10,9 @@ use bitcoin::BlockHash; /// A structure that represents changes to [`LocalChain`]. pub type ChangeSet = BTreeMap>; -/// Represents a block of [`LocalChain`]. +/// A block of [`LocalChain`]. +/// +/// Blocks are presented in a linked-list. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct CheckPoint(Arc); @@ -66,7 +68,7 @@ impl CheckPoint { })) } - /// Get previous checkpoint. + /// Get the previous checkpoint. pub fn prev(&self) -> Option { self.0.prev.clone().map(CheckPoint) } @@ -356,6 +358,10 @@ impl LocalChain { } } + /// Internal method for fixing pointers to make checkpoints a properly linked list. I.e. + /// [`CheckPoint::prev`] should return the previous checkpoint. + /// + /// We fix checkpoints from `start_height` and higher. 
fn fix_links(&mut self, start_height: u32) { let mut prev = self .checkpoints From 89186149e18dc2cff580ac9510dc5cb37acd2cf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Wed, 28 Jun 2023 15:17:54 +0800 Subject: [PATCH 09/24] Add test for `IndexedTxGraph::prune_and_apply_update` --- crates/bdk/tests/wallet.rs | 6 +- crates/chain/tests/test_indexed_tx_graph.rs | 81 ++++++++++++++++++++- 2 files changed, 81 insertions(+), 6 deletions(-) diff --git a/crates/bdk/tests/wallet.rs b/crates/bdk/tests/wallet.rs index ed014f70a..e8ded3146 100644 --- a/crates/bdk/tests/wallet.rs +++ b/crates/bdk/tests/wallet.rs @@ -429,11 +429,7 @@ fn test_create_tx_drain_wallet_and_drain_to_and_with_recipient() { fn test_create_tx_drain_to_and_utxos() { let (mut wallet, _) = get_funded_wallet(get_test_wpkh()); let addr = wallet.get_address(New); - let utxos: Vec<_> = wallet - .list_unspent() - .into_iter() - .map(|u| u.outpoint) - .collect(); + let utxos: Vec<_> = wallet.list_unspent().map(|u| u.outpoint).collect(); let mut builder = wallet.build_tx(); builder .drain_to(addr.script_pubkey()) diff --git a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs index 3319b2594..53f53016e 100644 --- a/crates/chain/tests/test_indexed_tx_graph.rs +++ b/crates/chain/tests/test_indexed_tx_graph.rs @@ -8,7 +8,7 @@ use bdk_chain::{ keychain::{Balance, DerivationAdditions, KeychainTxOutIndex}, local_chain::LocalChain, tx_graph::Additions, - ChainPosition, ConfirmationHeightAnchor, + ChainPosition, ConfirmationHeightAnchor, TxGraph, }; use bitcoin::{secp256k1::Secp256k1, BlockHash, OutPoint, Script, Transaction, TxIn, TxOut}; use miniscript::Descriptor; @@ -76,6 +76,85 @@ fn insert_relevant_txs() { ) } +/// Ensure [`IndexedTxGraph::prune_and_apply_update`] prunes irrelevant transactions. 
+#[test] +fn prune_and_apply_update() { + let secp = Secp256k1::signing_only(); + const DESCRIPTOR: &str = "tr(xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/0/*)"; + const IRRELEVANT_DESCRIPTOR: &str = "tr(xprv9xgqHN7yz9MwCkxsBPN5qetuNdQSUttZNKw1dcYTV4mkaAFiBVGQziHs3NRSWMkCzvgjEe3n9xV8oYywvM8at9yRqyaZVz6TYYhX98VjsUk/1/*)"; + let (descriptor, _) = Descriptor::parse_descriptor(&secp, DESCRIPTOR).expect("must be valid"); + let (irrelevant_descriptor, _) = + Descriptor::parse_descriptor(&secp, IRRELEVANT_DESCRIPTOR).expect("must be valid"); + + // relevant spks + let spk_relevant_0 = descriptor.at_derivation_index(0).script_pubkey(); + let spk_relevant_1 = descriptor.at_derivation_index(1).script_pubkey(); + let spk_relevant_2 = descriptor.at_derivation_index(2).script_pubkey(); + + // irrelevant spks + let spk_irrelevant_0 = irrelevant_descriptor.at_derivation_index(0).script_pubkey(); + let spk_irrelevant_1 = irrelevant_descriptor.at_derivation_index(1).script_pubkey(); + let spk_irrelevant_2 = irrelevant_descriptor.at_derivation_index(2).script_pubkey(); + + let tx_a = Transaction { + output: vec![ + TxOut { + value: 1000, + script_pubkey: spk_relevant_0, + }, + TxOut { + value: 2000, + script_pubkey: spk_irrelevant_0, + }, + ], + ..common::new_tx(0) + }; + + let tx_b = Transaction { + output: vec![ + TxOut { + value: 3000, + script_pubkey: spk_irrelevant_1, + }, + TxOut { + value: 4000, + script_pubkey: spk_irrelevant_2, + }, + ], + ..common::new_tx(1) + }; + + let tx_c = Transaction { + output: vec![ + TxOut { + value: 5000, + script_pubkey: spk_relevant_1, + }, + TxOut { + value: 6000, + script_pubkey: spk_relevant_2, + }, + ], + ..common::new_tx(1) + }; + + let mut graph = IndexedTxGraph::>::default(); + graph.index.add_keychain((), descriptor); + graph.index.set_lookahead_for_all(1); + + let additions = graph.prune_and_apply_update(TxGraph::new([tx_a.clone(), tx_b, tx_c.clone()])); + assert_eq!( 
+ additions, + IndexedAdditions { + graph_additions: Additions { + txs: [tx_a, tx_c].into(), + ..Default::default() + }, + index_additions: DerivationAdditions([((), 2_u32)].into()), + } + ); +} + #[test] /// Ensure consistency IndexedTxGraph list_* and balance methods. These methods lists /// relevant txouts and utxos from the information fetched from a ChainOracle (here a LocalChain). From f7d499aa98ba9c4c9bb949faf129d8d4c3a6dcb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Thu, 29 Jun 2023 11:42:47 +0800 Subject: [PATCH 10/24] [bitcoind_rpc] Add docs. --- crates/bitcoind_rpc/src/lib.rs | 304 +++++++++++++++++-------- example-crates/example_rpc/src/main.rs | 6 +- 2 files changed, 209 insertions(+), 101 deletions(-) diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index ccaf780b8..37d04cc7a 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -1,4 +1,7 @@ -use std::collections::HashSet; +//! This crate is used for updating [`bdk_chain`] structures with data from the `bitcoind` RPC +//! interface. + +#![warn(missing_docs)] use bdk_chain::{ bitcoin::{Block, Transaction, Txid}, @@ -8,20 +11,36 @@ use bdk_chain::{ }; pub use bitcoincore_rpc; use bitcoincore_rpc::{bitcoincore_rpc_json::GetBlockResult, Client, RpcApi}; +use std::collections::HashSet; +/// An update emitted from [`BitcoindRpcEmitter`]. This can either be of a block or a subset of +/// mempool transactions. #[derive(Debug, Clone)] -pub enum BitcoindRpcItem { +pub enum BitcoindRpcUpdate { + /// An emitted block. Block { + /// The checkpoint constructed from the block's height/hash and connected to the previous + /// block. cp: CheckPoint, + /// The result obtained from the `getblock` RPC call of this block's hash. info: Box, + /// block: Box, }, + /// An emitted subset of mempool transactions. + /// + /// [`BitcoindRpcEmitter`] attempts to avoid re-emitting transactions. Mempool { + /// The checkpoint of the last-seen tip. 
cp: CheckPoint, + /// Subset of mempool transactions. txs: Vec<(Transaction, u64)>, }, } +/// A closure that transforms a [`BitcoindRpcUpdate`] into a [`ConfirmationHeightAnchor`]. +/// +/// This is to be used as an input to [`BitcoindRpcUpdate::into_update`]. pub fn confirmation_height_anchor( info: &GetBlockResult, _txid: Txid, @@ -36,6 +55,9 @@ pub fn confirmation_height_anchor( } } +/// A closure that transforms a [`BitcoindRpcUpdate`] into a [`ConfirmationTimeAnchor`]. +/// +/// This is to be used as an input to [`BitcoindRpcUpdate::into_update`]. pub fn confirmation_time_anchor( info: &GetBlockResult, _txid: Txid, @@ -51,18 +73,29 @@ pub fn confirmation_time_anchor( } } -impl BitcoindRpcItem { +impl BitcoindRpcUpdate { + /// Returns whether the update is of a subset of the mempool. pub fn is_mempool(&self) -> bool { matches!(self, Self::Mempool { .. }) } + /// Returns whether the update is of a block. + pub fn is_block(&self) -> bool { + matches!(self, Self::Block { .. }) + } + + /// Transforms the [`BitcoindRpcUpdate`] into a [`LocalUpdate`]. + /// + /// [`confirmation_height_anchor`] and [`confirmation_time_anchor`] can be used as the `anchor` + /// intput to construct updates with [`ConfirmationHeightAnchor`]s and + /// [`ConfirmationTimeAnchor`]s respectively. 
pub fn into_update(self, anchor: F) -> LocalUpdate where A: Clone + Ord + PartialOrd, F: Fn(&GetBlockResult, Txid, usize) -> A, { match self { - BitcoindRpcItem::Block { cp, info, block } => LocalUpdate { + BitcoindRpcUpdate::Block { cp, info, block } => LocalUpdate { graph: { let mut g = TxGraph::::new(block.txdata); for (tx_pos, &txid) in info.tx.iter().enumerate() { @@ -72,7 +105,7 @@ impl BitcoindRpcItem { }, ..LocalUpdate::new(cp) }, - BitcoindRpcItem::Mempool { cp, txs } => LocalUpdate { + BitcoindRpcUpdate::Mempool { cp, txs } => LocalUpdate { graph: { let mut last_seens = Vec::<(Txid, u64)>::with_capacity(txs.len()); let mut g = TxGraph::::new(txs.into_iter().map(|(tx, last_seen)| { @@ -90,7 +123,43 @@ impl BitcoindRpcItem { } } -pub struct BitcoindRpcIter<'a> { +/// A structure that emits updates for [`bdk_chain`] structures, sourcing blockchain data from +/// [`bitcoincore_rpc::Client`]. +/// +/// Updates are of type [`BitcoindRpcUpdate`], where each update can either be of a whole block, or +/// a subset of the mempool. +/// +/// A [`BitcoindRpcEmitter`] emits updates starting from the `fallback_height` provided in [`new`], +/// or if `last_cp` is provided, we start from the height above the agreed-upon blockhash (between +/// `last_cp` and the state of `bitcoind`). Blocks are emitted in sequence (ascending order), and +/// the mempool contents emitted if the last emission is the chain tip. +/// +/// # [`Iterator`] implementation +/// +/// [`BitcoindRpcEmitter`] implements [`Iterator`] in a way such that even after [`Iterator::next`] +/// returns [`None`], subsequent calls may resume returning [`Some`]. +/// +/// Returning [`None`] means that the previous call to [`next`] is the mempool. This is useful if +/// the caller wishes to update once. 
+/// +/// ```rust,no_run +/// use bdk_bitcoind_rpc::{BitcoindRpcEmitter, BitcoindRpcUpdate}; +/// # let client = todo!(); +/// +/// for update in BitcoindRpcEmitter::new(&client, 709_632, None) { +/// match update.expect("todo: deal with the error properly") { +/// BitcoindRpcUpdate::Block { cp, .. } => println!("block {}:{}", cp.height(), cp.hash()), +/// BitcoindRpcUpdate::Mempool { .. } => println!("mempool"), +/// } +/// } +/// ``` +/// +/// Alternatively, if the caller wishes to keep [`BitcoindRpcEmitter`] in a dedicated update-thread, +/// the caller can continue to poll [`next`] (potentially with a delay). +/// +/// [`new`]: BitcoindRpcEmitter::new +/// [`next`]: Iterator::next +pub struct BitcoindRpcEmitter<'a> { client: &'a Client, fallback_height: u32, @@ -98,17 +167,29 @@ pub struct BitcoindRpcIter<'a> { last_info: Option, seen_txids: HashSet, + last_emission_was_mempool: bool, } -impl<'a> Iterator for BitcoindRpcIter<'a> { - type Item = Result; +impl<'a> Iterator for BitcoindRpcEmitter<'a> { + /// Represents an emitted item. + type Item = Result; fn next(&mut self) -> Option { - self.next_emission().transpose() + if self.last_emission_was_mempool { + self.last_emission_was_mempool = false; + None + } else { + Some(self.next_update()) + } } } -impl<'a> BitcoindRpcIter<'a> { +impl<'a> BitcoindRpcEmitter<'a> { + /// Constructs a new [`BitcoindRpcEmitter`] with the provided [`bitcoincore_rpc::Client`]. + /// + /// * `fallback_height` is the block height to start from if `last_cp` is not provided, or a + /// point of agreement is not found. + /// * `last_cp` is the last known checkpoint to build updates on (if any). pub fn new(client: &'a Client, fallback_height: u32, last_cp: Option) -> Self { Self { client, @@ -116,115 +197,142 @@ impl<'a> BitcoindRpcIter<'a> { last_cp, last_info: None, seen_txids: HashSet::new(), + last_emission_was_mempool: false, + } + } + + /// Continuously poll [`bitcoincore_rpc::Client`] until an update is found. 
+ pub fn next_update(&mut self) -> Result { + loop { + match self.poll()? { + Some(item) => return Ok(item), + None => continue, + }; } } - fn next_emission(&mut self) -> Result, bitcoincore_rpc::Error> { + /// Performs a single round of polling [`bitcoincore_rpc::Client`] and updating the internal + /// state. This returns [`Ok(Some(BitcoindRpcUpdate))`] if an update is found. + pub fn poll(&mut self) -> Result, bitcoincore_rpc::Error> { let client = self.client; + self.last_emission_was_mempool = false; - 'main_loop: loop { - match (&mut self.last_cp, &mut self.last_info) { - (last_cp @ None, last_info @ None) => { - // get first item at fallback_height - let info = client - .get_block_info(&client.get_block_hash(self.fallback_height as _)?)?; - let block = self.client.get_block(&info.hash)?; - let cp = CheckPoint::new(BlockId { - height: info.height as _, - hash: info.hash, - }); - *last_info = Some(info.clone()); - *last_cp = Some(cp.clone()); - return Ok(Some(BitcoindRpcItem::Block { - cp, - info: Box::new(info), - block: Box::new(block), - })); + match (&mut self.last_cp, &mut self.last_info) { + // If `last_cp` and `last_info` are both none, we need to emit from the + // `fallback_height`. `last_cp` and `last_info` will both be updated to the emitted + // block. + (last_cp @ None, last_info @ None) => { + let info = + client.get_block_info(&client.get_block_hash(self.fallback_height as _)?)?; + let block = self.client.get_block(&info.hash)?; + let cp = CheckPoint::new(BlockId { + height: info.height as _, + hash: info.hash, + }); + *last_cp = Some(cp.clone()); + *last_info = Some(info.clone()); + Ok(Some(BitcoindRpcUpdate::Block { + cp, + info: Box::new(info), + block: Box::new(block), + })) + } + // If `last_cp` exists, but `last_info` does not, it means we have not fetched a + // block from the client yet, but we have a previous checkpoint which we can use to + // find the point of agreement with. + // + // We don't emit in this match case. 
Instead, we set the state to either: + // * { last_cp: Some, last_info: Some } : When we find a point of agreement. + // * { last_cp: None, last_indo: None } : When we cannot find a point of agreement. + (last_cp @ Some(_), last_info @ None) => { + for cp in last_cp.clone().iter().flat_map(CheckPoint::iter) { + let cp_block = cp.block_id(); + + let info = client.get_block_info(&cp_block.hash)?; + if info.confirmations < 0 { + // block is not in the main chain + continue; + } + // agreement found + *last_cp = Some(cp); + *last_info = Some(info); + return Ok(None); } - (last_cp @ Some(_), last_info @ None) => { - 'cp_loop: for cp in last_cp.clone().iter().flat_map(CheckPoint::iter) { - let cp_block = cp.block_id(); - let info = client.get_block_info(&cp_block.hash)?; + // no point of agreement found, next call will emit block @ fallback height + *last_cp = None; + *last_info = None; + Ok(None) + } + // If `last_cp` and `last_info` is both `Some`, we either emit a block at + // `last_info.nextblockhash` (if it exists), or we emit a subset of the mempool. + (Some(last_cp), last_info @ Some(_)) => { + // find next block + match last_info.as_ref().unwrap().nextblockhash { + Some(next_hash) => { + let info = self.client.get_block_info(&next_hash)?; + if info.confirmations < 0 { - // block is not in the main chain - continue 'cp_loop; + *last_info = None; + return Ok(None); } - // agreement - *last_cp = Some(cp); - *last_info = Some(info); - continue 'main_loop; + let block = self.client.get_block(&info.hash)?; + let cp = last_cp + .clone() + .extend(BlockId { + height: info.height as _, + hash: info.hash, + }) + .expect("must extend from checkpoint"); + + *last_cp = cp.clone(); + *last_info = Some(info.clone()); + + Ok(Some(BitcoindRpcUpdate::Block { + cp, + info: Box::new(info), + block: Box::new(block), + })) } + None => { + let mempool_txs = client + .get_raw_mempool()? 
+ .into_iter() + .filter(|&txid| self.seen_txids.insert(txid)) + .map( + |txid| -> Result<(Transaction, u64), bitcoincore_rpc::Error> { + let first_seen = + client.get_mempool_entry(&txid).map(|entry| entry.time)?; + let tx = client.get_raw_transaction(&txid, None)?; + Ok((tx, first_seen)) + }, + ) + .collect::, _>>()?; - // no point of agreement found - // next loop will emit block @ fallback height - *last_cp = None; - *last_info = None; - } - (Some(last_cp), last_info @ Some(_)) => { - // find next block - match last_info.as_ref().unwrap().nextblockhash { - Some(next_hash) => { - let info = self.client.get_block_info(&next_hash)?; - - if info.confirmations < 0 { - *last_info = None; - continue 'main_loop; - } - - let block = self.client.get_block(&info.hash)?; - let cp = last_cp - .clone() - .extend(BlockId { - height: info.height as _, - hash: info.hash, - }) - .expect("must extend from checkpoint"); - - *last_cp = cp.clone(); - *last_info = Some(info.clone()); - - return Ok(Some(BitcoindRpcItem::Block { - cp, - info: Box::new(info), - block: Box::new(block), - })); - } - None => { - // emit from mempool! - let mempool_txs = client - .get_raw_mempool()? - .into_iter() - .filter(|&txid| self.seen_txids.insert(txid)) - .map( - |txid| -> Result<(Transaction, u64), bitcoincore_rpc::Error> { - let first_seen = client - .get_mempool_entry(&txid) - .map(|entry| entry.time)?; - let tx = client.get_raw_transaction(&txid, None)?; - Ok((tx, first_seen)) - }, - ) - .collect::, _>>()?; - - // remove last info... - *last_info = None; + // After a mempool emission, we want to find the point of agreement in + // the next round. + *last_info = None; - return Ok(Some(BitcoindRpcItem::Mempool { - txs: mempool_txs, - cp: last_cp.clone(), - })); - } + self.last_emission_was_mempool = true; + Ok(Some(BitcoindRpcUpdate::Mempool { + txs: mempool_txs, + cp: last_cp.clone(), + })) } } - (None, Some(info)) => unreachable!("got info with no checkpoint? 
info={:#?}", info), } + (None, Some(info)) => unreachable!("got info with no checkpoint? info={:#?}", info), } } } +/// Extends [`bitcoincore_rpc::Error`]. pub trait BitcoindRpcErrorExt { + /// Returns whether the error is a "not found" error. + /// + /// This is useful since [`BitcoindRpcEmitter`] emits [`Result<_, bitcoincore_rpc::Error>`]s as + /// [`Iterator::Item`]. fn is_not_found_error(&self) -> bool; } diff --git a/example-crates/example_rpc/src/main.rs b/example-crates/example_rpc/src/main.rs index c6e148958..b7eb3a859 100644 --- a/example-crates/example_rpc/src/main.rs +++ b/example-crates/example_rpc/src/main.rs @@ -10,7 +10,7 @@ use std::{ use bdk_bitcoind_rpc::{ bitcoincore_rpc::{Auth, Client, RpcApi}, - confirmation_time_anchor, BitcoindRpcItem, BitcoindRpcIter, + confirmation_time_anchor, BitcoindRpcEmitter, BitcoindRpcUpdate, }; use bdk_chain::{ bitcoin::{Address, Transaction}, @@ -151,13 +151,13 @@ fn main() -> anyhow::Result<()> { } => { graph.lock().unwrap().index.set_lookahead_for_all(lookahead); - let (chan, recv) = sync_channel::<(BitcoindRpcItem, u32)>(CHANNEL_BOUND); + let (chan, recv) = sync_channel::<(BitcoindRpcUpdate, u32)>(CHANNEL_BOUND); let prev_cp = chain.lock().unwrap().tip(); let join_handle = std::thread::spawn(move || -> anyhow::Result<()> { let mut tip_height = Option::::None; - for item in BitcoindRpcIter::new(&rpc_client, fallback_height, prev_cp) { + for item in BitcoindRpcEmitter::new(&rpc_client, fallback_height, prev_cp) { let item = item?; let is_block = !item.is_mempool(); let is_mempool = item.is_mempool(); From 8cab2c5ec3b506a7c79c5be9ed8ea387dcab991d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 3 Jul 2023 09:45:21 +0800 Subject: [PATCH 11/24] `bdk_esplora` redesign The old logic cannot guarantee consistency of the chain history (i.e. we may result in a `LocalChain` state which contain blocks that cannot belong in the same chain). 
We also had to do a rescan if the tip has changed since the start of the scan. To fix this, we anchor transactions at the confirmation height and hash (which is provided by the esplora API). We fetch the `LocalChain` update separately, and guarantee consistency by fetching most-recent blocks atomically via the `GET /blocks` endpoint. Because the anchors need to be reflected in `LocalChain`, `TxGraph::missing_blocks(&self, chain: &LocalChain)` is introduced to fetch missing blocks in a separate call. --- crates/chain/src/tx_graph.rs | 29 +- crates/esplora/src/async_ext.rs | 667 ++++++++++++------ crates/esplora/src/blocking_ext.rs | 437 ++++++------ crates/esplora/src/lib.rs | 39 +- example-crates/wallet_esplora/src/main.rs | 28 +- .../wallet_esplora_async/src/main.rs | 16 +- 6 files changed, 751 insertions(+), 465 deletions(-) diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs index bc72cc50f..ff9f70bbe 100644 --- a/crates/chain/src/tx_graph.rs +++ b/crates/chain/src/tx_graph.rs @@ -56,8 +56,8 @@ //! ``` use crate::{ - collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition, - ForEachTxOut, FullTxOut, + collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId, + ChainOracle, ChainPosition, ForEachTxOut, FullTxOut, }; use alloc::vec::Vec; use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid}; @@ -598,6 +598,31 @@ impl TxGraph { } impl TxGraph { + /// Find missing block heights of `chain`. + /// + /// This works by scanning through anchors, and seeing whether the anchor block of the anchor + /// exists in the [`LocalChain`]. 
+ pub fn missing_blocks<'a>(&'a self, chain: &'a LocalChain) -> impl Iterator + 'a { + self.anchors + .iter() + .map(|(a, _)| a.anchor_block()) + .filter({ + let mut last_block = Option::::None; + move |block| { + if last_block.as_ref() == Some(block) { + false + } else { + last_block = Some(*block); + true + } + } + }) + .filter_map(|block| match chain.checkpoints().get(&block.height) { + Some(chain_cp) if chain_cp.hash() == block.hash => None, + _ => Some(block.height), + }) + } + /// Get the position of the transaction in `chain` with tip `chain_tip`. /// /// If the given transaction of `txid` does not exist in the chain of `chain_tip`, `None` is diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index 0d07b1520..c02d71aff 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -1,40 +1,55 @@ use async_trait::async_trait; +use bdk_chain::collections::btree_map; use bdk_chain::{ bitcoin::{BlockHash, OutPoint, Script, Txid}, collections::BTreeMap, - keychain::LocalUpdate, local_chain::CheckPoint, BlockId, ConfirmationTimeAnchor, TxGraph, }; -use esplora_client::{Error, OutputStatus, TxStatus}; +use esplora_client::{Error, TxStatus}; use futures::{stream::FuturesOrdered, TryStreamExt}; -/// Trait to extend [`esplora_client::AsyncClient`] functionality. +use crate::{anchor_from_status, ASSUME_FINAL_DEPTH}; + +/// Trait to extend the functionality of [`esplora_client::AsyncClient`]. /// -/// This is the async version of [`EsploraExt`]. Refer to -/// [crate-level documentation] for more. +/// Refer to [crate-level documentation] for more. /// -/// [`EsploraExt`]: crate::EsploraExt /// [crate-level documentation]: crate #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait)] pub trait EsploraAsyncExt { - /// Scan the blockchain (via esplora) for the data specified and returns a - /// [`LocalUpdate`]. 
+ /// Prepare an [`LocalChain`] update with blocks fetched from Esplora. + /// + /// * `prev_tip` is the previous tip of [`LocalChain::tip`]. + /// * `get_heights` is the block heights that we are interested in fetching from Esplora. + /// + /// The result of this method can be applied to [`LocalChain::update`]. /// - /// - `local_chain`: the most recent block hashes present locally - /// - `keychain_spks`: keychains that we want to scan transactions for - /// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s - /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we - /// want to included in the update + /// [`LocalChain`]: bdk_chain::local_chain::LocalChain + /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip + /// [`LocalChain::update`]: bdk_chain::local_chain::LocalChain::update + #[allow(clippy::result_large_err)] + async fn update_local_chain( + &self, + prev_tip: Option, + get_heights: impl IntoIterator + Send> + Send, + ) -> Result; + + /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active + /// indices. + /// + /// * `keychain_spks`: keychains that we want to scan transactions for + /// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s + /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we + /// want to include in the update /// /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in /// parallel. 
- #[allow(clippy::result_large_err)] // FIXME - async fn scan( + #[allow(clippy::result_large_err)] + async fn update_tx_graph( &self, - prev_tip: Option, keychain_spks: BTreeMap< K, impl IntoIterator + Send> + Send, @@ -43,22 +58,20 @@ pub trait EsploraAsyncExt { outpoints: impl IntoIterator + Send> + Send, stop_gap: usize, parallel_requests: usize, - ) -> Result, Error>; + ) -> Result<(TxGraph, BTreeMap), Error>; - /// Convenience method to call [`scan`] without requiring a keychain. + /// Convenience method to call [`update_tx_graph`] without requiring a keychain. /// - /// [`scan`]: EsploraAsyncExt::scan - #[allow(clippy::result_large_err)] // FIXME - async fn scan_without_keychain( + /// [`update_tx_graph`]: EsploraAsyncExt::update_tx_graph + #[allow(clippy::result_large_err)] + async fn update_tx_graph_without_keychain( &self, - prev_tip: Option, misc_spks: impl IntoIterator + Send> + Send, txids: impl IntoIterator + Send> + Send, outpoints: impl IntoIterator + Send> + Send, parallel_requests: usize, - ) -> Result, Error> { - self.scan( - prev_tip, + ) -> Result, Error> { + self.update_tx_graph( [( (), misc_spks @@ -73,16 +86,117 @@ pub trait EsploraAsyncExt { parallel_requests, ) .await + .map(|(g, _)| g) } } #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait)] impl EsploraAsyncExt for esplora_client::AsyncClient { - #[allow(clippy::result_large_err)] // FIXME - async fn scan( + async fn update_local_chain( &self, prev_tip: Option, + get_heights: impl IntoIterator + Send> + Send, + ) -> Result { + let new_tip_height = self.get_height().await?; + + // If esplora returns a tip height that is lower than our previous tip, then checkpoints do + // not need updating. We just return the previous tip and use that as the point of + // agreement. 
+ if let Some(prev_tip) = prev_tip.as_ref() { + if new_tip_height < prev_tip.height() { + return Ok(prev_tip.clone()); + } + } + + // Fetch new block IDs that are to be included in the update. This includes: + // 1. Atomically fetched most-recent blocks so we have a consistent view even during reorgs. + // 2. Heights the caller is interested in (as specified in `get_heights`). + let mut new_blocks = { + let heights = (0..=new_tip_height).rev(); + let hashes = self + .get_blocks(Some(new_tip_height)) + .await? + .into_iter() + .map(|b| b.id); + + let mut new_blocks = heights.zip(hashes).collect::>(); + + for height in get_heights { + // do not fetch blocks higher than known tip + if height > new_tip_height { + continue; + } + if let btree_map::Entry::Vacant(entry) = new_blocks.entry(height) { + let hash = self.get_block_hash(height).await?; + entry.insert(hash); + } + } + + new_blocks + }; + + // Determine the checkpoint to start building our update tip from. + let first_cp = match prev_tip { + Some(old_tip) => { + let old_tip_height = old_tip.height(); + let mut earliest_agreement_cp = Option::::None; + + for old_cp in old_tip.iter() { + let old_block = old_cp.block_id(); + + let new_hash = match new_blocks.entry(old_block.height) { + btree_map::Entry::Vacant(entry) => *entry.insert( + if old_tip_height - old_block.height >= ASSUME_FINAL_DEPTH { + old_block.hash + } else { + self.get_block_hash(old_block.height).await? + }, + ), + btree_map::Entry::Occupied(entry) => *entry.get(), + }; + + // Since we may introduce blocks below the point of agreement, we cannot break + // here unconditionally. We only break if we guarantee there are no new heights + // below our current. 
+ if old_block.hash == new_hash { + earliest_agreement_cp = Some(old_cp); + + let first_new_height = *new_blocks + .keys() + .next() + .expect("must have atleast one new block"); + if first_new_height <= old_block.height { + break; + } + } + } + + earliest_agreement_cp + } + None => None, + } + .unwrap_or_else(|| { + let (&height, &hash) = new_blocks + .iter() + .next() + .expect("must have atleast one new block"); + CheckPoint::new(BlockId { height, hash }) + }); + + let new_tip = new_blocks + .split_off(&(first_cp.height() + 1)) + .into_iter() + .map(|(height, hash)| BlockId { height, hash }) + .fold(first_cp, |prev_cp, block| { + prev_cp.extend(block).expect("must extend checkpoint") + }); + + Ok(new_tip) + } + + async fn update_tx_graph( + &self, keychain_spks: BTreeMap< K, impl IntoIterator + Send> + Send, @@ -91,236 +205,365 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { outpoints: impl IntoIterator + Send> + Send, stop_gap: usize, parallel_requests: usize, - ) -> Result, Error> { + ) -> Result<(TxGraph, BTreeMap), Error> { + type TxsOfSpkIndex = (u32, Vec); let parallel_requests = Ord::max(parallel_requests, 1); - - let (tip, _) = construct_update_tip(self, prev_tip).await?; - let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); - let mut update = LocalUpdate::::new(tip); + let mut graph = TxGraph::::default(); + let mut last_active_indexes = BTreeMap::::new(); for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); - let mut last_active_index = None; - let mut empty_scripts = 0; - type IndexWithTxs = (u32, Vec); + let mut last_index = Option::::None; + let mut last_active_index = Option::::None; loop { - let futures = (0..parallel_requests) - .filter_map(|_| { - let (index, script) = spks.next()?; + let handles = spks + .by_ref() + .take(parallel_requests) + .map(|(spk_index, spk)| { let client = self.clone(); - Some(async move { - let mut related_txs = client.scripthash_txs(&script, None).await?; - - let 
n_confirmed = - related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there are 25 or more we - // keep requesting to see if there's more. - if n_confirmed >= 25 { - loop { - let new_related_txs = client - .scripthash_txs( - &script, - Some(related_txs.last().unwrap().txid), - ) - .await?; - let n = new_related_txs.len(); - related_txs.extend(new_related_txs); - // we've reached the end - if n < 25 { - break; - } + async move { + let mut last_seen = None; + let mut spk_txs = Vec::new(); + loop { + let txs = client.scripthash_txs(&spk, last_seen).await?; + let tx_count = txs.len(); + last_seen = txs.last().map(|tx| tx.txid); + spk_txs.extend(txs); + if tx_count < 25 { + break Result::<_, Error>::Ok((spk_index, spk_txs)); } } - - Result::<_, esplora_client::Error>::Ok((index, related_txs)) - }) + } }) .collect::>(); - let n_futures = futures.len(); + if handles.is_empty() { + break; + } - for (index, related_txs) in futures.try_collect::>().await? { - if related_txs.is_empty() { - empty_scripts += 1; - } else { + for (index, txs) in handles.try_collect::>().await? 
{ + last_index = Some(index); + if !txs.is_empty() { last_active_index = Some(index); - empty_scripts = 0; } - for tx in related_txs { - let anchor = make_anchor(&tx.status); - - let _ = update.graph.insert_tx(tx.to_tx()); - if let Some(anchor) = anchor { - let _ = update.graph.insert_anchor(tx.txid, anchor); + for tx in txs { + let _ = graph.insert_tx(tx.to_tx()); + if let Some(anchor) = anchor_from_status(&tx.status) { + let _ = graph.insert_anchor(tx.txid, anchor); } } } - if n_futures == 0 || empty_scripts >= stop_gap { + if last_index > last_active_index.map(|i| i + stop_gap as u32) { break; } } if let Some(last_active_index) = last_active_index { - update.keychain.insert(keychain, last_active_index); + last_active_indexes.insert(keychain, last_active_index); } } - for txid in txids.into_iter() { - if update.graph.get_tx(txid).is_none() { - match self.get_tx(&txid).await? { - Some(tx) => { - let _ = update.graph.insert_tx(tx); - } - None => continue, - } + let mut txids = txids.into_iter(); + loop { + let handles = txids + .by_ref() + .take(parallel_requests) + .filter(|&txid| graph.get_tx(txid).is_none()) + .map(|txid| { + let client = self.clone(); + async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) } + }) + .collect::>(); + // .collect::>>>(); + + if handles.is_empty() { + break; } - match self.get_tx_status(&txid).await? { - tx_status if tx_status.confirmed => { - if let Some(anchor) = make_anchor(&tx_status) { - let _ = update.graph.insert_anchor(txid, anchor); - } + + for (txid, status) in handles.try_collect::>().await? { + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(txid, anchor); } - _ => continue, } } for op in outpoints.into_iter() { - let mut op_txs = Vec::with_capacity(2); - if let ( - Some(tx), - tx_status @ TxStatus { - confirmed: true, .. 
- }, - ) = ( - self.get_tx(&op.txid).await?, - self.get_tx_status(&op.txid).await?, - ) { - op_txs.push((tx, tx_status)); - if let Some(OutputStatus { - txid: Some(txid), - status: Some(spend_status), - .. - }) = self.get_output_status(&op.txid, op.vout as _).await? - { - if let Some(spend_tx) = self.get_tx(&txid).await? { - op_txs.push((spend_tx, spend_status)); - } + if graph.get_tx(op.txid).is_none() { + if let Some(tx) = self.get_tx(&op.txid).await? { + let _ = graph.insert_tx(tx); } - } - - for (tx, status) in op_txs { - let txid = tx.txid(); - let anchor = make_anchor(&status); - - let _ = update.graph.insert_tx(tx); - if let Some(anchor) = anchor { - let _ = update.graph.insert_anchor(txid, anchor); + let status = self.get_tx_status(&op.txid).await?; + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(op.txid, anchor); } } - } - // If a reorg occured during the update, anchors may be wrong. We handle this by scrapping - // all anchors, reconstructing checkpoints and reconstructing anchors. - while self.get_block_hash(update.tip.height()).await? != update.tip.hash() { - let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone())).await?; - make_anchor = crate::confirmation_time_anchor_maker(&new_tip); - - // Reconstruct graph with only transactions (no anchors). - update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); - update.tip = new_tip; - - // Re-fetch anchors. - let anchors = { - let mut a = Vec::new(); - for n in update.graph.full_txs() { - let status = self.get_tx_status(&n.txid).await?; - if !status.confirmed { - continue; - } - if let Some(anchor) = make_anchor(&status) { - a.push((n.txid, anchor)); + if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? { + if let Some(txid) = op_status.txid { + if graph.get_tx(txid).is_none() { + if let Some(tx) = self.get_tx(&txid).await? 
{ + let _ = graph.insert_tx(tx); + } + let status = self.get_tx_status(&txid).await?; + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(txid, anchor); + } } } - a - }; - for (txid, anchor) in anchors { - let _ = update.graph.insert_anchor(txid, anchor); } } - Ok(update) + Ok((graph, last_active_indexes)) } } -/// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return -/// the new checkpoint tip alongside the height of agreement between the two histories (if any). -#[allow(clippy::result_large_err)] -async fn construct_update_tip( - client: &esplora_client::AsyncClient, - prev_tip: Option, -) -> Result<(CheckPoint, Option), Error> { - let new_tip_height = client.get_height().await?; - - // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not - // need updating. We just return the previous tip and use that as the point of agreement. - if let Some(prev_tip) = prev_tip.as_ref() { - if new_tip_height < prev_tip.height() { - return Ok((prev_tip.clone(), Some(prev_tip.height()))); - } - } +// #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] +// #[cfg_attr(not(target_arch = "wasm32"), async_trait)] +// impl EsploraAsyncExt for esplora_client::AsyncClient { +// #[allow(clippy::result_large_err)] // FIXME +// async fn scan( +// &self, +// prev_tip: Option, +// keychain_spks: BTreeMap< +// K, +// impl IntoIterator + Send> + Send, +// >, +// txids: impl IntoIterator + Send> + Send, +// outpoints: impl IntoIterator + Send> + Send, +// stop_gap: usize, +// parallel_requests: usize, +// ) -> Result, Error> { +// let parallel_requests = Ord::max(parallel_requests, 1); - // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be - // reorged. This ensures that our checkpoint history is consistent. - let mut new_blocks = client - .get_blocks(Some(new_tip_height)) - .await? 
- .into_iter() - .zip((0..new_tip_height).rev()) - .map(|(b, height)| (height, b.id)) - .collect::>(); - - let mut agreement_cp = Option::::None; - - for cp in prev_tip.iter().flat_map(CheckPoint::iter) { - let cp_block = cp.block_id(); - - // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history - // consistent even during reorgs. - let hash = match new_blocks.get(&cp_block.height) { - Some(&hash) => hash, - None => { - assert!( - new_tip_height >= cp_block.height, - "already checked that esplora's tip cannot be smaller" - ); - let hash = client.get_block_hash(cp_block.height).await?; - new_blocks.insert(cp_block.height, hash); - hash - } - }; +// let (tip, _) = construct_update_tip(self, prev_tip).await?; +// let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); +// let mut update = LocalUpdate::::new(tip); - if hash == cp_block.hash { - agreement_cp = Some(cp); - break; - } - } +// for (keychain, spks) in keychain_spks { +// let mut spks = spks.into_iter(); +// let mut last_active_index = None; +// let mut empty_scripts = 0; +// type IndexWithTxs = (u32, Vec); - let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); - - let new_tip = new_blocks - .into_iter() - // Prune `new_blocks` to only include blocks that are actually new. 
- .filter(|(height, _)| Some(*height) > agreement_height) - .map(|(height, hash)| BlockId { height, hash }) - .fold(agreement_cp, |prev_cp, block| { - Some(match prev_cp { - Some(cp) => cp.extend(block).expect("must extend cp"), - None => CheckPoint::new(block), - }) - }) - .expect("must have at least one checkpoint"); - - Ok((new_tip, agreement_height)) -} +// loop { +// let futures = (0..parallel_requests) +// .filter_map(|_| { +// let (index, script) = spks.next()?; +// let client = self.clone(); +// Some(async move { +// let mut related_txs = client.scripthash_txs(&script, None).await?; + +// let n_confirmed = +// related_txs.iter().filter(|tx| tx.status.confirmed).count(); +// // esplora pages on 25 confirmed transactions. If there are 25 or more we +// // keep requesting to see if there's more. +// if n_confirmed >= 25 { +// loop { +// let new_related_txs = client +// .scripthash_txs( +// &script, +// Some(related_txs.last().unwrap().txid), +// ) +// .await?; +// let n = new_related_txs.len(); +// related_txs.extend(new_related_txs); +// // we've reached the end +// if n < 25 { +// break; +// } +// } +// } + +// Result::<_, esplora_client::Error>::Ok((index, related_txs)) +// }) +// }) +// .collect::>(); + +// let n_futures = futures.len(); + +// for (index, related_txs) in futures.try_collect::>().await? 
{ +// if related_txs.is_empty() { +// empty_scripts += 1; +// } else { +// last_active_index = Some(index); +// empty_scripts = 0; +// } +// for tx in related_txs { +// let anchor = make_anchor(&tx.status); + +// let _ = update.graph.insert_tx(tx.to_tx()); +// if let Some(anchor) = anchor { +// let _ = update.graph.insert_anchor(tx.txid, anchor); +// } +// } +// } + +// if n_futures == 0 || empty_scripts >= stop_gap { +// break; +// } +// } + +// if let Some(last_active_index) = last_active_index { +// update.keychain.insert(keychain, last_active_index); +// } +// } + +// for txid in txids.into_iter() { +// if update.graph.get_tx(txid).is_none() { +// match self.get_tx(&txid).await? { +// Some(tx) => { +// let _ = update.graph.insert_tx(tx); +// } +// None => continue, +// } +// } +// match self.get_tx_status(&txid).await? { +// tx_status if tx_status.confirmed => { +// if let Some(anchor) = make_anchor(&tx_status) { +// let _ = update.graph.insert_anchor(txid, anchor); +// } +// } +// _ => continue, +// } +// } + +// for op in outpoints.into_iter() { +// let mut op_txs = Vec::with_capacity(2); +// if let ( +// Some(tx), +// tx_status @ TxStatus { +// confirmed: true, .. +// }, +// ) = ( +// self.get_tx(&op.txid).await?, +// self.get_tx_status(&op.txid).await?, +// ) { +// op_txs.push((tx, tx_status)); +// if let Some(OutputStatus { +// txid: Some(txid), +// status: Some(spend_status), +// .. +// }) = self.get_output_status(&op.txid, op.vout as _).await? +// { +// if let Some(spend_tx) = self.get_tx(&txid).await? { +// op_txs.push((spend_tx, spend_status)); +// } +// } +// } + +// for (tx, status) in op_txs { +// let txid = tx.txid(); +// let anchor = make_anchor(&status); + +// let _ = update.graph.insert_tx(tx); +// if let Some(anchor) = anchor { +// let _ = update.graph.insert_anchor(txid, anchor); +// } +// } +// } + +// // If a reorg occured during the update, anchors may be wrong. 
We handle this by scrapping +// // all anchors, reconstructing checkpoints and reconstructing anchors. +// while self.get_block_hash(update.tip.height()).await? != update.tip.hash() { +// let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone())).await?; +// make_anchor = crate::confirmation_time_anchor_maker(&new_tip); + +// // Reconstruct graph with only transactions (no anchors). +// update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); +// update.tip = new_tip; + +// // Re-fetch anchors. +// let anchors = { +// let mut a = Vec::new(); +// for n in update.graph.full_txs() { +// let status = self.get_tx_status(&n.txid).await?; +// if !status.confirmed { +// continue; +// } +// if let Some(anchor) = make_anchor(&status) { +// a.push((n.txid, anchor)); +// } +// } +// a +// }; +// for (txid, anchor) in anchors { +// let _ = update.graph.insert_anchor(txid, anchor); +// } +// } + +// Ok(update) +// } +// } + +// /// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return +// /// the new checkpoint tip alongside the height of agreement between the two histories (if any). +// #[allow(clippy::result_large_err)] +// async fn construct_update_tip( +// client: &esplora_client::AsyncClient, +// prev_tip: Option, +// ) -> Result<(CheckPoint, Option), Error> { +// let new_tip_height = client.get_height().await?; + +// // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not +// // need updating. We just return the previous tip and use that as the point of agreement. +// if let Some(prev_tip) = prev_tip.as_ref() { +// if new_tip_height < prev_tip.height() { +// return Ok((prev_tip.clone(), Some(prev_tip.height()))); +// } +// } + +// // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be +// // reorged. This ensures that our checkpoint history is consistent. 
+// let mut new_blocks = client +// .get_blocks(Some(new_tip_height)) +// .await? +// .into_iter() +// .zip((0..new_tip_height).rev()) +// .map(|(b, height)| (height, b.id)) +// .collect::>(); + +// let mut agreement_cp = Option::::None; + +// for cp in prev_tip.iter().flat_map(CheckPoint::iter) { +// let cp_block = cp.block_id(); + +// // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history +// // consistent even during reorgs. +// let hash = match new_blocks.get(&cp_block.height) { +// Some(&hash) => hash, +// None => { +// assert!( +// new_tip_height >= cp_block.height, +// "already checked that esplora's tip cannot be smaller" +// ); +// let hash = client.get_block_hash(cp_block.height).await?; +// new_blocks.insert(cp_block.height, hash); +// hash +// } +// }; + +// if hash == cp_block.hash { +// agreement_cp = Some(cp); +// break; +// } +// } + +// let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); + +// let new_tip = new_blocks +// .into_iter() +// // Prune `new_blocks` to only include blocks that are actually new. 
+// .filter(|(height, _)| Some(*height) > agreement_height) +// .map(|(height, hash)| BlockId { height, hash }) +// .fold(agreement_cp, |prev_cp, block| { +// Some(match prev_cp { +// Some(cp) => cp.push(block).expect("must extend cp"), +// None => CheckPoint::new(block), +// }) +// }) +// .expect("must have at least one checkpoint"); + +// Ok((new_tip, agreement_height)) +// } diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index 27b9a4956..b940563df 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -1,53 +1,73 @@ -use bdk_chain::bitcoin::{BlockHash, OutPoint, Script, Txid}; +use std::thread::JoinHandle; + +use bdk_chain::bitcoin::{OutPoint, Txid}; +use bdk_chain::collections::btree_map; use bdk_chain::collections::BTreeMap; -use bdk_chain::local_chain::CheckPoint; -use bdk_chain::{keychain::LocalUpdate, ConfirmationTimeAnchor}; -use bdk_chain::{BlockId, TxGraph}; -use esplora_client::{Error, OutputStatus, TxStatus}; +use bdk_chain::{ + bitcoin::{BlockHash, Script}, + local_chain::CheckPoint, +}; +use bdk_chain::{BlockId, ConfirmationTimeAnchor, TxGraph}; +use esplora_client::{Error, TxStatus}; + +use crate::{anchor_from_status, ASSUME_FINAL_DEPTH}; -/// Trait to extend [`esplora_client::BlockingClient`] functionality. +/// Trait to extend the functionality of [`esplora_client::BlockingClient`]. /// /// Refer to [crate-level documentation] for more. /// /// [crate-level documentation]: crate pub trait EsploraExt { - /// Scan the blockchain (via esplora) for the data specified and returns a - /// [`LocalUpdate`]. + /// Prepare an [`LocalChain`] update with blocks fetched from Esplora. + /// + /// * `prev_tip` is the previous tip of [`LocalChain::tip`]. + /// * `get_heights` is the block heights that we are interested in fetching from Esplora. 
/// - /// - `local_chain`: the most recent block hashes present locally - /// - `keychain_spks`: keychains that we want to scan transactions for - /// - `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s - /// - `outpoints`: transactions associated with these outpoints (residing, spending) that we - /// want to included in the update + /// The result of this method can be applied to [`LocalChain::update`]. + /// + /// [`LocalChain`]: bdk_chain::local_chain::LocalChain + /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip + /// [`LocalChain::update`]: bdk_chain::local_chain::LocalChain::update + #[allow(clippy::result_large_err)] + fn update_local_chain( + &self, + prev_tip: Option, + get_heights: impl IntoIterator, + ) -> Result; + + /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active + /// indices. + /// + /// * `keychain_spks`: keychains that we want to scan transactions for + /// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s + /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we + /// want to include in the update /// /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in /// parallel. - #[allow(clippy::result_large_err)] // FIXME - fn scan( + #[allow(clippy::result_large_err)] + fn update_tx_graph( &self, - prev_tip: Option, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, stop_gap: usize, parallel_requests: usize, - ) -> Result, Error>; + ) -> Result<(TxGraph, BTreeMap), Error>; - /// Convenience method to call [`scan`] without requiring a keychain. + /// Convenience method to call [`update_tx_graph`] without requiring a keychain. 
/// - /// [`scan`]: EsploraExt::scan - #[allow(clippy::result_large_err)] // FIXME - fn scan_without_keychain( + /// [`update_tx_graph`]: EsploraExt::update_tx_graph + #[allow(clippy::result_large_err)] + fn update_tx_graph_without_keychain( &self, - prev_tip: Option, misc_spks: impl IntoIterator, txids: impl IntoIterator, outpoints: impl IntoIterator, parallel_requests: usize, - ) -> Result, Error> { - self.scan( - prev_tip, + ) -> Result, Error> { + self.update_tx_graph( [( (), misc_spks @@ -61,243 +81,234 @@ pub trait EsploraExt { usize::MAX, parallel_requests, ) + .map(|(g, _)| g) } } impl EsploraExt for esplora_client::BlockingClient { - fn scan( + fn update_local_chain( &self, prev_tip: Option, + get_heights: impl IntoIterator, + ) -> Result { + let new_tip_height = self.get_height()?; + + // If esplora returns a tip height that is lower than our previous tip, then checkpoints do + // not need updating. We just return the previous tip and use that as the point of + // agreement. + if let Some(prev_tip) = prev_tip.as_ref() { + if new_tip_height < prev_tip.height() { + return Ok(prev_tip.clone()); + } + } + + // Fetch new block IDs that are to be included in the update. This includes: + // 1. Atomically fetched most-recent blocks so we have a consistent view even during reorgs. + // 2. Heights the caller is interested in (as specified in `get_heights`). + let mut new_blocks = { + let heights = (0..=new_tip_height).rev(); + let hashes = self + .get_blocks(Some(new_tip_height))? + .into_iter() + .map(|b| b.id); + + let mut new_blocks = heights.zip(hashes).collect::>(); + + for height in get_heights { + // do not fetch blocks higher than known tip + if height > new_tip_height { + continue; + } + if let btree_map::Entry::Vacant(entry) = new_blocks.entry(height) { + let hash = self.get_block_hash(height)?; + entry.insert(hash); + } + } + + new_blocks + }; + + // Determine the checkpoint to start building our update tip from. 
+ let first_cp = match prev_tip { + Some(old_tip) => { + let old_tip_height = old_tip.height(); + let mut earliest_agreement_cp = Option::::None; + + for old_cp in old_tip.iter() { + let old_block = old_cp.block_id(); + + let new_hash = match new_blocks.entry(old_block.height) { + btree_map::Entry::Vacant(entry) => *entry.insert( + if old_tip_height - old_block.height >= ASSUME_FINAL_DEPTH { + old_block.hash + } else { + self.get_block_hash(old_block.height)? + }, + ), + btree_map::Entry::Occupied(entry) => *entry.get(), + }; + + // Since we may introduce blocks below the point of agreement, we cannot break + // here unconditionally. We only break if we guarantee there are no new heights + // below our current. + if old_block.hash == new_hash { + earliest_agreement_cp = Some(old_cp); + + let first_new_height = *new_blocks + .keys() + .next() + .expect("must have atleast one new block"); + if first_new_height <= old_block.height { + break; + } + } + } + + earliest_agreement_cp + } + None => None, + } + .unwrap_or_else(|| { + let (&height, &hash) = new_blocks + .iter() + .next() + .expect("must have atleast one new block"); + CheckPoint::new(BlockId { height, hash }) + }); + + let new_tip = new_blocks + .split_off(&(first_cp.height() + 1)) + .into_iter() + .map(|(height, hash)| BlockId { height, hash }) + .fold(first_cp, |prev_cp, block| { + prev_cp.extend(block).expect("must extend checkpoint") + }); + + Ok(new_tip) + } + + fn update_tx_graph( + &self, keychain_spks: BTreeMap>, txids: impl IntoIterator, outpoints: impl IntoIterator, stop_gap: usize, parallel_requests: usize, - ) -> Result, Error> { + ) -> Result<(TxGraph, BTreeMap), Error> { + type TxsOfSpkIndex = (u32, Vec); let parallel_requests = Ord::max(parallel_requests, 1); - - let (tip, _) = construct_update_tip(self, prev_tip)?; - let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); - let mut update = LocalUpdate::::new(tip); + let mut graph = TxGraph::::default(); + let mut 
last_active_indexes = BTreeMap::::new(); for (keychain, spks) in keychain_spks { let mut spks = spks.into_iter(); - let mut last_active_index = None; - let mut empty_scripts = 0; - type IndexWithTxs = (u32, Vec); + let mut last_index = Option::::None; + let mut last_active_index = Option::::None; loop { - let handles = (0..parallel_requests) - .filter_map( - |_| -> Option>> { - let (index, script) = spks.next()?; + let handles = spks + .by_ref() + .take(parallel_requests) + .map(|(spk_index, spk)| { + std::thread::spawn({ let client = self.clone(); - Some(std::thread::spawn(move || { - let mut related_txs = client.scripthash_txs(&script, None)?; - - let n_confirmed = - related_txs.iter().filter(|tx| tx.status.confirmed).count(); - // esplora pages on 25 confirmed transactions. If there are 25 or more we - // keep requesting to see if there's more. - if n_confirmed >= 25 { - loop { - let new_related_txs = client.scripthash_txs( - &script, - Some(related_txs.last().unwrap().txid), - )?; - let n = new_related_txs.len(); - related_txs.extend(new_related_txs); - // we've reached the end - if n < 25 { - break; - } + move || -> Result { + let mut last_seen = None; + let mut spk_txs = Vec::new(); + loop { + let txs = client.scripthash_txs(&spk, last_seen)?; + let tx_count = txs.len(); + last_seen = txs.last().map(|tx| tx.txid); + spk_txs.extend(txs); + if tx_count < 25 { + break Ok((spk_index, spk_txs)); } } + } + }) + }) + .collect::>>>(); - Result::<_, esplora_client::Error>::Ok((index, related_txs)) - })) - }, - ) - .collect::>(); - - let n_handles = handles.len(); + if handles.is_empty() { + break; + } for handle in handles { - let (index, related_txs) = handle.join().unwrap()?; // TODO: don't unwrap - if related_txs.is_empty() { - empty_scripts += 1; - } else { + let (index, txs) = handle.join().expect("thread must not panic")?; + last_index = Some(index); + if !txs.is_empty() { last_active_index = Some(index); - empty_scripts = 0; } - for tx in related_txs { - let 
anchor = make_anchor(&tx.status); - let _ = update.graph.insert_tx(tx.to_tx()); - if let Some(anchor) = anchor { - let _ = update.graph.insert_anchor(tx.txid, anchor); + for tx in txs { + let _ = graph.insert_tx(tx.to_tx()); + if let Some(anchor) = anchor_from_status(&tx.status) { + let _ = graph.insert_anchor(tx.txid, anchor); } } } - if n_handles == 0 || empty_scripts >= stop_gap { + if last_index > last_active_index.map(|i| i + stop_gap as u32) { break; } } if let Some(last_active_index) = last_active_index { - update.keychain.insert(keychain, last_active_index); + last_active_indexes.insert(keychain, last_active_index); } } - for txid in txids.into_iter() { - if update.graph.get_tx(txid).is_none() { - match self.get_tx(&txid)? { - Some(tx) => { - let _ = update.graph.insert_tx(tx); - } - None => continue, - } + let mut txids = txids.into_iter(); + loop { + let handles = txids + .by_ref() + .take(parallel_requests) + .filter(|&txid| graph.get_tx(txid).is_none()) + .map(|txid| { + std::thread::spawn({ + let client = self.clone(); + move || client.get_tx_status(&txid).map(|s| (txid, s)) + }) + }) + .collect::>>>(); + + if handles.is_empty() { + break; } - match self.get_tx_status(&txid)? { - tx_status if tx_status.confirmed => { - if let Some(anchor) = make_anchor(&tx_status) { - let _ = update.graph.insert_anchor(txid, anchor); - } + + for handle in handles { + let (txid, status) = handle.join().expect("thread must not panic")?; + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(txid, anchor); } - _ => continue, } } for op in outpoints.into_iter() { - let mut op_txs = Vec::with_capacity(2); - if let ( - Some(tx), - tx_status @ TxStatus { - confirmed: true, .. - }, - ) = (self.get_tx(&op.txid)?, self.get_tx_status(&op.txid)?) - { - op_txs.push((tx, tx_status)); - if let Some(OutputStatus { - txid: Some(txid), - status: Some(spend_status), - .. - }) = self.get_output_status(&op.txid, op.vout as _)? 
- { - if let Some(spend_tx) = self.get_tx(&txid)? { - op_txs.push((spend_tx, spend_status)); - } + if graph.get_tx(op.txid).is_none() { + if let Some(tx) = self.get_tx(&op.txid)? { + let _ = graph.insert_tx(tx); } - } - - for (tx, status) in op_txs { - let txid = tx.txid(); - let anchor = make_anchor(&status); - - let _ = update.graph.insert_tx(tx); - if let Some(anchor) = anchor { - let _ = update.graph.insert_anchor(txid, anchor); + let status = self.get_tx_status(&op.txid)?; + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(op.txid, anchor); } } - } - // If a reorg occured during the update, anchors may be wrong. We handle this by scrapping - // all anchors, reconstructing checkpoints and reconstructing anchors. - while self.get_block_hash(update.tip.height())? != update.tip.hash() { - let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone()))?; - make_anchor = crate::confirmation_time_anchor_maker(&new_tip); - - // Reconstruct graph with only transactions (no anchors). - update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); - update.tip = new_tip; - - // Re-fetch anchors. - let anchors = update - .graph - .full_txs() - .filter_map(|n| match self.get_tx_status(&n.txid) { - Err(err) => Some(Err(err)), - Ok(status) if status.confirmed => make_anchor(&status).map(|a| Ok((n.txid, a))), - _ => None, - }) - .collect::, _>>()?; - for (txid, anchor) in anchors { - let _ = update.graph.insert_anchor(txid, anchor); + if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _)? { + if let Some(txid) = op_status.txid { + if graph.get_tx(txid).is_none() { + if let Some(tx) = self.get_tx(&txid)? 
{ + let _ = graph.insert_tx(tx); + } + let status = self.get_tx_status(&txid)?; + if let Some(anchor) = anchor_from_status(&status) { + let _ = graph.insert_anchor(txid, anchor); + } + } + } } } - Ok(update) - } -} - -/// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return -/// the new checkpoint tip alongside the height of agreement between the two histories (if any). -#[allow(clippy::result_large_err)] -fn construct_update_tip( - client: &esplora_client::BlockingClient, - prev_tip: Option, -) -> Result<(CheckPoint, Option), Error> { - let new_tip_height = client.get_height()?; - - // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not - // need updating. We just return the previous tip and use that as the point of agreement. - if let Some(prev_tip) = prev_tip.as_ref() { - if new_tip_height < prev_tip.height() { - return Ok((prev_tip.clone(), Some(prev_tip.height()))); - } + Ok((graph, last_active_indexes)) } - - // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be - // reorged. This ensures that our checkpoint history is consistent. - let mut new_blocks = { - let heights = (0..new_tip_height).rev(); - let hashes = client - .get_blocks(Some(new_tip_height))? - .into_iter() - .map(|b| b.id); - heights.zip(hashes).collect::>() - }; - - let mut agreement_cp = Option::::None; - - for cp in prev_tip.iter().flat_map(CheckPoint::iter) { - let cp_block = cp.block_id(); - - // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history - // consistent even during reorgs. 
- let hash = match new_blocks.get(&cp_block.height) { - Some(&hash) => hash, - None => { - assert!( - new_tip_height >= cp_block.height, - "already checked that esplora's tip cannot be smaller" - ); - let hash = client.get_block_hash(cp_block.height)?; - new_blocks.insert(cp_block.height, hash); - hash - } - }; - - if hash == cp_block.hash { - agreement_cp = Some(cp); - break; - } - } - - let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); - - let new_tip = new_blocks - .into_iter() - // Prune `new_blocks` to only include blocks that are actually new. - .filter(|(height, _)| Some(*height) > agreement_height) - .map(|(height, hash)| BlockId { height, hash }) - .fold(agreement_cp, |prev_cp, block| { - Some(match prev_cp { - Some(cp) => cp.extend(block).expect("must extend cp"), - None => CheckPoint::new(block), - }) - }) - .expect("must have at least one checkpoint"); - - Ok((new_tip, agreement_height)) } diff --git a/crates/esplora/src/lib.rs b/crates/esplora/src/lib.rs index 07ccdab8f..9954ccec0 100644 --- a/crates/esplora/src/lib.rs +++ b/crates/esplora/src/lib.rs @@ -1,7 +1,5 @@ #![doc = include_str!("../README.md")] -use std::collections::BTreeMap; - -use bdk_chain::{local_chain::CheckPoint, ConfirmationTimeAnchor}; +use bdk_chain::{BlockId, ConfirmationTimeAnchor}; use esplora_client::TxStatus; pub use esplora_client; @@ -16,25 +14,22 @@ mod async_ext; #[cfg(feature = "async")] pub use async_ext::*; -pub(crate) fn confirmation_time_anchor_maker( - tip: &CheckPoint, -) -> impl FnMut(&TxStatus) -> Option { - let cache = tip - .iter() - .take(10) - .map(|cp| (cp.height(), cp)) - .collect::>(); - - move |status| match (status.block_time, status.block_height) { - (Some(confirmation_time), Some(confirmation_height)) => { - let (_, anchor_cp) = cache.range(confirmation_height..).next()?; +const ASSUME_FINAL_DEPTH: u32 = 15; - Some(ConfirmationTimeAnchor { - anchor_block: anchor_cp.block_id(), - confirmation_height, - confirmation_time, - }) - } - _ 
=> None, +fn anchor_from_status(status: &TxStatus) -> Option { + if let TxStatus { + block_height: Some(height), + block_hash: Some(hash), + block_time: Some(time), + .. + } = status.clone() + { + Some(ConfirmationTimeAnchor { + anchor_block: BlockId { height, hash }, + confirmation_height: height, + confirmation_time: time, + }) + } else { + None } } diff --git a/example-crates/wallet_esplora/src/main.rs b/example-crates/wallet_esplora/src/main.rs index 187091ff4..06441d4a0 100644 --- a/example-crates/wallet_esplora/src/main.rs +++ b/example-crates/wallet_esplora/src/main.rs @@ -1,12 +1,13 @@ const DB_MAGIC: &str = "bdk_wallet_esplora_example"; -const SEND_AMOUNT: u64 = 5000; -const STOP_GAP: usize = 50; -const PARALLEL_REQUESTS: usize = 2; +const SEND_AMOUNT: u64 = 1000; +const STOP_GAP: usize = 5; +const PARALLEL_REQUESTS: usize = 1; use std::{io::Write, str::FromStr}; use bdk::{ bitcoin::{Address, Network}, + chain::keychain::LocalUpdate, wallet::AddressIndex, SignOptions, Wallet, }; @@ -52,17 +53,20 @@ fn main() -> Result<(), Box> { (k, k_spks) }) .collect(); - let update = client.scan( - prev_tip, - keychain_spks, - None, - None, - STOP_GAP, - PARALLEL_REQUESTS, - )?; - println!(); + + let (update_graph, last_active_indices) = + client.update_tx_graph(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS)?; + let get_heights = wallet.tx_graph().missing_blocks(wallet.local_chain()); + let new_tip = client.update_local_chain(prev_tip, get_heights)?; + let update = LocalUpdate { + keychain: last_active_indices, + graph: update_graph, + ..LocalUpdate::new(new_tip) + }; + wallet.apply_update(update, false)?; wallet.commit()?; + println!(); let balance = wallet.get_balance(); println!("Wallet balance after syncing: {} sats", balance.total()); diff --git a/example-crates/wallet_esplora_async/src/main.rs b/example-crates/wallet_esplora_async/src/main.rs index a3a3399e1..385913ea4 100644 --- a/example-crates/wallet_esplora_async/src/main.rs +++ 
b/example-crates/wallet_esplora_async/src/main.rs @@ -2,6 +2,7 @@ use std::{io::Write, str::FromStr}; use bdk::{ bitcoin::{Address, Network}, + chain::keychain::LocalUpdate, wallet::AddressIndex, SignOptions, Wallet, }; @@ -37,7 +38,7 @@ async fn main() -> Result<(), Box> { let client = esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?; - let prev_cp = wallet.latest_checkpoint(); + let prev_tip = wallet.latest_checkpoint(); let keychain_spks = wallet .spks_of_all_keychains() .into_iter() @@ -53,12 +54,19 @@ async fn main() -> Result<(), Box> { (k, k_spks) }) .collect(); - let update = client - .scan(prev_cp, keychain_spks, [], [], STOP_GAP, PARALLEL_REQUESTS) + let (update_graph, last_active_indices) = client + .update_tx_graph(keychain_spks, None, None, STOP_GAP, PARALLEL_REQUESTS) .await?; - println!(); + let get_heights = wallet.tx_graph().missing_blocks(wallet.local_chain()); + let new_tip = client.update_local_chain(prev_tip, get_heights).await?; + let update = LocalUpdate { + keychain: last_active_indices, + graph: update_graph, + ..LocalUpdate::new(new_tip) + }; wallet.apply_update(update, false)?; wallet.commit()?; + println!(); let balance = wallet.get_balance(); println!("Wallet balance after syncing: {} sats", balance.total()); From 861c27bf5a80181c4daf2c9e39d4a34d7497f28e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Wed, 5 Jul 2023 15:09:04 +0800 Subject: [PATCH 12/24] Attempt to make `LocalChain::update` independent of checkpoint map This logic should carry over better when we change `LocalChain` to be monotone (#1005). 
--- crates/chain/src/local_chain.rs | 273 +++++++++++++++++-------- crates/chain/tests/test_local_chain.rs | 26 ++- 2 files changed, 203 insertions(+), 96 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index a1ebd1a59..786a582a0 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -25,6 +25,13 @@ struct CPInner { prev: Option>, } +/// A safe representation of the underlying raw pointer of a [`CheckPoint`]. +/// +/// If two [`CheckPoint`]s return [`Pointer`]s that are equal, then the underlying raw pointer of +/// the checkpoints are equal. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] +pub struct Pointer(*const CPInner); + impl CheckPoint { /// Construct a [`CheckPoint`] from a [`BlockId`]. pub fn new(block: BlockId) -> Self { @@ -73,12 +80,19 @@ impl CheckPoint { self.0.prev.clone().map(CheckPoint) } - /// Iterate + /// Iterate from this checkpoint in descending height. pub fn iter(&self) -> CheckPointIter { CheckPointIter { current: Some(Arc::clone(&self.0)), } } + + /// Returns a safe representation of the underlying raw pointer of a [`CheckPoint`]. + /// + /// See [`Pointer`] to learn more. + pub fn as_ptr(&self) -> Pointer { + Pointer(Arc::as_ptr(&self.0)) + } } /// A structure that iterates over checkpoints backwards. 
@@ -230,88 +244,22 @@ impl LocalChain { /// /// [module-level documentation]: crate::local_chain pub fn update(&mut self, new_tip: CheckPoint) -> Result { - let mut updated_cps = BTreeMap::::new(); - let mut agreement_height = Option::::None; - let mut agreement_ptr_matches = false; - - for cp in new_tip.iter() { - let block = cp.block_id(); - - match self.checkpoints.get(&block.height) { - Some(original_cp) if original_cp.block_id() == block => { - let ptr_matches = Arc::as_ptr(&original_cp.0) == Arc::as_ptr(&cp.0); - - // only record the first agreement height - if agreement_height.is_none() && original_cp.block_id() == block { - agreement_height = Some(block.height); - agreement_ptr_matches = ptr_matches; - } - - // break if the internal pointers of the checkpoints are the same - if ptr_matches { - break; - } - } - // only insert into `updated_cps` if cp is actually updated (original cp is `None`, - // or block ids do not match) - _ => { - updated_cps.insert(block.height, cp.clone()); - } + match self.tip() { + Some(original_tip) => { + let (cp, changeset) = merge(original_tip, new_tip)?; + *self = Self::from_checkpoint(cp); + Ok(changeset) } - } - - // Lower bound of the range to invalidate in `self`. - let invalidate_lb = match agreement_height { - // if there is no agreement, we invalidate all of the original chain - None => u32::MIN, - // if the agreement is at the update's tip, we don't need to invalidate - Some(height) if height == new_tip.height() => u32::MAX, - Some(height) => height + 1, - }; - - let changeset = { - // Construct initial changeset of heights to invalidate in `self`. - let mut changeset = self - .checkpoints - .range(invalidate_lb..) - .map(|(&height, _)| (height, None)) - .collect::(); - - // The height of the first block to invalidate (if any) must be represented in the `update`. 
- if let Some(first_invalidated_height) = changeset.keys().next() { - if !updated_cps.contains_key(first_invalidated_height) { - return Err(CannotConnectError { - try_include: self - .checkpoints - .get(first_invalidated_height) - .expect("checkpoint already exists") - .block_id(), - }); + None => { + let mut changeset = ChangeSet::default(); + for cp in new_tip.iter() { + let block = cp.block_id(); + changeset.insert(block.height, Some(block.hash)); + self.checkpoints.insert(block.height, cp.clone()); } - } - - changeset.extend( - updated_cps - .iter() - .map(|(height, cp)| (*height, Some(cp.hash()))), - ); - changeset - }; - - // apply update if `update_cps` is non-empty - if let Some(&start_height) = updated_cps.keys().next() { - self.checkpoints.split_off(&invalidate_lb); - self.checkpoints.append(&mut updated_cps); - - // we never need to fix links if either: - // 1. the original chain is empty - // 2. the pointers match at the first point of agreement (where the block ids are equal) - if !(self.is_empty() || agreement_ptr_matches) { - self.fix_links(start_height); + Ok(changeset) } } - - Ok(changeset) } /// Apply the given `changeset`. @@ -444,18 +392,179 @@ impl std::error::Error for InsertBlockError {} #[derive(Clone, Debug, PartialEq)] pub struct CannotConnectError { /// The suggested checkpoint to include to connect the two chains. 
- pub try_include: BlockId, + pub try_include_height: u32, } impl core::fmt::Display for CannotConnectError { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!( f, - "introduced chain cannot connect with the original chain, try include {}:{}", - self.try_include.height, self.try_include.hash, + "introduced chain cannot connect with the original chain, try include height {}", + self.try_include_height, ) } } #[cfg(feature = "std")] impl std::error::Error for CannotConnectError {} + +fn merge( + original_tip: CheckPoint, + update_tip: CheckPoint, +) -> Result<(CheckPoint, ChangeSet), CannotConnectError> { + let mut o_visited = BTreeMap::::new(); + let mut o_iter = original_tip + .iter() + .inspect(|cp| { + o_visited.insert(cp.height(), cp.clone()); + }) + .peekable(); + + let mut u_visited = BTreeMap::::new(); + let u_iter = update_tip.iter().inspect(|cp| { + u_visited.insert(cp.height(), cp.clone()); + }); + + let mut highest_agreement = Option::::None; + + for u_cp in u_iter { + // match heights of `o_iter` and `u_iter` + let o_cp = loop { + match o_iter.peek() { + Some(o_cp) if o_cp.height() > u_cp.height() => o_iter.next(), + Some(o_cp) if o_cp.height() == u_cp.height() => break o_iter.next(), + _ => break None, + }; + }; + let o_cp = match o_cp { + Some(o_cp) => o_cp, + None => continue, + }; + + // perfect match! 
+ if o_cp.as_ptr() == u_cp.as_ptr() { + let invalidate_lb = o_cp.height() + 1; + if let Some(invalidate_o) = o_visited.range(invalidate_lb..).next().map(|(&h, _)| h) { + if let Some(invalidate_u) = u_visited.range(invalidate_o..).next().map(|(&h, _)| h) + { + if invalidate_u != invalidate_o { + return Err(CannotConnectError { + try_include_height: invalidate_o, + }); + } + } + } + let changeset = u_visited + .split_off(&invalidate_lb) + .into_iter() + .map(|(h, cp)| (h, Some(cp.hash()))) + .collect::(); + return Ok((update_tip, changeset)); + } + + // find highest agreement height + if highest_agreement.is_none() && o_cp.hash() == u_cp.hash() { + highest_agreement = Some(o_cp.height()); + } + } + + // check invalidation + let mut explicit_invalidation = false; + let invalidate_lb = match highest_agreement { + Some(h) => { + let invalidate_lb = h + 1; + if let Some(invalidate_o) = o_visited.range(invalidate_lb..).next().map(|(&h, _)| h) { + if let Some(invalidate_u) = u_visited.range(invalidate_o..).next().map(|(&h, _)| h) + { + explicit_invalidation = true; + if invalidate_u != invalidate_o { + return Err(CannotConnectError { + try_include_height: invalidate_o, + }); + } + } + } + invalidate_lb + } + None => { + explicit_invalidation = true; + // If there is no agreement height, the lowest original checkpoint must be displaced by + // an update checkpoint. We can ensure this if `o_iter` is exhausted and the first + // height of `o_visited` exists in `u_visited`. 
+ if let Some(cp) = o_iter.peek() { + return Err(CannotConnectError { + try_include_height: cp.height(), + }); + } + let first_o = o_visited + .keys() + .next() + .expect("must atleast have one height"); + if !u_visited.contains_key(first_o) { + return Err(CannotConnectError { + try_include_height: *first_o, + }); + } + // we invalidate everything in the original chain + 0 + } + }; + + // make changeset + let changeset = { + let mut changeset = match explicit_invalidation { + true => o_visited + .range(invalidate_lb..) + .map(|(&h, _)| (h, None)) + .collect::(), + false => ChangeSet::default(), + }; + for (h, u_cp) in &u_visited { + match o_visited.get(h) { + Some(o_cp) if o_cp.hash() == u_cp.hash() => continue, + _ => changeset.insert(*h, Some(u_cp.hash())), + }; + } + changeset + }; + + // get original cp before the first change + let first_change_height = match changeset.keys().next() { + Some(&h) => h, + // empty changeset + None => return Ok((original_tip, changeset)), + }; + + let start_cp = o_visited + .range(..first_change_height) + .next_back() + .map(|(_, cp)| cp.clone()); + + let chain_ext = { + let mut chain_ext = o_visited + .range(first_change_height..) 
+ .map(|(&h, cp)| (h, cp.hash())) + .collect::>(); + for (&height, &hash_delta) in &changeset { + match hash_delta { + Some(hash) => chain_ext.insert(height, hash), + None => chain_ext.remove(&height), + }; + } + chain_ext + }; + + // build new chain + let mut cp = start_cp; + for (height, hash) in chain_ext { + let block = BlockId { height, hash }; + match cp.clone() { + Some(this_cp) => cp = Some(this_cp.extend(block).expect("must extend")), + _ => { + let _ = cp.insert(CheckPoint::new(block)); + } + }; + } + + Ok((cp.expect("must have checkpoint"), changeset)) +} diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs index 4d6697841..a33bf81ec 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -1,6 +1,5 @@ -use bdk_chain::{ - local_chain::{CannotConnectError, ChangeSet, CheckPoint, InsertBlockError, LocalChain}, - BlockId, +use bdk_chain::local_chain::{ + CannotConnectError, ChangeSet, CheckPoint, InsertBlockError, LocalChain, }; use bitcoin::BlockHash; @@ -28,8 +27,13 @@ impl<'a> TestLocalChain<'a> { fn run(mut self) { let got_changeset = match self.chain.update(self.new_tip) { Ok(changeset) => changeset, - Err(err) => { - assert_eq!(ExpectedResult::Err(err), self.exp); + Err(got_err) => { + assert_eq!( + ExpectedResult::Err(got_err), + self.exp, + "{}: unexpected error", + self.name + ); return; } }; @@ -86,10 +90,7 @@ fn update() { chain: local_chain![(0, h!("A"))], new_tip: chain_update![(1, h!("B"))], exp: ExpectedResult::Err(CannotConnectError { - try_include: BlockId { - height: 0, - hash: h!("A"), - }, + try_include_height: 0, }), }, TestLocalChain { @@ -148,10 +149,7 @@ fn update() { chain: local_chain![(1, h!("B")), (2, h!("C"))], new_tip: chain_update![(0, h!("A")), (1, h!("B")), (3, h!("D"))], exp: ExpectedResult::Err(CannotConnectError { - try_include: BlockId { - height: 2, - hash: h!("C"), - }, + try_include_height: 2, }), }, // Transient invalidation: @@ -211,7 
+209,7 @@ fn update() { name: "invalidation but no connection", chain: local_chain![(0, h!("A")), (1, h!("B")), (2, h!("C")), (4, h!("E"))], new_tip: chain_update![(1, h!("B'")), (2, h!("C'")), (3, h!("D"))], - exp: ExpectedResult::Err(CannotConnectError { try_include: BlockId { height: 0, hash: h!("A") } }), + exp: ExpectedResult::Err(CannotConnectError { try_include_height: 0 }), }, // Introduce blocks between two points of agreement // | 0 | 1 | 2 | 3 | 4 | 5 From 5924b29c8c920f4e20398a340c91e252fa323aab Mon Sep 17 00:00:00 2001 From: LLFourn Date: Fri, 7 Jul 2023 12:50:34 +0800 Subject: [PATCH 13/24] Implement simpler chain update algorithm By iterating backwards over the two chains in tandem to find the difference between them. --- crates/chain/src/local_chain.rs | 230 +++++++++---------------- crates/chain/tests/test_local_chain.rs | 18 +- 2 files changed, 94 insertions(+), 154 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 786a582a0..0ca0151e8 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -246,8 +246,8 @@ impl LocalChain { pub fn update(&mut self, new_tip: CheckPoint) -> Result { match self.tip() { Some(original_tip) => { - let (cp, changeset) = merge(original_tip, new_tip)?; - *self = Self::from_checkpoint(cp); + let changeset = merge_chains(original_tip, new_tip)?; + self.apply_changeset(&changeset); Ok(changeset) } None => { @@ -408,163 +408,95 @@ impl core::fmt::Display for CannotConnectError { #[cfg(feature = "std")] impl std::error::Error for CannotConnectError {} -fn merge( - original_tip: CheckPoint, - update_tip: CheckPoint, -) -> Result<(CheckPoint, ChangeSet), CannotConnectError> { - let mut o_visited = BTreeMap::::new(); - let mut o_iter = original_tip - .iter() - .inspect(|cp| { - o_visited.insert(cp.height(), cp.clone()); - }) - .peekable(); - - let mut u_visited = BTreeMap::::new(); - let u_iter = update_tip.iter().inspect(|cp| { - 
u_visited.insert(cp.height(), cp.clone()); - }); - - let mut highest_agreement = Option::::None; - - for u_cp in u_iter { - // match heights of `o_iter` and `u_iter` - let o_cp = loop { - match o_iter.peek() { - Some(o_cp) if o_cp.height() > u_cp.height() => o_iter.next(), - Some(o_cp) if o_cp.height() == u_cp.height() => break o_iter.next(), - _ => break None, - }; - }; - let o_cp = match o_cp { - Some(o_cp) => o_cp, - None => continue, - }; - - // perfect match! - if o_cp.as_ptr() == u_cp.as_ptr() { - let invalidate_lb = o_cp.height() + 1; - if let Some(invalidate_o) = o_visited.range(invalidate_lb..).next().map(|(&h, _)| h) { - if let Some(invalidate_u) = u_visited.range(invalidate_o..).next().map(|(&h, _)| h) - { - if invalidate_u != invalidate_o { - return Err(CannotConnectError { - try_include_height: invalidate_o, - }); - } - } - } - let changeset = u_visited - .split_off(&invalidate_lb) - .into_iter() - .map(|(h, cp)| (h, Some(cp.hash()))) - .collect::(); - return Ok((update_tip, changeset)); +fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result { + let mut changeset = ChangeSet::default(); + let mut orig = orig.iter(); + let mut update = update.iter(); + let mut curr_orig = None; + let mut curr_update = None; + let mut prev_orig: Option = None; + let mut prev_update: Option = None; + let mut point_of_agreement_found = false; + let mut prev_orig_was_invalidated = false; + let mut potentially_invalidated_heights = vec![]; + + // To find the difference between the new chain and the original we iterate over both of them + // from the tip backwards in tandem. We are always dealing with the highest one from either chain + // first and move to the next highest. The crucial logic is applied when they have blocks at the + // same height. 
+ loop { + if curr_orig.is_none() { + curr_orig = orig.next(); } - - // find highest agreement height - if highest_agreement.is_none() && o_cp.hash() == u_cp.hash() { - highest_agreement = Some(o_cp.height()); + if curr_update.is_none() { + curr_update = update.next(); } - } - // check invalidation - let mut explicit_invalidation = false; - let invalidate_lb = match highest_agreement { - Some(h) => { - let invalidate_lb = h + 1; - if let Some(invalidate_o) = o_visited.range(invalidate_lb..).next().map(|(&h, _)| h) { - if let Some(invalidate_u) = u_visited.range(invalidate_o..).next().map(|(&h, _)| h) - { - explicit_invalidation = true; - if invalidate_u != invalidate_o { + match (curr_orig.as_ref(), curr_update.as_ref()) { + // Update block that doesn't exist in the original chain + (o, Some(u)) if Some(u.height()) > o.map(|o| o.height()) => { + changeset.insert(u.height(), Some(u.hash())); + prev_update = curr_update.take(); + } + // Original block that isn't in the update + (Some(o), u) if Some(o.height()) > u.map(|u| u.height()) => { + // this block might be gone if an earlier block gets invalidated + potentially_invalidated_heights.push(o.height()); + prev_orig_was_invalidated = false; + prev_orig = curr_orig.take(); + } + (Some(o), Some(u)) => { + if o.hash() == u.hash() { + // We have found our point of agreement 🎉 -- we require that the previous (i.e. + // higher because we are iterating backwards) block in the original chain was + // invalidated (if it exists). This ensures that there is an unambiguous point of + // connection to the original chain from the update chain (i.e. we know + // precisely which original blocks are invalid). 
+ if !prev_orig_was_invalidated && !point_of_agreement_found { + if let (Some(prev_orig), Some(_prev_update)) = (prev_orig, prev_update) { + return Err(CannotConnectError { + try_include_height: prev_orig.height(), + }); + } + } + point_of_agreement_found = true; + prev_orig_was_invalidated = false; + // OPTIMIZATION -- if we have the same underlying references at this + // point then we know everything else in the two chains will match so the + // changeset is fine. + if Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) { + break; + } + } else { + // We have an invalidation height so we set the height to the updated hash and + // also purge all the original chain block hashes above this block. + changeset.insert(u.height(), Some(u.hash())); + for invalidated_height in potentially_invalidated_heights.drain(..) { + changeset.insert(invalidated_height, None); + } + prev_orig_was_invalidated = true; + } + prev_update = curr_update.take(); + prev_orig = curr_orig.take(); + } + (None, None) => { + // When we don't have a point of agreement you can imagine it is implicitly the + // genesis block so we need to do the final connectivity check which in this case + // just means making sure the entire original chain was invalidated. + if !prev_orig_was_invalidated && !point_of_agreement_found { + if let Some(prev_orig) = prev_orig { return Err(CannotConnectError { - try_include_height: invalidate_o, + try_include_height: prev_orig.height(), }); } } + break; } - invalidate_lb - } - None => { - explicit_invalidation = true; - // If there is no agreement height, the lowest original checkpoint must be displaced by - // an update checkpoint. We can ensure this if `o_iter` is exhausted and the first - // height of `o_visited` exists in `u_visited`. 
- if let Some(cp) = o_iter.peek() { - return Err(CannotConnectError { - try_include_height: cp.height(), - }); - } - let first_o = o_visited - .keys() - .next() - .expect("must atleast have one height"); - if !u_visited.contains_key(first_o) { - return Err(CannotConnectError { - try_include_height: *first_o, - }); - } - // we invalidate everything in the original chain - 0 - } - }; - - // make changeset - let changeset = { - let mut changeset = match explicit_invalidation { - true => o_visited - .range(invalidate_lb..) - .map(|(&h, _)| (h, None)) - .collect::(), - false => ChangeSet::default(), - }; - for (h, u_cp) in &u_visited { - match o_visited.get(h) { - Some(o_cp) if o_cp.hash() == u_cp.hash() => continue, - _ => changeset.insert(*h, Some(u_cp.hash())), - }; - } - changeset - }; - - // get original cp before the first change - let first_change_height = match changeset.keys().next() { - Some(&h) => h, - // empty changeset - None => return Ok((original_tip, changeset)), - }; - - let start_cp = o_visited - .range(..first_change_height) - .next_back() - .map(|(_, cp)| cp.clone()); - - let chain_ext = { - let mut chain_ext = o_visited - .range(first_change_height..) 
- .map(|(&h, cp)| (h, cp.hash())) - .collect::>(); - for (&height, &hash_delta) in &changeset { - match hash_delta { - Some(hash) => chain_ext.insert(height, hash), - None => chain_ext.remove(&height), - }; - } - chain_ext - }; - - // build new chain - let mut cp = start_cp; - for (height, hash) in chain_ext { - let block = BlockId { height, hash }; - match cp.clone() { - Some(this_cp) => cp = Some(this_cp.extend(block).expect("must extend")), _ => { - let _ = cp.insert(CheckPoint::new(block)); + unreachable!("compiler cannot tell that everything has been covered") } - }; + } } - Ok((cp.expect("must have checkpoint"), changeset)) + Ok(changeset) } diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs index a33bf81ec..ea1adaca1 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -57,15 +57,15 @@ impl<'a> TestLocalChain<'a> { ); } ExpectedResult::Err(err) => panic!( - "expected error ({}), got non-error result: {:?}", - err, got_changeset + "{}: expected error ({}), got non-error result: {:?}", + self.name, err, got_changeset ), } } } #[test] -fn update() { +fn update_local_chain() { [ TestLocalChain { name: "add first tip", @@ -93,6 +93,14 @@ fn update() { try_include_height: 0, }), }, + TestLocalChain { + name: "two disjoint chains cannot merge (existing chain longer)", + chain: local_chain![(1, h!("A"))], + new_tip: chain_update![(0, h!("B"))], + exp: ExpectedResult::Err(CannotConnectError { + try_include_height: 1, + }), + }, TestLocalChain { name: "duplicate chains should merge", chain: local_chain![(0, h!("A"))], @@ -233,14 +241,14 @@ fn update() { (5, Some(h!("F"))), ], }, - } + }, ] .into_iter() .for_each(TestLocalChain::run); } #[test] -fn insert_block() { +fn local_chain_insert_block() { struct TestCase { original: LocalChain, insert: (u32, BlockHash), From 4c95c83abca7a02c824f7388741e82cefc7df281 Mon Sep 17 00:00:00 2001 From: LLFourn Date: Fri, 7 Jul 2023 23:02:43 +0800 
Subject: [PATCH 14/24] Rethink local chain internals and API Local chain is a linked list whose heights are indexed in a BTreeMap. --- crates/bdk/src/wallet/mod.rs | 11 +- crates/bitcoind_rpc/src/lib.rs | 2 +- crates/chain/src/local_chain.rs | 245 +++++++++---------- crates/chain/src/tx_graph.rs | 4 +- crates/chain/tests/test_indexed_tx_graph.rs | 15 +- crates/electrum/src/electrum_ext.rs | 2 +- crates/esplora/src/async_ext.rs | 253 +------------------- crates/esplora/src/blocking_ext.rs | 2 +- 8 files changed, 133 insertions(+), 401 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index 39c9fa37d..99b18b311 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -378,7 +378,7 @@ impl Wallet { /// Get all the checkpoints the wallet is currently storing indexed by height. pub fn checkpoints(&self) -> CheckPointIter { - self.chain.iter_checkpoints(None) + self.chain.iter_checkpoints() } /// Returns the latest checkpoint. @@ -500,15 +500,18 @@ impl Wallet { // anchor tx to checkpoint with lowest height that is >= position's height let anchor = self .chain - .checkpoints() + .heights() .range(height..) 
.next() .ok_or(InsertTxError::ConfirmationHeightCannotBeGreaterThanTip { tip_height: self.chain.tip().map(|b| b.height()), tx_height: height, }) - .map(|(&_, cp)| ConfirmationTimeAnchor { - anchor_block: cp.block_id(), + .map(|(&anchor_height, &hash)| ConfirmationTimeAnchor { + anchor_block: BlockId { + height: anchor_height, + hash, + }, confirmation_height: height, confirmation_time: time, })?; diff --git a/crates/bitcoind_rpc/src/lib.rs b/crates/bitcoind_rpc/src/lib.rs index 37d04cc7a..e779cf428 100644 --- a/crates/bitcoind_rpc/src/lib.rs +++ b/crates/bitcoind_rpc/src/lib.rs @@ -280,7 +280,7 @@ impl<'a> BitcoindRpcEmitter<'a> { let block = self.client.get_block(&info.hash)?; let cp = last_cp .clone() - .extend(BlockId { + .push(BlockId { height: info.height as _, hash: info.hash, }) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 0ca0151e8..31490efdf 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -10,9 +10,9 @@ use bitcoin::BlockHash; /// A structure that represents changes to [`LocalChain`]. pub type ChangeSet = BTreeMap>; -/// A block of [`LocalChain`]. +/// A blockchain of [`LocalChain`]. /// -/// Blocks are presented in a linked-list. +/// The blocks are in a linked-list with newer blocks pointing to older ones. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct CheckPoint(Arc); @@ -25,23 +25,17 @@ struct CPInner { prev: Option>, } -/// A safe representation of the underlying raw pointer of a [`CheckPoint`]. -/// -/// If two [`CheckPoint`]s return [`Pointer`]s that are equal, then the underlying raw pointer of -/// the checkpoints are equal. -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] -pub struct Pointer(*const CPInner); impl CheckPoint { - /// Construct a [`CheckPoint`] from a [`BlockId`]. + /// Construct a new base block at the front of a linked list. 
 pub fn new(block: BlockId) -> Self { Self(Arc::new(CPInner { block, prev: None })) } - /// Extends [`CheckPoint`] with `block` and returns the new checkpoint tip. + /// Puts another checkpoint onto the linked list representing the blockchain. /// - /// Returns an `Err` of the initial checkpoint - pub fn extend(self, block: BlockId) -> Result { + /// Returns an `Err(self)` if the block you are pushing on is not at a greater height than the one you + /// are pushing on to. + pub fn push(self, block: BlockId) -> Result { if self.height() < block.height { Ok(Self(Arc::new(CPInner { block, @@ -52,6 +46,18 @@ impl CheckPoint { } } + /// Extends the checkpoint linked list by an iterator of block ids. + /// + /// Returns an `Err(self)` if there is a block which does not have a greater height than the + /// previous one. + pub fn extend(self, blocks: impl IntoIterator) -> Result { + let mut curr = self.clone(); + for block in blocks { + curr = curr.push(block).map_err(|_| self.clone())?; + } + Ok(curr) + } + /// Get the [`BlockId`] of the checkpoint. pub fn block_id(&self) -> BlockId { self.0.block @@ -67,15 +73,7 @@ impl CheckPoint { self.0.block.hash } - /// Detach this checkpoint from the previous. - pub fn detach(self) -> Self { - Self(Arc::new(CPInner { - block: self.0.block, - prev: None, - })) - } - - /// Get the previous checkpoint. + /// Get the previous checkpoint in the chain pub fn prev(&self) -> Option { self.0.prev.clone().map(CheckPoint) } @@ -86,13 +84,6 @@ impl CheckPoint { current: Some(Arc::clone(&self.0)), } } - - /// Returns a safe representation of the underlying raw pointer of a [`CheckPoint`]. - /// - /// See [`Pointer`] to learn more. - pub fn as_ptr(&self) -> Pointer { - Pointer(Arc::as_ptr(&self.0)) - } } /// A structure that iterates over checkpoints backwards. @@ -113,16 +104,13 @@ /// This is a local implementation of [`ChainOracle`]. 
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct LocalChain { - checkpoints: BTreeMap, + tip: Option, + index: BTreeMap, } impl From for BTreeMap { fn from(value: LocalChain) -> Self { - value - .checkpoints - .values() - .map(|cp| (cp.height(), cp.hash())) - .collect() + value.index } } @@ -151,19 +139,17 @@ impl ChainOracle for LocalChain { } Ok( match ( - self.checkpoints.get(&block.height), - self.checkpoints.get(&chain_tip.height), + self.index.get(&block.height), + self.index.get(&chain_tip.height), ) { - (Some(cp), Some(tip_cp)) => { - Some(cp.hash() == block.hash && tip_cp.hash() == chain_tip.hash) - } + (Some(cp), Some(tip_cp)) => Some(*cp == block.hash && *tip_cp == chain_tip.hash), _ => None, }, ) } fn get_chain_tip(&self) -> Result, Self::Error> { - Ok(self.checkpoints.values().last().map(CheckPoint::block_id)) + Ok(self.tip.as_ref().map(|tip| tip.block_id())) } } @@ -176,10 +162,13 @@ impl LocalChain { } /// Construct a [`LocalChain`] from a given `checkpoint` tip. - pub fn from_checkpoint(checkpoint: CheckPoint) -> Self { - Self { - checkpoints: checkpoint.iter().map(|cp| (cp.height(), cp)).collect(), - } + pub fn from_tip(tip: CheckPoint) -> Self { + let mut _self = Self { + tip: Some(tip), + ..Default::default() + }; + _self.reindex(0); + _self } /// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`]. @@ -187,34 +176,31 @@ impl LocalChain { /// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are /// all of the same chain. 
pub fn from_blocks(blocks: BTreeMap) -> Self { - Self { - checkpoints: blocks - .into_iter() - .map({ - let mut prev = Option::::None; - move |(height, hash)| { - let cp = match prev.clone() { - Some(prev) => { - prev.extend(BlockId { height, hash }).expect("must extend") - } - None => CheckPoint::new(BlockId { height, hash }), - }; - prev = Some(cp.clone()); - (height, cp) - } - }) - .collect(), + let mut tip: Option = None; + + for block in &blocks { + match tip { + Some(curr) => { + tip = Some( + curr.push(BlockId::from(block)) + .expect("BTreeMap is ordered"), + ) + } + None => tip = Some(CheckPoint::new(BlockId::from(block))), + } } + + Self { index: blocks, tip } } /// Get the highest checkpoint. pub fn tip(&self) -> Option { - self.checkpoints.values().last().cloned() + self.tip.clone() } /// Returns whether the [`LocalChain`] is empty (has no checkpoints). pub fn is_empty(&self) -> bool { - self.checkpoints.is_empty() + self.tip.is_none() } /// Updates [`Self`] with the given `new_tip`. @@ -251,13 +237,8 @@ impl LocalChain { Ok(changeset) } None => { - let mut changeset = ChangeSet::default(); - for cp in new_tip.iter() { - let block = cp.block_id(); - changeset.insert(block.height, Some(block.hash)); - self.checkpoints.insert(block.height, cp.clone()); - } - Ok(changeset) + *self = Self::from_tip(new_tip); + Ok(self.initial_changeset()) } } } @@ -265,15 +246,38 @@ impl LocalChain { /// Apply the given `changeset`. 
pub fn apply_changeset(&mut self, changeset: &ChangeSet) { if let Some(start_height) = changeset.keys().next().cloned() { + let mut extension = BTreeMap::default(); + let mut base: Option = None; + if let Some(tip) = &self.tip { + for cp in tip.iter() { + if cp.height() >= start_height { + extension.insert(cp.height(), cp.hash()); + } else { + base = Some(cp); + break; + } + } + } + for (&height, &hash) in changeset { match hash { - Some(hash) => self - .checkpoints - .insert(height, CheckPoint::new(BlockId { height, hash })), - None => self.checkpoints.remove(&height), + Some(hash) => { + extension.insert(height, hash); + } + None => { + extension.remove(&height); + } }; } - self.fix_links(start_height); + let new_tip = match base { + Some(base) => Some( + base.extend(extension.into_iter().map(BlockId::from)) + .expect("extension is strictly greater than base"), + ), + None => LocalChain::from_blocks(extension).tip(), + }; + self.tip = new_tip; + self.reindex(start_height); } } @@ -283,84 +287,53 @@ impl LocalChain { /// /// Replacing the block hash of an existing checkpoint will result in an error. 
pub fn insert_block(&mut self, block_id: BlockId) -> Result { - use crate::collections::btree_map::Entry; - - match self.checkpoints.entry(block_id.height) { - Entry::Vacant(entry) => { - entry.insert(CheckPoint::new(block_id)); - self.fix_links(block_id.height); - Ok(core::iter::once((block_id.height, Some(block_id.hash))).collect()) - } - Entry::Occupied(entry) => { - let cp = entry.get(); - if cp.block_id() == block_id { - Ok(ChangeSet::default()) - } else { - Err(InsertBlockError { - height: block_id.height, - original_hash: cp.hash(), - update_hash: block_id.hash, - }) - } + if let Some(&original_hash) = self.index.get(&block_id.height) { + if original_hash != block_id.hash { + return Err(InsertBlockError { + height: block_id.height, + original_hash, + update_hash: block_id.hash, + }); + } else { + return Ok(ChangeSet::default()); } } + + let mut changeset = ChangeSet::default(); + changeset.insert(block_id.height, Some(block_id.hash)); + self.apply_changeset(&changeset); + Ok(changeset) } - /// Internal method for fixing pointers to make checkpoints a properly linked list. I.e. - /// [`CheckPoint::prev`] should return the previous checkpoint. - /// - /// We fix checkpoints from `start_height` and higher. - fn fix_links(&mut self, start_height: u32) { - let mut prev = self - .checkpoints - .range(..start_height) - .last() - .map(|(_, cp)| cp.clone()); - - for (_, cp) in self.checkpoints.range_mut(start_height..) 
{ - if cp.0.prev.as_ref().map(Arc::as_ptr) != prev.as_ref().map(|cp| Arc::as_ptr(&cp.0)) { - cp.0 = Arc::new(CPInner { - block: cp.block_id(), - prev: prev.clone().map(|cp| cp.0), - }); + /// Reindex the heights in the chain from (and including) `from` height + fn reindex(&mut self, from: u32) { + let _ = self.index.split_off(&from); + if let Some(tip) = &self.tip { + for cp in tip.iter() { + if cp.height() < from { + break; + } + self.index.insert(cp.height(), cp.hash()); } - prev = Some(cp.clone()); } } /// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to /// recover the current chain. pub fn initial_changeset(&self) -> ChangeSet { - self.iter_checkpoints(None) - .map(|cp| (cp.height(), Some(cp.hash()))) - .collect() - } - - /// Get checkpoint of `height` (if any). - pub fn checkpoint(&self, height: u32) -> Option { - self.checkpoints.get(&height).cloned() + self.index.iter().map(|(k, v)| (*k, Some(*v))).collect() } /// Iterate over checkpoints in decending height order. - /// - /// `height_upper_bound` is inclusive. A value of `None` means there is no bound, so all - /// checkpoints will be traversed. - pub fn iter_checkpoints(&self, height_upper_bound: Option) -> CheckPointIter { + pub fn iter_checkpoints(&self) -> CheckPointIter { CheckPointIter { - current: match height_upper_bound { - Some(height) => self - .checkpoints - .range(..=height) - .last() - .map(|(_, cp)| cp.0.clone()), - None => self.checkpoints.values().last().map(|cp| cp.0.clone()), - }, + current: self.tip.as_ref().map(|tip| tip.0.clone()), } } - /// Get a reference to the internal checkpoint map. 
- pub fn checkpoints(&self) -> &BTreeMap { - &self.checkpoints + /// Get a reference to the internal index mapping the height to block hash + pub fn heights(&self) -> &BTreeMap { + &self.index } } diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs index ff9f70bbe..de7a5bca5 100644 --- a/crates/chain/src/tx_graph.rs +++ b/crates/chain/src/tx_graph.rs @@ -617,8 +617,8 @@ impl TxGraph { } } }) - .filter_map(|block| match chain.checkpoints().get(&block.height) { - Some(chain_cp) if chain_cp.hash() == block.hash => None, + .filter_map(|block| match chain.heights().get(&block.height) { + Some(chain_hash) if *chain_hash == block.hash => None, _ => Some(block.height), }) } diff --git a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs index 53f53016e..52796a55e 100644 --- a/crates/chain/tests/test_indexed_tx_graph.rs +++ b/crates/chain/tests/test_indexed_tx_graph.rs @@ -8,7 +8,7 @@ use bdk_chain::{ keychain::{Balance, DerivationAdditions, KeychainTxOutIndex}, local_chain::LocalChain, tx_graph::Additions, - ChainPosition, ConfirmationHeightAnchor, TxGraph, + BlockId, ChainPosition, ConfirmationHeightAnchor, TxGraph, }; use bitcoin::{secp256k1::Secp256k1, BlockHash, OutPoint, Script, Transaction, TxIn, TxOut}; use miniscript::Descriptor; @@ -291,8 +291,10 @@ fn test_list_owned_txouts() { ( *tx, local_chain - .checkpoint(height) - .map(|cp| cp.block_id()) + .heights() + .get(&height) + .cloned() + .map(|hash| BlockId { height, hash }) .map(|anchor_block| ConfirmationHeightAnchor { anchor_block, confirmation_height: anchor_block.height, @@ -309,9 +311,10 @@ fn test_list_owned_txouts() { |height: u32, graph: &IndexedTxGraph>| { let chain_tip = local_chain - .checkpoint(height) - .map(|cp| cp.block_id()) - .expect("block must exist"); + .heights() + .get(&height) + .map(|&hash| BlockId { height, hash }) + .unwrap_or_else(|| panic!("block must exist at {}", height)); let txouts = graph .graph() .filter_chain_txouts( 
diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index b96395eb5..6a9ebd1b9 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -347,7 +347,7 @@ fn construct_update_tip( .map(|(height, hash)| BlockId { height, hash }) .fold(agreement_cp, |prev_cp, block| { Some(match prev_cp { - Some(cp) => cp.extend(block).expect("must extend checkpoint"), + Some(cp) => cp.push(block).expect("must extend checkpoint"), None => CheckPoint::new(block), }) }) diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index c02d71aff..78e9334ea 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -189,7 +189,9 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { .into_iter() .map(|(height, hash)| BlockId { height, hash }) .fold(first_cp, |prev_cp, block| { - prev_cp.extend(block).expect("must extend checkpoint") + prev_cp + .extend(core::iter::once(block)) + .expect("must extend checkpoint") }); Ok(new_tip) @@ -318,252 +320,3 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { Ok((graph, last_active_indexes)) } } - -// #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] -// #[cfg_attr(not(target_arch = "wasm32"), async_trait)] -// impl EsploraAsyncExt for esplora_client::AsyncClient { -// #[allow(clippy::result_large_err)] // FIXME -// async fn scan( -// &self, -// prev_tip: Option, -// keychain_spks: BTreeMap< -// K, -// impl IntoIterator + Send> + Send, -// >, -// txids: impl IntoIterator + Send> + Send, -// outpoints: impl IntoIterator + Send> + Send, -// stop_gap: usize, -// parallel_requests: usize, -// ) -> Result, Error> { -// let parallel_requests = Ord::max(parallel_requests, 1); - -// let (tip, _) = construct_update_tip(self, prev_tip).await?; -// let mut make_anchor = crate::confirmation_time_anchor_maker(&tip); -// let mut update = LocalUpdate::::new(tip); - -// for (keychain, spks) in keychain_spks { -// let mut spks = 
spks.into_iter(); -// let mut last_active_index = None; -// let mut empty_scripts = 0; -// type IndexWithTxs = (u32, Vec); - -// loop { -// let futures = (0..parallel_requests) -// .filter_map(|_| { -// let (index, script) = spks.next()?; -// let client = self.clone(); -// Some(async move { -// let mut related_txs = client.scripthash_txs(&script, None).await?; - -// let n_confirmed = -// related_txs.iter().filter(|tx| tx.status.confirmed).count(); -// // esplora pages on 25 confirmed transactions. If there are 25 or more we -// // keep requesting to see if there's more. -// if n_confirmed >= 25 { -// loop { -// let new_related_txs = client -// .scripthash_txs( -// &script, -// Some(related_txs.last().unwrap().txid), -// ) -// .await?; -// let n = new_related_txs.len(); -// related_txs.extend(new_related_txs); -// // we've reached the end -// if n < 25 { -// break; -// } -// } -// } - -// Result::<_, esplora_client::Error>::Ok((index, related_txs)) -// }) -// }) -// .collect::>(); - -// let n_futures = futures.len(); - -// for (index, related_txs) in futures.try_collect::>().await? { -// if related_txs.is_empty() { -// empty_scripts += 1; -// } else { -// last_active_index = Some(index); -// empty_scripts = 0; -// } -// for tx in related_txs { -// let anchor = make_anchor(&tx.status); - -// let _ = update.graph.insert_tx(tx.to_tx()); -// if let Some(anchor) = anchor { -// let _ = update.graph.insert_anchor(tx.txid, anchor); -// } -// } -// } - -// if n_futures == 0 || empty_scripts >= stop_gap { -// break; -// } -// } - -// if let Some(last_active_index) = last_active_index { -// update.keychain.insert(keychain, last_active_index); -// } -// } - -// for txid in txids.into_iter() { -// if update.graph.get_tx(txid).is_none() { -// match self.get_tx(&txid).await? { -// Some(tx) => { -// let _ = update.graph.insert_tx(tx); -// } -// None => continue, -// } -// } -// match self.get_tx_status(&txid).await? 
{ -// tx_status if tx_status.confirmed => { -// if let Some(anchor) = make_anchor(&tx_status) { -// let _ = update.graph.insert_anchor(txid, anchor); -// } -// } -// _ => continue, -// } -// } - -// for op in outpoints.into_iter() { -// let mut op_txs = Vec::with_capacity(2); -// if let ( -// Some(tx), -// tx_status @ TxStatus { -// confirmed: true, .. -// }, -// ) = ( -// self.get_tx(&op.txid).await?, -// self.get_tx_status(&op.txid).await?, -// ) { -// op_txs.push((tx, tx_status)); -// if let Some(OutputStatus { -// txid: Some(txid), -// status: Some(spend_status), -// .. -// }) = self.get_output_status(&op.txid, op.vout as _).await? -// { -// if let Some(spend_tx) = self.get_tx(&txid).await? { -// op_txs.push((spend_tx, spend_status)); -// } -// } -// } - -// for (tx, status) in op_txs { -// let txid = tx.txid(); -// let anchor = make_anchor(&status); - -// let _ = update.graph.insert_tx(tx); -// if let Some(anchor) = anchor { -// let _ = update.graph.insert_anchor(txid, anchor); -// } -// } -// } - -// // If a reorg occured during the update, anchors may be wrong. We handle this by scrapping -// // all anchors, reconstructing checkpoints and reconstructing anchors. -// while self.get_block_hash(update.tip.height()).await? != update.tip.hash() { -// let (new_tip, _) = construct_update_tip(self, Some(update.tip.clone())).await?; -// make_anchor = crate::confirmation_time_anchor_maker(&new_tip); - -// // Reconstruct graph with only transactions (no anchors). -// update.graph = TxGraph::new(update.graph.full_txs().map(|n| n.tx.clone())); -// update.tip = new_tip; - -// // Re-fetch anchors. 
-// let anchors = { -// let mut a = Vec::new(); -// for n in update.graph.full_txs() { -// let status = self.get_tx_status(&n.txid).await?; -// if !status.confirmed { -// continue; -// } -// if let Some(anchor) = make_anchor(&status) { -// a.push((n.txid, anchor)); -// } -// } -// a -// }; -// for (txid, anchor) in anchors { -// let _ = update.graph.insert_anchor(txid, anchor); -// } -// } - -// Ok(update) -// } -// } - -// /// Constructs a new checkpoint tip that can "connect" to our previous checkpoint history. We return -// /// the new checkpoint tip alongside the height of agreement between the two histories (if any). -// #[allow(clippy::result_large_err)] -// async fn construct_update_tip( -// client: &esplora_client::AsyncClient, -// prev_tip: Option, -// ) -> Result<(CheckPoint, Option), Error> { -// let new_tip_height = client.get_height().await?; - -// // If esplora returns a tip height that is lower than our previous tip, then checkpoints do not -// // need updating. We just return the previous tip and use that as the point of agreement. -// if let Some(prev_tip) = prev_tip.as_ref() { -// if new_tip_height < prev_tip.height() { -// return Ok((prev_tip.clone(), Some(prev_tip.height()))); -// } -// } - -// // Grab latest blocks from esplora atomically first. We assume that deeper blocks cannot be -// // reorged. This ensures that our checkpoint history is consistent. -// let mut new_blocks = client -// .get_blocks(Some(new_tip_height)) -// .await? -// .into_iter() -// .zip((0..new_tip_height).rev()) -// .map(|(b, height)| (height, b.id)) -// .collect::>(); - -// let mut agreement_cp = Option::::None; - -// for cp in prev_tip.iter().flat_map(CheckPoint::iter) { -// let cp_block = cp.block_id(); - -// // We check esplora blocks cached in `new_blocks` first, keeping the checkpoint history -// // consistent even during reorgs. 
-// let hash = match new_blocks.get(&cp_block.height) { -// Some(&hash) => hash, -// None => { -// assert!( -// new_tip_height >= cp_block.height, -// "already checked that esplora's tip cannot be smaller" -// ); -// let hash = client.get_block_hash(cp_block.height).await?; -// new_blocks.insert(cp_block.height, hash); -// hash -// } -// }; - -// if hash == cp_block.hash { -// agreement_cp = Some(cp); -// break; -// } -// } - -// let agreement_height = agreement_cp.as_ref().map(CheckPoint::height); - -// let new_tip = new_blocks -// .into_iter() -// // Prune `new_blocks` to only include blocks that are actually new. -// .filter(|(height, _)| Some(*height) > agreement_height) -// .map(|(height, hash)| BlockId { height, hash }) -// .fold(agreement_cp, |prev_cp, block| { -// Some(match prev_cp { -// Some(cp) => cp.push(block).expect("must extend cp"), -// None => CheckPoint::new(block), -// }) -// }) -// .expect("must have at least one checkpoint"); - -// Ok((new_tip, agreement_height)) -// } diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs index b940563df..c49397d8a 100644 --- a/crates/esplora/src/blocking_ext.rs +++ b/crates/esplora/src/blocking_ext.rs @@ -181,7 +181,7 @@ impl EsploraExt for esplora_client::BlockingClient { .into_iter() .map(|(height, hash)| BlockId { height, hash }) .fold(first_cp, |prev_cp, block| { - prev_cp.extend(block).expect("must extend checkpoint") + prev_cp.push(block).expect("must extend checkpoint") }); Ok(new_tip) From 53c129b821fd3c808e84f59f4778b80925e4def0 Mon Sep 17 00:00:00 2001 From: LLFourn Date: Fri, 7 Jul 2023 23:15:53 +0800 Subject: [PATCH 15/24] Early exit when there are no update blocks left --- crates/chain/src/local_chain.rs | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 31490efdf..a33721ada 100644 --- a/crates/chain/src/local_chain.rs +++ 
b/crates/chain/src/local_chain.rs @@ -417,6 +417,12 @@ fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result { if o.hash() == u.hash() { @@ -426,7 +432,7 @@ fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result Result { - // When we don't have a point of agreement you can imagine it is implicitly the - // genesis block so we need to do the final connectivity check which in this case - // just means making sure the entire original chain was invalidated. - if !prev_orig_was_invalidated && !point_of_agreement_found { - if let Some(prev_orig) = prev_orig { - return Err(CannotConnectError { - try_include_height: prev_orig.height(), - }); - } - } break; } _ => { @@ -471,5 +467,16 @@ fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result Date: Sat, 8 Jul 2023 09:41:00 +0800 Subject: [PATCH 16/24] impl IntoIterator for CheckPoint --- crates/chain/src/local_chain.rs | 43 ++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index a33721ada..7a1966927 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -80,9 +80,7 @@ impl CheckPoint { /// Iterate from this checkpoint in descending height. pub fn iter(&self) -> CheckPointIter { - CheckPointIter { - current: Some(Arc::clone(&self.0)), - } + self.clone().into_iter() } } @@ -101,6 +99,17 @@ impl Iterator for CheckPointIter { } } +impl IntoIterator for CheckPoint { + type Item = CheckPoint; + type IntoIter = CheckPointIter; + + fn into_iter(self) -> Self::IntoIter { + CheckPointIter { + current: Some(self.0), + } + } +} + /// This is a local implementation of [`ChainOracle`]. 
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord)] pub struct LocalChain { @@ -248,14 +257,12 @@ impl LocalChain { if let Some(start_height) = changeset.keys().next().cloned() { let mut extension = BTreeMap::default(); let mut base: Option = None; - if let Some(tip) = &self.tip { - for cp in tip.iter() { - if cp.height() >= start_height { - extension.insert(cp.height(), cp.hash()); - } else { - base = Some(cp); - break; - } + for cp in self.iter_checkpoints() { + if cp.height() >= start_height { + extension.insert(cp.height(), cp.hash()); + } else { + base = Some(cp); + break; } } @@ -308,13 +315,11 @@ impl LocalChain { /// Reindex the heights in the chain from (and including) `from` height fn reindex(&mut self, from: u32) { let _ = self.index.split_off(&from); - if let Some(tip) = &self.tip { - for cp in tip.iter() { - if cp.height() < from { - break; - } - self.index.insert(cp.height(), cp.hash()); + for cp in self.iter_checkpoints() { + if cp.height() < from { + break; } + self.index.insert(cp.height(), cp.hash()); } } @@ -383,8 +388,8 @@ impl std::error::Error for CannotConnectError {} fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result { let mut changeset = ChangeSet::default(); - let mut orig = orig.iter(); - let mut update = update.iter(); + let mut orig = orig.into_iter(); + let mut update = update.into_iter(); let mut curr_orig = None; let mut curr_update = None; let mut prev_orig: Option = None; From 2efa8919dab87d5939c5c40a7d90379e16ce1462 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Sun, 9 Jul 2023 18:25:00 +0800 Subject: [PATCH 17/24] Add lb_height parameter when merging chains The lower bound height is the lowest checkpoint the update chain can introduce to the original chain. Also add optimisation when we have a perfect connection. Add `debug_assertions` to `LocalChain` methods that mutate. Add more `LocalChain` unit tests. 
--- crates/bdk/src/wallet/mod.rs | 2 +- crates/chain/src/local_chain.rs | 116 ++++++++++++++++---- crates/chain/tests/test_local_chain.rs | 48 +++++++- example-crates/example_electrum/src/main.rs | 2 +- example-crates/example_rpc/src/main.rs | 2 +- 5 files changed, 144 insertions(+), 26 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index 99b18b311..9596074bd 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -1710,7 +1710,7 @@ impl Wallet { where D: PersistBackend, { - let mut changeset = ChangeSet::from(self.chain.update(update.tip)?); + let mut changeset = ChangeSet::from(self.chain.update(update.tip, None)?); let (_, index_additions) = self .indexed_graph .index diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 7a1966927..30c9145a2 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -167,6 +167,10 @@ impl LocalChain { pub fn from_changeset(changeset: ChangeSet) -> Self { let mut chain = Self::default(); chain.apply_changeset(&changeset); + + #[cfg(debug_assertions)] + chain._check_consistency(Some(&changeset)); + chain } @@ -177,6 +181,10 @@ impl LocalChain { ..Default::default() }; _self.reindex(0); + + #[cfg(debug_assertions)] + _self._check_consistency(None); + _self } @@ -199,7 +207,12 @@ impl LocalChain { } } - Self { index: blocks, tip } + let chain = Self { index: blocks, tip }; + + #[cfg(debug_assertions)] + chain._check_consistency(None); + + chain } /// Get the highest checkpoint. @@ -212,23 +225,23 @@ impl LocalChain { self.tip.is_none() } - /// Updates [`Self`] with the given `new_tip`. + /// Updates [`Self`] with the given `update_tip`. /// /// The method returns [`ChangeSet`] on success. This represents the applied changes to /// [`Self`]. /// - /// To update, the `new_tip` must *connect* with `self`. 
If `self` and `new_tip` has a mutual - /// checkpoint (same height and hash), it can connect if: + /// To update, the `update_tip` must *connect* with `self`. If `self` and `update_tip` has a + /// mutual checkpoint (same height and hash), it can connect if: /// * The mutual checkpoint is the tip of `self`. - /// * An ancestor of `new_tip` has a height which is of the checkpoint one higher than the + /// * An ancestor of `update_tip` has a height which is of the checkpoint one higher than the /// mutual checkpoint from `self`. /// /// Additionally: - /// * If `self` is empty, `new_tip` will always connect. - /// * If `self` only has one checkpoint, `new_tip` must have an ancestor checkpoint with the + /// * If `self` is empty, `update_tip` will always connect. + /// * If `self` only has one checkpoint, `update_tip` must have an ancestor checkpoint with the /// same height as it. /// - /// To invalidate from a given checkpoint, `new_tip` must contain an ancestor checkpoint with + /// To invalidate from a given checkpoint, `update_tip` must contain an ancestor checkpoint with /// the same height but different hash. /// /// # Errors @@ -238,18 +251,40 @@ impl LocalChain { /// Refer to [module-level documentation] for more. 
/// /// [module-level documentation]: crate::local_chain - pub fn update(&mut self, new_tip: CheckPoint) -> Result { - match self.tip() { + pub fn update( + &mut self, + update_tip: CheckPoint, + update_lower_bound: Option, + ) -> Result { + let changeset = match self.tip() { Some(original_tip) => { - let changeset = merge_chains(original_tip, new_tip)?; - self.apply_changeset(&changeset); - Ok(changeset) + let (changeset, perfect_connection) = + merge_chains(original_tip, update_tip.clone(), update_lower_bound)?; + if perfect_connection { + self.tip = Some(update_tip); + for (height, hash) in &changeset { + match hash { + Some(hash) => self.index.insert(*height, *hash), + None => self.index.remove(height), + }; + } + changeset + } else { + self.apply_changeset(&changeset); + // return early as `apply_changeset` already calls `check_consistency` + return Ok(changeset); + } } None => { - *self = Self::from_tip(new_tip); - Ok(self.initial_changeset()) + *self = Self::from_tip(update_tip); + self.initial_changeset() } - } + }; + + #[cfg(debug_assertions)] + self._check_consistency(Some(&changeset)); + + Ok(changeset) } /// Apply the given `changeset`. @@ -285,6 +320,9 @@ impl LocalChain { }; self.tip = new_tip; self.reindex(start_height); + + #[cfg(debug_assertions)] + self._check_consistency(Some(changeset)); } } @@ -340,6 +378,33 @@ impl LocalChain { pub fn heights(&self) -> &BTreeMap { &self.index } + + /// Checkpoints that exist under `self.tip` and blocks indexed in `self.index` should be equal. + /// Additionally, if a `changeset` is provided, the changes specified in the `changeset` should + /// be reflected in `self.index`. 
+ #[cfg(debug_assertions)] + fn _check_consistency(&self, changeset: Option<&ChangeSet>) { + debug_assert_eq!( + self.tip + .iter() + .flat_map(CheckPoint::iter) + .map(|cp| (cp.height(), cp.hash())) + .collect::>(), + self.index, + "checkpoint history and index must be consistent" + ); + + if let Some(changeset) = changeset { + for (height, exp_hash) in changeset { + let hash = self.index.get(height); + assert_eq!( + hash, + exp_hash.as_ref(), + "changeset changes should be reflected in the internal index" + ); + } + } + } } /// Represents a failure when trying to insert a checkpoint into [`LocalChain`]. @@ -386,10 +451,19 @@ impl core::fmt::Display for CannotConnectError { #[cfg(feature = "std")] impl std::error::Error for CannotConnectError {} -fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result { +fn merge_chains( + original_tip: CheckPoint, + update_tip: CheckPoint, + update_lb_height: Option, +) -> Result<(ChangeSet, bool), CannotConnectError> { let mut changeset = ChangeSet::default(); - let mut orig = orig.into_iter(); - let mut update = update.into_iter(); + let mut orig = original_tip.into_iter(); + let mut update = update_tip + .into_iter() + .take_while(|cp| match update_lb_height { + Some(lb_height) => lb_height <= cp.height(), + None => true, + }); let mut curr_orig = None; let mut curr_update = None; let mut prev_orig: Option = None; @@ -449,7 +523,7 @@ fn merge_chains(orig: CheckPoint, update: CheckPoint) -> Result Result { impl<'a> TestLocalChain<'a> { fn run(mut self) { - let got_changeset = match self.chain.update(self.new_tip) { + println!("[TestLocalChain] test: {}", self.name); + let got_changeset = match self.chain.update(self.new_tip, None) { Ok(changeset) => changeset, Err(got_err) => { assert_eq!( @@ -110,8 +111,12 @@ fn update_local_chain() { init_changeset: &[(0, Some(h!("A")))], }, }, + // Introduce an older checkpoint (B) + // | 0 | 1 | 2 | 3 + // chain | C D + // update | B C TestLocalChain { - name: "can introduce older 
checkpoints", + name: "can introduce older checkpoint", chain: local_chain![(2, h!("C")), (3, h!("D"))], new_tip: chain_update![(1, h!("B")), (2, h!("C"))], exp: ExpectedResult::Ok { @@ -119,6 +124,45 @@ fn update_local_chain() { init_changeset: &[(1, Some(h!("B"))), (2, Some(h!("C"))), (3, Some(h!("D")))], }, }, + // Introduce an older checkpoint (A) that is not directly behind PoA + // | 1 | 2 | 3 + // chain | B C + // update | A C + TestLocalChain { + name: "can introduce older checkpoint 2", + chain: local_chain![(3, h!("B")), (4, h!("C"))], + new_tip: chain_update![(2, h!("A")), (4, h!("C"))], + exp: ExpectedResult::Ok { + changeset: &[(2, Some(h!("A")))], + init_changeset: &[(2, Some(h!("A"))), (3, Some(h!("B"))), (4, Some(h!("C")))], + } + }, + // Introduce an older checkpoint (B) that is not the oldest checkpoint + // | 1 | 2 | 3 + // chain | A C + // update | B C + TestLocalChain { + name: "can introduce older checkpoint 3", + chain: local_chain![(1, h!("A")), (3, h!("C"))], + new_tip: chain_update![(2, h!("B")), (3, h!("C"))], + exp: ExpectedResult::Ok { + changeset: &[(2, Some(h!("B")))], + init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))], + } + }, + // Introduce two older checkpoints below the PoA + // | 1 | 2 | 3 + // chain | C + // update | A B C + TestLocalChain { + name: "introduce two older checkpoints below PoA", + chain: local_chain![(3, h!("C"))], + new_tip: chain_update![(1, h!("A")), (2, h!("B")), (3, h!("C"))], + exp: ExpectedResult::Ok { + changeset: &[(1, Some(h!("A"))), (2, Some(h!("B")))], + init_changeset: &[(1, Some(h!("A"))), (2, Some(h!("B"))), (3, Some(h!("C")))], + }, + }, TestLocalChain { name: "fix blockhash before agreement point", chain: local_chain![(0, h!("im-wrong")), (1, h!("we-agree"))], diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index 89a54b7ef..c78d47da7 100644 --- a/example-crates/example_electrum/src/main.rs +++ 
b/example-crates/example_electrum/src/main.rs @@ -274,7 +274,7 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.update(final_update.tip)?; + let chain_changeset = chain.update(final_update.tip, None)?; let indexed_additions = { let mut additions = IndexedAdditions::::default(); diff --git a/example-crates/example_rpc/src/main.rs b/example-crates/example_rpc/src/main.rs index b7eb3a859..a1857cf80 100644 --- a/example-crates/example_rpc/src/main.rs +++ b/example-crates/example_rpc/src/main.rs @@ -194,7 +194,7 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.update(update.tip)?; + let chain_changeset = chain.update(update.tip, None)?; let mut indexed_additions = IndexedAdditions::::default(); From 7f4f3dba5e495f36eb6bf71024531a7a48ab88db Mon Sep 17 00:00:00 2001 From: remix Date: Mon, 5 Jun 2023 14:24:29 +0200 Subject: [PATCH 18/24] Add esplora CLI example --- Cargo.toml | 1 + crates/chain/src/local_chain.rs | 21 +- example-crates/example_esplora/Cargo.toml | 12 + example-crates/example_esplora/src/main.rs | 348 +++++++++++++++++++++ 4 files changed, 372 insertions(+), 10 deletions(-) create mode 100644 example-crates/example_esplora/Cargo.toml create mode 100644 example-crates/example_esplora/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index 8798269e8..43623ea5d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "crates/bitcoind_rpc", "example-crates/example_cli", "example-crates/example_electrum", + "example-crates/example_esplora", "example-crates/example_rpc", "example-crates/wallet_electrum", "example-crates/wallet_esplora", diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index 30c9145a2..d333af99a 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -260,20 +260,21 @@ impl LocalChain { 
Some(original_tip) => { let (changeset, perfect_connection) = merge_chains(original_tip, update_tip.clone(), update_lower_bound)?; - if perfect_connection { - self.tip = Some(update_tip); - for (height, hash) in &changeset { - match hash { - Some(hash) => self.index.insert(*height, *hash), - None => self.index.remove(height), - }; - } - changeset - } else { + + if !perfect_connection { self.apply_changeset(&changeset); // return early as `apply_changeset` already calls `check_consistency` return Ok(changeset); } + + self.tip = Some(update_tip); + for (height, hash) in &changeset { + match hash { + Some(hash) => self.index.insert(*height, *hash), + None => self.index.remove(height), + }; + } + changeset } None => { *self = Self::from_tip(update_tip); diff --git a/example-crates/example_esplora/Cargo.toml b/example-crates/example_esplora/Cargo.toml new file mode 100644 index 000000000..ccad862e9 --- /dev/null +++ b/example-crates/example_esplora/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "example_esplora" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bdk_chain = { path = "../../crates/chain", features = ["serde"] } +bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] } +example_cli = { path = "../example_cli" } + diff --git a/example-crates/example_esplora/src/main.rs b/example-crates/example_esplora/src/main.rs new file mode 100644 index 000000000..50c659ca9 --- /dev/null +++ b/example-crates/example_esplora/src/main.rs @@ -0,0 +1,348 @@ +use std::{ + collections::BTreeMap, + io::{self, Write}, + sync::Mutex, +}; + +use bdk_chain::{ + bitcoin::{Address, Network, OutPoint, Txid}, + indexed_tx_graph::{IndexedAdditions, IndexedTxGraph}, + keychain::{LocalChangeSet, LocalUpdate}, + local_chain::{CheckPoint, LocalChain}, + Append, ConfirmationTimeAnchor, +}; + +use bdk_esplora::{esplora_client, EsploraExt}; + +use example_cli::{ + 
anyhow::{self, Context}, + clap::{self, Parser, Subcommand}, + Keychain, +}; + +const DB_MAGIC: &[u8] = b"bdk_example_esplora"; +const DB_PATH: &str = ".bdk_esplora_example.db"; + +#[derive(Subcommand, Debug, Clone)] +enum EsploraCommands { + /// Scans the addresses in the wallet using the esplora API. + Scan { + /// When a gap this large has been found for a keychain, it will stop. + #[clap(long, default_value = "5")] + stop_gap: usize, + #[clap(flatten)] + scan_options: ScanOptions, + }, + /// Scans particular addresses using the esplora API. + Sync { + /// Scan all the unused addresses. + #[clap(long)] + unused_spks: bool, + /// Scan every address that you have derived. + #[clap(long)] + all_spks: bool, + /// Scan unspent outpoints for spends or changes to confirmation status of residing tx. + #[clap(long)] + utxos: bool, + /// Scan unconfirmed transactions for updates. + #[clap(long)] + unconfirmed: bool, + #[clap(flatten)] + scan_options: ScanOptions, + }, +} + +#[derive(Parser, Debug, Clone, PartialEq)] +pub struct ScanOptions { + /// Max number of concurrent esplora server requests.
+ #[clap(long, default_value = "10")] + pub parallel_requests: usize, +} + +fn main() -> anyhow::Result<()> { + let (args, keymap, index, db, init_changeset) = example_cli::init::< + EsploraCommands, + LocalChangeSet, + >(DB_MAGIC, DB_PATH)?; + + let graph = Mutex::new({ + let mut graph = IndexedTxGraph::new(index); + graph.apply_additions(init_changeset.indexed_additions); + graph + }); + + let chain = Mutex::new({ + let mut chain = LocalChain::default(); + chain.apply_changeset(&init_changeset.chain_changeset); + chain + }); + + let esplora_url = match args.network { + Network::Bitcoin => "https://mempool.space/api", + Network::Testnet => "https://mempool.space/testnet/api", + Network::Regtest => "http://localhost:3002", + Network::Signet => "https://mempool.space/signet/api", + }; + + let client = esplora_client::Builder::new(esplora_url).build_blocking()?; + + // Match the given command. Execute and return if command is provided by example_cli + let esplora_cmd = match &args.command { + // Commands that are handled by this specific example + example_cli::Commands::ChainSpecific(electrum_cmd) => electrum_cmd, + // General commands handled by example_cli. Execute the cmd and return.
+ general_cmd => { + let res = example_cli::handle_commands( + &graph, + &db, + &chain, + &keymap, + args.network, + |tx| { + client + .broadcast(tx) + .map(|_| ()) + .map_err(anyhow::Error::from) + }, + general_cmd.clone(), + ); + + db.lock().unwrap().commit()?; + return res; + } + }; + + let (graph_update, last_active_indices) = match &esplora_cmd { + EsploraCommands::Scan { + stop_gap, + scan_options, + } => { + let graph = graph.lock().unwrap(); + + let keychain_spks = graph + .index + .spks_of_all_keychains() + .into_iter() + .map(|(keychain, iter)| { + let mut first = true; + let spk_iter = iter.inspect(move |(i, _)| { + if first { + eprint!("\nscanning {}: ", keychain); + first = false; + } + eprint!("{} ", i); + let _ = io::stdout().flush(); + }); + (keychain, spk_iter) + }) + .collect::>(); + + drop(graph); + + client + .update_tx_graph( + keychain_spks, + core::iter::empty(), + core::iter::empty(), + *stop_gap, + scan_options.parallel_requests, + ) + .context("scanning for transactions")? + } + EsploraCommands::Sync { + mut unused_spks, + all_spks, + mut utxos, + mut unconfirmed, + scan_options, + } => { + // Get a short lock on the tracker to get the spks we're interested in + let graph = graph.lock().unwrap(); + let chain = chain.lock().unwrap(); + let chain_tip = chain.tip().map(|cp| cp.block_id()).unwrap_or_default(); + + if !(*all_spks || unused_spks || utxos || unconfirmed) { + unused_spks = true; + unconfirmed = true; + utxos = true; + } else if *all_spks { + unused_spks = false; + } + + let mut spks: Box> = + Box::new(core::iter::empty()); + if *all_spks { + let all_spks = graph + .index + .all_spks() + .iter() + .map(|(k, v)| (*k, v.clone())) + .collect::>(); + spks = Box::new(spks.chain(all_spks.into_iter().map(|(index, script)| { + eprintln!("scanning {:?}", index); + script + }))); + } + if unused_spks { + let unused_spks = graph + .index + .unused_spks(..) 
+ .map(|(k, v)| (*k, v.clone())) + .collect::>(); + spks = Box::new(spks.chain(unused_spks.into_iter().map(|(index, script)| { + eprintln!( + "Checking if address {} {:?} has been used", + Address::from_script(&script, args.network).unwrap(), + index + ); + + script + }))); + } + + let mut outpoints: Box> = Box::new(core::iter::empty()); + + if utxos { + let init_outpoints = graph.index.outpoints().iter().cloned(); + + let utxos = graph + .graph() + .filter_chain_unspents(&*chain, chain_tip, init_outpoints) + .map(|(_, utxo)| utxo) + .collect::>(); + + outpoints = Box::new( + utxos + .into_iter() + .inspect(|utxo| { + eprintln!( + "Checking if outpoint {} (value: {}) has been spent", + utxo.outpoint, utxo.txout.value + ); + }) + .map(|utxo| utxo.outpoint), + ); + }; + + let mut txids: Box> = Box::new(core::iter::empty()); + + if unconfirmed { + let unconfirmed_txids = graph + .graph() + .list_chain_txs(&*chain, chain_tip) + .filter(|canonical_tx| !canonical_tx.observed_as.is_confirmed()) + .map(|canonical_tx| canonical_tx.node.txid) + .collect::>(); + + txids = Box::new(unconfirmed_txids.into_iter().inspect(|txid| { + eprintln!("Checking if {} is confirmed yet", txid); + })); + } + + // drop lock on graph and chain + drop((graph, chain)); + + ( + client + .update_tx_graph_without_keychain( + spks, + txids, + outpoints, + scan_options.parallel_requests, + ) + .context("syncing transaction updates")?, + Default::default(), + ) + } + }; + + println!(); + + let (heights_to_fetch, tip) = { + let chain = &*chain.lock().unwrap(); + + let heights_to_fetch = graph_update.missing_blocks(chain).collect::>(); + let tip = chain.tip(); + (heights_to_fetch, tip) + }; + + #[cfg(debug_assertions)] + println!( + "old chain: {:?}", + tip.iter() + .flat_map(CheckPoint::iter) + .map(|cp| cp.height()) + .collect::>() + ); + println!("prev tip: {}", tip.as_ref().map_or(0, CheckPoint::height)); + println!("missing blocks: {:?}", heights_to_fetch); + + let tip = client + 
.update_local_chain(tip, heights_to_fetch) + .context("scanning for blocks")?; + + #[cfg(debug_assertions)] + println!( + "new chain: {:?}", + tip.iter().map(|cp| cp.height()).collect::>() + ); + println!("new tip: {}", tip.height()); + + let update = LocalUpdate { + keychain: last_active_indices, + graph: graph_update, + tip, + }; + + // check that all anchors are part of the new tip's history + #[cfg(debug_assertions)] + { + use bdk_chain::bitcoin::BlockHash; + use bdk_chain::collections::HashMap; + let chain_heights = update + .tip + .iter() + .map(|cp| (cp.height(), cp.hash())) + .collect::>(); + for (anchor, _) in update.graph.all_anchors() { + assert_eq!(anchor.anchor_block.height, anchor.confirmation_height); + assert!(chain_heights.contains_key(&anchor.anchor_block.height)); + + let remote_hash = chain_heights + .get(&anchor.confirmation_height) + .expect("must have block"); + + // inform about mismatched blocks + if remote_hash != &anchor.anchor_block.hash { + println!("mismatched block @ {}!", anchor.confirmation_height); + println!("\t- anchor_block: {}", anchor.anchor_block.hash); + println!("\t- from_chain: {}", remote_hash); + } + } + } + + let db_changeset: LocalChangeSet = { + let mut chain = chain.lock().unwrap(); + let mut graph = graph.lock().unwrap(); + + let chain_changeset = chain.update(update.tip, None)?; + + let indexed_additions = { + let mut additions = IndexedAdditions::default(); + let (_, index_additions) = graph.index.reveal_to_target_multi(&update.keychain); + additions.append(IndexedAdditions::from(index_additions)); + additions.append(graph.apply_update(update.graph)); + additions + }; + + LocalChangeSet { + chain_changeset, + indexed_additions, + } + }; + + let mut db = db.lock().unwrap(); + db.stage(db_changeset); + db.commit()?; + Ok(()) +} From fedb2cec2270d9d600ba4f54e3447dddb3907f62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 10 Jul 2023 15:12:06 +0800 Subject: [PATCH 19/24] Use 
`introduce_older_blocks` bool variable for `LocalChain::update` This replaces using a lower bound height (which is a more complex API). --- crates/bdk/src/wallet/mod.rs | 5 +++- crates/chain/src/keychain.rs | 12 +++++++++ crates/chain/src/local_chain.rs | 27 +++++++++++-------- crates/chain/tests/test_local_chain.rs | 2 +- crates/electrum/src/electrum_ext.rs | 2 ++ example-crates/example_electrum/src/main.rs | 3 ++- example-crates/example_esplora/src/main.rs | 29 ++++++++------------- example-crates/example_rpc/src/main.rs | 3 ++- 8 files changed, 50 insertions(+), 33 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index 9596074bd..8cf7eaa6d 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -1710,7 +1710,10 @@ impl Wallet { where D: PersistBackend, { - let mut changeset = ChangeSet::from(self.chain.update(update.tip, None)?); + let mut changeset = ChangeSet::from( + self.chain + .update(update.tip, update.introduce_older_blocks)?, + ); let (_, index_additions) = self .indexed_graph .index diff --git a/crates/chain/src/keychain.rs b/crates/chain/src/keychain.rs index d83868890..ad2583cf9 100644 --- a/crates/chain/src/keychain.rs +++ b/crates/chain/src/keychain.rs @@ -96,12 +96,23 @@ impl AsRef> for DerivationAdditions { pub struct LocalUpdate { /// Last active derivation index per keychain (`K`). pub keychain: BTreeMap, + /// Update for the [`TxGraph`]. pub graph: TxGraph, + /// Update for the [`LocalChain`]. /// /// [`LocalChain`]: local_chain::LocalChain pub tip: CheckPoint, + + /// Whether the [`LocalChain`] update (`tip`) can introduce blocks below the original chain's + /// tip without invalidating blocks. + /// + /// Refer to [`LocalChain::update`] for more. 
+ /// + /// [`LocalChain`]: local_chain::LocalChain + /// [`LocalChain::update`]: local_chain::LocalChain::update + pub introduce_older_blocks: bool, } impl LocalUpdate { @@ -111,6 +122,7 @@ impl LocalUpdate { keychain: BTreeMap::new(), graph: TxGraph::default(), tip, + introduce_older_blocks: false, } } } diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index d333af99a..a1c8502f5 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -227,6 +227,14 @@ impl LocalChain { /// Updates [`Self`] with the given `update_tip`. /// + /// `introduce_older_blocks` specifies whether the `update_tip`'s history can introduce blocks + /// below the original chain's tip without invalidating blocks. Block-by-block syncing + /// mechanisms would typically create updates that build upon the previous tip. In this case, + /// this parameter would be false. Script-pubkey based syncing mechanisms may not introduce + /// transactions in a chronological order so some updates require introducing older blocks (to + /// anchor older transactions). For script-pubkey based syncing, this parameter would typically + /// be true. + /// /// The method returns [`ChangeSet`] on success. This represents the applied changes to /// [`Self`].
/// @@ -254,12 +262,12 @@ impl LocalChain { pub fn update( &mut self, update_tip: CheckPoint, - update_lower_bound: Option, + introduce_older_blocks: bool, ) -> Result { let changeset = match self.tip() { Some(original_tip) => { let (changeset, perfect_connection) = - merge_chains(original_tip, update_tip.clone(), update_lower_bound)?; + merge_chains(original_tip, update_tip.clone(), introduce_older_blocks)?; if !perfect_connection { self.apply_changeset(&changeset); @@ -455,16 +463,11 @@ impl std::error::Error for CannotConnectError {} fn merge_chains( original_tip: CheckPoint, update_tip: CheckPoint, - update_lb_height: Option, + introduce_older_blocks: bool, ) -> Result<(ChangeSet, bool), CannotConnectError> { let mut changeset = ChangeSet::default(); let mut orig = original_tip.into_iter(); - let mut update = update_tip - .into_iter() - .take_while(|cp| match update_lb_height { - Some(lb_height) => lb_height <= cp.height(), - None => true, - }); + let mut update = update_tip.into_iter(); let mut curr_orig = None; let mut curr_update = None; let mut prev_orig: Option = None; @@ -520,10 +523,12 @@ fn merge_chains( } point_of_agreement_found = true; prev_orig_was_invalidated = false; - // OPTIMIZATION -- if we have the same underlying references at this + // OPTIMIZATION 1 -- if we know that older blocks cannot be introduced without + // invalidation, we can break after finding the point of agreement + // OPTIMIZATION 2 -- if we have the same underlying references at this // point then we know everything else in the two chains will match so the // changeset is fine. 
- if Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) { + if !introduce_older_blocks || Arc::as_ptr(&o.0) == Arc::as_ptr(&u.0) { return Ok((changeset, true)); } } else { diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs index ee8671d62..0ad1045da 100644 --- a/crates/chain/tests/test_local_chain.rs +++ b/crates/chain/tests/test_local_chain.rs @@ -26,7 +26,7 @@ enum ExpectedResult<'a> { impl<'a> TestLocalChain<'a> { fn run(mut self) { println!("[TestLocalChain] test: {}", self.name); - let got_changeset = match self.chain.update(self.new_tip, None) { + let got_changeset = match self.chain.update(self.new_tip, true) { Ok(changeset) => changeset, Err(got_err) => { assert_eq!( diff --git a/crates/electrum/src/electrum_ext.rs b/crates/electrum/src/electrum_ext.rs index 6a9ebd1b9..de78c6439 100644 --- a/crates/electrum/src/electrum_ext.rs +++ b/crates/electrum/src/electrum_ext.rs @@ -72,6 +72,7 @@ impl ElectrumUpdate { keychain: self.keychain_update, graph: graph_update, tip: self.chain_update, + introduce_older_blocks: true, }) } } @@ -145,6 +146,7 @@ impl ElectrumUpdate { graph }, tip: update.tip, + introduce_older_blocks: true, }) } } diff --git a/example-crates/example_electrum/src/main.rs b/example-crates/example_electrum/src/main.rs index c78d47da7..34091634d 100644 --- a/example-crates/example_electrum/src/main.rs +++ b/example-crates/example_electrum/src/main.rs @@ -274,7 +274,8 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.update(final_update.tip, None)?; + let chain_changeset = + chain.update(final_update.tip, final_update.introduce_older_blocks)?; let indexed_additions = { let mut additions = IndexedAdditions::::default(); diff --git a/example-crates/example_esplora/src/main.rs b/example-crates/example_esplora/src/main.rs index 50c659ca9..b7d48ec84 100644 --- a/example-crates/example_esplora/src/main.rs +++ 
b/example-crates/example_esplora/src/main.rs @@ -7,7 +7,7 @@ use std::{ use bdk_chain::{ bitcoin::{Address, Network, OutPoint, Txid}, indexed_tx_graph::{IndexedAdditions, IndexedTxGraph}, - keychain::{LocalChangeSet, LocalUpdate}, + keychain::LocalChangeSet, local_chain::{CheckPoint, LocalChain}, Append, ConfirmationTimeAnchor, }; @@ -112,7 +112,7 @@ fn main() -> anyhow::Result<()> { } }; - let (graph_update, last_active_indices) = match &esplora_cmd { + let (update_graph, update_keychain_indices) = match &esplora_cmd { EsploraCommands::Scan { stop_gap, scan_options, @@ -261,7 +261,7 @@ fn main() -> anyhow::Result<()> { let (heights_to_fetch, tip) = { let chain = &*chain.lock().unwrap(); - let heights_to_fetch = graph_update.missing_blocks(chain).collect::>(); + let heights_to_fetch = update_graph.missing_blocks(chain).collect::>(); let tip = chain.tip(); (heights_to_fetch, tip) }; @@ -277,34 +277,27 @@ fn main() -> anyhow::Result<()> { println!("prev tip: {}", tip.as_ref().map_or(0, CheckPoint::height)); println!("missing blocks: {:?}", heights_to_fetch); - let tip = client + let update_tip = client .update_local_chain(tip, heights_to_fetch) .context("scanning for blocks")?; #[cfg(debug_assertions)] println!( "new chain: {:?}", - tip.iter().map(|cp| cp.height()).collect::>() + update_tip.iter().map(|cp| cp.height()).collect::>() ); - println!("new tip: {}", tip.height()); - - let update = LocalUpdate { - keychain: last_active_indices, - graph: graph_update, - tip, - }; + println!("new tip: {}", update_tip.height()); // check that all anchors are part of the new tip's history #[cfg(debug_assertions)] { use bdk_chain::bitcoin::BlockHash; use bdk_chain::collections::HashMap; - let chain_heights = update - .tip + let chain_heights = update_tip .iter() .map(|cp| (cp.height(), cp.hash())) .collect::>(); - for (anchor, _) in update.graph.all_anchors() { + for (anchor, _) in update_graph.all_anchors() { assert_eq!(anchor.anchor_block.height, anchor.confirmation_height); 
assert!(chain_heights.contains_key(&anchor.anchor_block.height)); @@ -325,13 +318,13 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.update(update.tip, None)?; + let chain_changeset = chain.update(update_tip, true)?; let indexed_additions = { let mut additions = IndexedAdditions::default(); - let (_, index_additions) = graph.index.reveal_to_target_multi(&update.keychain); + let (_, index_additions) = graph.index.reveal_to_target_multi(&update_keychain_indices); additions.append(IndexedAdditions::from(index_additions)); - additions.append(graph.apply_update(update.graph)); + additions.append(graph.apply_update(update_graph)); additions }; diff --git a/example-crates/example_rpc/src/main.rs b/example-crates/example_rpc/src/main.rs index a1857cf80..9725f31a8 100644 --- a/example-crates/example_rpc/src/main.rs +++ b/example-crates/example_rpc/src/main.rs @@ -194,7 +194,8 @@ fn main() -> anyhow::Result<()> { let mut chain = chain.lock().unwrap(); let mut graph = graph.lock().unwrap(); - let chain_changeset = chain.update(update.tip, None)?; + let chain_changeset = + chain.update(update.tip, update.introduce_older_blocks)?; let mut indexed_additions = IndexedAdditions::::default(); From 37f7494dfb65f0fd81157d21a9c21beb8367f573 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 10 Jul 2023 23:30:25 +0800 Subject: [PATCH 20/24] Remove `prune` param in `Wallet::apply_update` and use 2 methods instead The introduced method is `Wallet::prune_and_apply_update`. 
--- crates/bdk/src/wallet/mod.rs | 46 ++++++++++++++++++----- example-crates/wallet_electrum/src/main.rs | 2 +- example-crates/wallet_esplora/src/main.rs | 2 +- .../wallet_esplora_async/src/main.rs | 2 +- 4 files changed, 38 insertions(+), 14 deletions(-) diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs index 8cf7eaa6d..90a62340e 100644 --- a/crates/bdk/src/wallet/mod.rs +++ b/crates/bdk/src/wallet/mod.rs @@ -1699,14 +1699,11 @@ impl Wallet { /// Applies an update to the wallet and stages the changes (but does not [`commit`] them). /// Returns whether the `update` resulted in any changes. /// - /// If `prune` is set, irrelevant transactions are pruned. Relevant transactions change the UTXO - /// set of tracked script pubkeys (script pubkeys derived from tracked descriptors). - /// /// Usually you create an `update` by interacting with some blockchain data source and inserting /// transactions related to your wallet into it. /// /// [`commit`]: Self::commit - pub fn apply_update(&mut self, update: Update, prune: bool) -> Result + pub fn apply_update(&mut self, update: Update) -> Result where D: PersistBackend, { @@ -1719,14 +1716,41 @@ impl Wallet { .index .reveal_to_target_multi(&update.keychain); changeset.append(ChangeSet::from(IndexedAdditions::from(index_additions))); - changeset.append( - if prune { - self.indexed_graph.prune_and_apply_update(update.graph) - } else { - self.indexed_graph.apply_update(update.graph) - } - .into(), + changeset.append(ChangeSet::from( + self.indexed_graph.apply_update(update.graph), + )); + + let changed = !changeset.is_empty(); + self.persist.stage(changeset); + Ok(changed) + } + + /// Applies an update to the wallet (after pruning it of irrelevant transactions) and stages + /// the changes (does not [`commit`] them). + /// + /// Irrelevant transactions are transactions that do not change the UTXO set of tracked script + /// pubkeys (script pubkeys that are derived from tracked descriptors).
+ /// + /// To apply an update without pruning, use [`apply_update`]. + /// + /// [`commit`]: Self::commit + /// [`apply_update`]: Self::apply_update + pub fn prune_and_apply_update(&mut self, update: Update) -> Result + where + D: PersistBackend, + { + let mut changeset = ChangeSet::from( + self.chain + .update(update.tip, update.introduce_older_blocks)?, ); + let (_, index_additions) = self + .indexed_graph + .index + .reveal_to_target_multi(&update.keychain); + changeset.append(ChangeSet::from(IndexedAdditions::from(index_additions))); + changeset.append(ChangeSet::from( + self.indexed_graph.prune_and_apply_update(update.graph), + )); let changed = !changeset.is_empty(); self.persist.stage(changeset); diff --git a/example-crates/wallet_electrum/src/main.rs b/example-crates/wallet_electrum/src/main.rs index 32663b2b5..2355a6fb0 100644 --- a/example-crates/wallet_electrum/src/main.rs +++ b/example-crates/wallet_electrum/src/main.rs @@ -59,7 +59,7 @@ fn main() -> Result<(), Box> { let missing = electrum_update.missing_full_txs(wallet.as_ref()); let update = electrum_update.finalize_as_confirmation_time(&client, None, missing)?; - wallet.apply_update(update, false)?; + wallet.apply_update(update)?; wallet.commit()?; let balance = wallet.get_balance(); diff --git a/example-crates/wallet_esplora/src/main.rs b/example-crates/wallet_esplora/src/main.rs index 06441d4a0..d83f94ae4 100644 --- a/example-crates/wallet_esplora/src/main.rs +++ b/example-crates/wallet_esplora/src/main.rs @@ -64,7 +64,7 @@ fn main() -> Result<(), Box> { ..LocalUpdate::new(new_tip) }; - wallet.apply_update(update, false)?; + wallet.apply_update(update)?; wallet.commit()?; println!(); diff --git a/example-crates/wallet_esplora_async/src/main.rs b/example-crates/wallet_esplora_async/src/main.rs index 385913ea4..5c0a73d72 100644 --- a/example-crates/wallet_esplora_async/src/main.rs +++ b/example-crates/wallet_esplora_async/src/main.rs @@ -64,7 +64,7 @@ async fn main() -> Result<(), Box> { graph: 
update_graph, ..LocalUpdate::new(new_tip) }; - wallet.apply_update(update, false)?; + wallet.apply_update(update)?; wallet.commit()?; println!(); From 02ef3603107bc7d2a0836283821e72de5fae9b63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Mon, 10 Jul 2023 23:53:28 +0800 Subject: [PATCH 21/24] Rename `CheckPoint::extend` to `extend_with_blocks` So we don't confuse it with `Extend::extend` (trait) that resides in `core::iter::Extend`. --- crates/chain/src/local_chain.rs | 7 +++++-- crates/esplora/src/async_ext.rs | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs index a1c8502f5..b98d1dac3 100644 --- a/crates/chain/src/local_chain.rs +++ b/crates/chain/src/local_chain.rs @@ -50,7 +50,10 @@ impl CheckPoint { /// /// Returns an `Err(self)` if there is block which does not have a greater height than the /// previous one. - pub fn extend(self, blocks: impl IntoIterator) -> Result { + pub fn extend_with_blocks( + self, + blocks: impl IntoIterator, + ) -> Result { let mut curr = self.clone(); for block in blocks { curr = curr.push(block).map_err(|_| self.clone())?; @@ -322,7 +325,7 @@ impl LocalChain { } let new_tip = match base { Some(base) => Some( - base.extend(extension.into_iter().map(BlockId::from)) + base.extend_with_blocks(extension.into_iter().map(BlockId::from)) .expect("extension is strictly greater than base"), ), None => LocalChain::from_blocks(extension).tip(), diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs index 78e9334ea..d5d4fc10b 100644 --- a/crates/esplora/src/async_ext.rs +++ b/crates/esplora/src/async_ext.rs @@ -190,7 +190,7 @@ impl EsploraAsyncExt for esplora_client::AsyncClient { .map(|(height, hash)| BlockId { height, hash }) .fold(first_cp, |prev_cp, block| { prev_cp - .extend(core::iter::once(block)) + .extend_with_blocks(core::iter::once(block)) .expect("must extend checkpoint") }); From 
69b5e633ce9d40ffc34938812e2f5f59aff4003f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=BF=97=E5=AE=87?= Date: Tue, 18 Jul 2023 10:40:30 +0800 Subject: [PATCH 22/24] Update .gitignore to ignore example db files and remove dup. crate --- .gitignore | 3 + nursery/tmp_plan/bdk_tmp_plan/Cargo.toml | 13 - nursery/tmp_plan/bdk_tmp_plan/README.md | 3 - nursery/tmp_plan/bdk_tmp_plan/src/lib.rs | 436 ------------------ .../tmp_plan/bdk_tmp_plan/src/plan_impls.rs | 323 ------------- .../tmp_plan/bdk_tmp_plan/src/requirements.rs | 218 --------- nursery/tmp_plan/bdk_tmp_plan/src/template.rs | 76 --- 7 files changed, 3 insertions(+), 1069 deletions(-) delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/Cargo.toml delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/README.md delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/src/lib.rs delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/src/plan_impls.rs delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/src/requirements.rs delete mode 100644 nursery/tmp_plan/bdk_tmp_plan/src/template.rs diff --git a/.gitignore b/.gitignore index d01301890..95285763a 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,6 @@ Cargo.lock *.swp .idea + +# Example persisted files. 
+*.db diff --git a/nursery/tmp_plan/bdk_tmp_plan/Cargo.toml b/nursery/tmp_plan/bdk_tmp_plan/Cargo.toml deleted file mode 100644 index c2d615df8..000000000 --- a/nursery/tmp_plan/bdk_tmp_plan/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "bdk_tmp_plan" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -bdk_chain = { path = "../../../crates/chain", version = "0.3.1", features = ["miniscript"] } - -[features] -default = ["std"] -std = [] diff --git a/nursery/tmp_plan/bdk_tmp_plan/README.md b/nursery/tmp_plan/bdk_tmp_plan/README.md deleted file mode 100644 index 70cc100dc..000000000 --- a/nursery/tmp_plan/bdk_tmp_plan/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Temporary planning module - -A temporary place to hold the planning module until https://github.com/rust-bitcoin/rust-miniscript/pull/481 is merged and released diff --git a/nursery/tmp_plan/bdk_tmp_plan/src/lib.rs b/nursery/tmp_plan/bdk_tmp_plan/src/lib.rs deleted file mode 100644 index a64d44922..000000000 --- a/nursery/tmp_plan/bdk_tmp_plan/src/lib.rs +++ /dev/null @@ -1,436 +0,0 @@ -#![allow(unused)] -#![allow(missing_docs)] -//! A spending plan or *plan* for short is a representation of a particular spending path on a -//! descriptor. This allows us to analayze a choice of spending path without producing any -//! signatures or other witness data for it. -//! -//! To make a plan you provide the descriptor with "assets" like which keys you are able to use, hash -//! pre-images you have access to, the current block height etc. -//! -//! Once you've got a plan it can tell you its expected satisfaction weight which can be useful for -//! doing coin selection. Furthermore it provides which subset of those keys and hash pre-images you -//! will actually need as well as what locktime or sequence number you need to set. -//! -//! 
Once you've obstained signatures, hash pre-images etc required by the plan, it can create a -//! witness/script_sig for the input. -use bdk_chain::{bitcoin, collections::*, miniscript}; -use bitcoin::{ - blockdata::{locktime::LockTime, transaction::Sequence}, - hashes::{hash160, ripemd160, sha256}, - secp256k1::Secp256k1, - util::{ - address::WitnessVersion, - bip32::{DerivationPath, Fingerprint, KeySource}, - taproot::{LeafVersion, TapBranchHash, TapLeafHash}, - }, - EcdsaSig, SchnorrSig, Script, TxIn, Witness, -}; -use miniscript::{ - descriptor::{InnerXKey, Tr}, - hash256, DefiniteDescriptorKey, Descriptor, DescriptorPublicKey, ScriptContext, ToPublicKey, -}; - -pub(crate) fn varint_len(v: usize) -> usize { - bitcoin::VarInt(v as u64).len() as usize -} - -mod plan_impls; -mod requirements; -mod template; -pub use requirements::*; -pub use template::PlanKey; -use template::TemplateItem; - -#[derive(Clone, Debug)] -enum TrSpend { - KeySpend, - LeafSpend { - script: Script, - leaf_version: LeafVersion, - }, -} - -#[derive(Clone, Debug)] -enum Target { - Legacy, - Segwitv0 { - script_code: Script, - }, - Segwitv1 { - tr: Tr, - tr_plan: TrSpend, - }, -} - -impl Target {} - -#[derive(Clone, Debug)] -/// A plan represents a particular spending path for a descriptor. -/// -/// See the module level documentation for more info. -pub struct Plan { - template: Vec>, - target: Target, - set_locktime: Option, - set_sequence: Option, -} - -impl Default for Target { - fn default() -> Self { - Target::Legacy - } -} - -#[derive(Clone, Debug, Default)] -/// Signatures and hash pre-images that can be used to complete a plan. 
-pub struct SatisfactionMaterial { - /// Schnorr signautres under their keys - pub schnorr_sigs: BTreeMap, - /// ECDSA signatures under their keys - pub ecdsa_sigs: BTreeMap, - /// SHA256 pre-images under their images - pub sha256_preimages: BTreeMap>, - /// hash160 pre-images under their images - pub hash160_preimages: BTreeMap>, - /// hash256 pre-images under their images - pub hash256_preimages: BTreeMap>, - /// ripemd160 pre-images under their images - pub ripemd160_preimages: BTreeMap>, -} - -impl Plan -where - Ak: Clone, -{ - /// The expected satisfaction weight for the plan if it is completed. - pub fn expected_weight(&self) -> usize { - let script_sig_size = match self.target { - Target::Legacy => unimplemented!(), // self - // .template - // .iter() - // .map(|step| { - // let size = step.expected_size(); - // size + push_opcode_size(size) - // }) - // .sum() - Target::Segwitv0 { .. } | Target::Segwitv1 { .. } => 1, - }; - let witness_elem_sizes: Option> = match &self.target { - Target::Legacy => None, - Target::Segwitv0 { .. } => Some( - self.template - .iter() - .map(|step| step.expected_size()) - .collect(), - ), - Target::Segwitv1 { tr, tr_plan } => { - let mut witness_elems = self - .template - .iter() - .map(|step| step.expected_size()) - .collect::>(); - - if let TrSpend::LeafSpend { - script, - leaf_version, - } = tr_plan - { - let control_block = tr - .spend_info() - .control_block(&(script.clone(), *leaf_version)) - .expect("must exist"); - witness_elems.push(script.len()); - witness_elems.push(control_block.size()); - } - - Some(witness_elems) - } - }; - - let witness_size: usize = match witness_elem_sizes { - Some(elems) => { - varint_len(elems.len()) - + elems - .into_iter() - .map(|elem| varint_len(elem) + elem) - .sum::() - } - None => 0, - }; - - script_sig_size * 4 + witness_size - } - - pub fn requirements(&self) -> Requirements { - match self.try_complete(&SatisfactionMaterial::default()) { - PlanState::Complete { .. 
} => Requirements::default(), - PlanState::Incomplete(requirements) => requirements, - } - } - - pub fn try_complete(&self, auth_data: &SatisfactionMaterial) -> PlanState { - let unsatisfied_items = self - .template - .iter() - .filter(|step| match step { - TemplateItem::Sign(key) => { - !auth_data.schnorr_sigs.contains_key(&key.descriptor_key) - } - TemplateItem::Hash160(image) => !auth_data.hash160_preimages.contains_key(image), - TemplateItem::Hash256(image) => !auth_data.hash256_preimages.contains_key(image), - TemplateItem::Sha256(image) => !auth_data.sha256_preimages.contains_key(image), - TemplateItem::Ripemd160(image) => { - !auth_data.ripemd160_preimages.contains_key(image) - } - TemplateItem::Pk { .. } | TemplateItem::One | TemplateItem::Zero => false, - }) - .collect::>(); - - if unsatisfied_items.is_empty() { - let mut witness = self - .template - .iter() - .flat_map(|step| step.to_witness_stack(&auth_data)) - .collect::>(); - match &self.target { - Target::Segwitv0 { .. } => todo!(), - Target::Legacy => todo!(), - Target::Segwitv1 { - tr_plan: TrSpend::KeySpend, - .. - } => PlanState::Complete { - final_script_sig: None, - final_script_witness: Some(Witness::from_vec(witness)), - }, - Target::Segwitv1 { - tr, - tr_plan: - TrSpend::LeafSpend { - script, - leaf_version, - }, - } => { - let spend_info = tr.spend_info(); - let control_block = spend_info - .control_block(&(script.clone(), *leaf_version)) - .expect("must exist"); - witness.push(script.clone().into_bytes()); - witness.push(control_block.serialize()); - - PlanState::Complete { - final_script_sig: None, - final_script_witness: Some(Witness::from_vec(witness)), - } - } - } - } else { - let mut requirements = Requirements::default(); - - match &self.target { - Target::Legacy => { - todo!() - } - Target::Segwitv0 { .. } => { - todo!() - } - Target::Segwitv1 { tr, tr_plan } => { - let spend_info = tr.spend_info(); - match tr_plan { - TrSpend::KeySpend => match &self.template[..] 
{ - [TemplateItem::Sign(ref plan_key)] => { - requirements.signatures = RequiredSignatures::TapKey { - merkle_root: spend_info.merkle_root(), - plan_key: plan_key.clone(), - }; - } - _ => unreachable!("tapkey spend will always have only one sign step"), - }, - TrSpend::LeafSpend { - script, - leaf_version, - } => { - let leaf_hash = TapLeafHash::from_script(&script, *leaf_version); - requirements.signatures = RequiredSignatures::TapScript { - leaf_hash, - plan_keys: vec![], - } - } - } - } - } - - let required_signatures = match requirements.signatures { - RequiredSignatures::Legacy { .. } => todo!(), - RequiredSignatures::Segwitv0 { .. } => todo!(), - RequiredSignatures::TapKey { .. } => return PlanState::Incomplete(requirements), - RequiredSignatures::TapScript { - plan_keys: ref mut keys, - .. - } => keys, - }; - - for step in unsatisfied_items { - match step { - TemplateItem::Sign(plan_key) => { - required_signatures.push(plan_key.clone()); - } - TemplateItem::Hash160(image) => { - requirements.hash160_images.insert(image.clone()); - } - TemplateItem::Hash256(image) => { - requirements.hash256_images.insert(image.clone()); - } - TemplateItem::Sha256(image) => { - requirements.sha256_images.insert(image.clone()); - } - TemplateItem::Ripemd160(image) => { - requirements.ripemd160_images.insert(image.clone()); - } - TemplateItem::Pk { .. } | TemplateItem::One | TemplateItem::Zero => { /* no requirements */ - } - } - } - - PlanState::Incomplete(requirements) - } - } - - /// Witness version for the plan - pub fn witness_version(&self) -> Option { - match self.target { - Target::Legacy => None, - Target::Segwitv0 { .. } => Some(WitnessVersion::V0), - Target::Segwitv1 { .. } => Some(WitnessVersion::V1), - } - } - - /// The minimum required locktime height or time on the transaction using the plan. 
- pub fn required_locktime(&self) -> Option { - self.set_locktime.clone() - } - - /// The minimum required sequence (height or time) on the input to satisfy the plan - pub fn required_sequence(&self) -> Option { - self.set_sequence.clone() - } - - /// The minmum required transaction version required on the transaction using the plan. - pub fn min_version(&self) -> Option { - if let Some(_) = self.set_sequence { - Some(2) - } else { - Some(1) - } - } -} - -/// The returned value from [`Plan::try_complete`]. -pub enum PlanState { - /// The plan is complete - Complete { - /// The script sig that should be set on the input - final_script_sig: Option