diff --git a/.gitignore b/.gitignore
index 95285763a..f3ee3a8e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ Cargo.lock
# Example persisted files.
*.db
+bdk_wallet_esplora_async_example.dat
+bdk_wallet_esplora_blocking_example.dat
diff --git a/crates/bdk/src/wallet/coin_selection.rs b/crates/bdk/src/wallet/coin_selection.rs
index 5122a1493..f2e4324cb 100644
--- a/crates/bdk/src/wallet/coin_selection.rs
+++ b/crates/bdk/src/wallet/coin_selection.rs
@@ -219,8 +219,6 @@ impl CoinSelectionResult {
pub trait CoinSelectionAlgorithm: core::fmt::Debug {
/// Perform the coin selection
///
- /// - `database`: a reference to the wallet's database that can be used to lookup additional
- /// details for a specific UTXO
/// - `required_utxos`: the utxos that must be spent regardless of `target_amount` with their
/// weight cost
/// - `optional_utxos`: the remaining available utxos to satisfy `target_amount` with their
diff --git a/crates/bdk/src/wallet/error.rs b/crates/bdk/src/wallet/error.rs
index 46cf8ef3c..f94c645dd 100644
--- a/crates/bdk/src/wallet/error.rs
+++ b/crates/bdk/src/wallet/error.rs
@@ -46,7 +46,7 @@ impl std::error::Error for MiniscriptPsbtError {}
#[derive(Debug)]
/// Error returned from [`TxBuilder::finish`]
///
-/// [`TxBuilder::finish`]: crate::wallet::tx_builder::TxBuilder::finish
+/// [`TxBuilder::finish`]: super::tx_builder::TxBuilder::finish
pub enum CreateTxError
{
/// There was a problem with the descriptors passed in
Descriptor(DescriptorError),
@@ -248,9 +248,7 @@ impl
From for CreateTxError {
impl std::error::Error for CreateTxError {}
#[derive(Debug)]
-/// Error returned from [`Wallet::build_fee_bump`]
-///
-/// [`Wallet::build_fee_bump`]: super::Wallet::build_fee_bump
+/// Error returned from [`crate::Wallet::build_fee_bump`]
pub enum BuildFeeBumpError {
/// Happens when trying to spend an UTXO that is not in the internal database
UnknownUtxo(OutPoint),
diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs
index bd33a9047..2fd259d79 100644
--- a/crates/bdk/src/wallet/mod.rs
+++ b/crates/bdk/src/wallet/mod.rs
@@ -20,6 +20,7 @@ use alloc::{
vec::Vec,
};
pub use bdk_chain::keychain::Balance;
+use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
use bdk_chain::{
indexed_tx_graph,
keychain::{self, KeychainTxOutIndex},
@@ -28,7 +29,7 @@ use bdk_chain::{
},
tx_graph::{CanonicalTx, TxGraph},
Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeHeightAnchor, FullTxOut,
- IndexedTxGraph, Persist, PersistBackend,
+ IndexedTxGraph, Persist, PersistBackend, SpkIterator,
};
use bitcoin::secp256k1::{All, Secp256k1};
use bitcoin::sighash::{EcdsaSighashType, TapSighashType};
@@ -42,6 +43,7 @@ use core::fmt;
use core::ops::Deref;
use descriptor::error::Error as DescriptorError;
use miniscript::psbt::{PsbtExt, PsbtInputExt, PsbtInputSatisfier};
+use miniscript::{Descriptor, DescriptorPublicKey};
use bdk_chain::tx_graph::CalculateFeeError;
@@ -942,7 +944,7 @@ impl Wallet {
/// # let mut wallet: Wallet<()> = todo!();
/// # let txid:Txid = todo!();
/// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- /// let fee = wallet.calculate_fee(tx).expect("fee");
+ /// let fee = wallet.calculate_fee(&tx).expect("fee");
/// ```
///
/// ```rust, no_run
@@ -973,7 +975,7 @@ impl Wallet {
/// # let mut wallet: Wallet<()> = todo!();
/// # let txid:Txid = todo!();
/// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- /// let fee_rate = wallet.calculate_fee_rate(tx).expect("fee rate");
+ /// let fee_rate = wallet.calculate_fee_rate(&tx).expect("fee rate");
/// ```
///
/// ```rust, no_run
@@ -981,8 +983,8 @@ impl Wallet {
/// # use bdk::Wallet;
/// # let mut wallet: Wallet<()> = todo!();
/// # let mut psbt: PartiallySignedTransaction = todo!();
- /// let tx = &psbt.clone().extract_tx();
- /// let fee_rate = wallet.calculate_fee_rate(tx).expect("fee rate");
+ /// let tx = psbt.clone().extract_tx();
+ /// let fee_rate = wallet.calculate_fee_rate(&tx).expect("fee rate");
/// ```
/// [`insert_txout`]: Self::insert_txout
pub fn calculate_fee_rate(&self, tx: &Transaction) -> Result {
@@ -1003,8 +1005,8 @@ impl Wallet {
/// # use bdk::Wallet;
/// # let mut wallet: Wallet<()> = todo!();
/// # let txid:Txid = todo!();
- /// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- /// let (sent, received) = wallet.sent_and_received(tx);
+ /// let tx = wallet.get_tx(txid).expect("tx exists").tx_node.tx;
+ /// let (sent, received) = wallet.sent_and_received(&tx);
/// ```
///
/// ```rust, no_run
@@ -1065,7 +1067,7 @@ impl Wallet {
pub fn get_tx(
&self,
txid: Txid,
- ) -> Option> {
+ ) -> Option, ConfirmationTimeHeightAnchor>> {
let graph = self.indexed_graph.graph();
Some(CanonicalTx {
@@ -1128,18 +1130,13 @@ impl Wallet {
// anchor tx to checkpoint with lowest height that is >= position's height
let anchor = self
.chain
- .blocks()
- .range(height..)
- .next()
+ .query_from(height)
.ok_or(InsertTxError::ConfirmationHeightCannotBeGreaterThanTip {
tip_height: self.chain.tip().height(),
tx_height: height,
})
- .map(|(&anchor_height, &hash)| ConfirmationTimeHeightAnchor {
- anchor_block: BlockId {
- height: anchor_height,
- hash,
- },
+ .map(|anchor_cp| ConfirmationTimeHeightAnchor {
+ anchor_block: anchor_cp.block_id(),
confirmation_height: height,
confirmation_time: time,
})?;
@@ -1167,7 +1164,8 @@ impl Wallet {
/// Iterate over the transactions in the wallet.
pub fn transactions(
&self,
- ) -> impl Iterator- > + '_ {
+ ) -> impl Iterator
- , ConfirmationTimeHeightAnchor>> + '_
+ {
self.indexed_graph
.graph()
.list_chain_txs(&self.chain, self.chain.tip().block_id())
@@ -1670,6 +1668,7 @@ impl
Wallet {
let mut tx = graph
.get_tx(txid)
.ok_or(BuildFeeBumpError::TransactionNotFound(txid))?
+ .as_ref()
.clone();
let pos = graph
@@ -1739,7 +1738,7 @@ impl Wallet {
sequence: Some(txin.sequence),
psbt_input: Box::new(psbt::Input {
witness_utxo: Some(txout.clone()),
- non_witness_utxo: Some(prev_tx.clone()),
+ non_witness_utxo: Some(prev_tx.as_ref().clone()),
..Default::default()
}),
},
@@ -2295,7 +2294,7 @@ impl Wallet {
psbt_input.witness_utxo = Some(prev_tx.output[prev_output.vout as usize].clone());
}
if !desc.is_taproot() && (!desc.is_witness() || !only_witness_utxo) {
- psbt_input.non_witness_utxo = Some(prev_tx.clone());
+ psbt_input.non_witness_utxo = Some(prev_tx.as_ref().clone());
}
}
Ok(psbt_input)
@@ -2500,6 +2499,31 @@ impl Wallet {
.batch_insert_relevant_unconfirmed(unconfirmed_txs);
self.persist.stage(ChangeSet::from(indexed_graph_changeset));
}
+
+ /// Create a [`SyncRequest`] for this wallet for all revealed spks.
+ ///
+ /// This is the first step when performing a spk-based wallet sync, the returned [`SyncRequest`] collects
+ /// all revealed script pub keys from the wallet keychain needed to start a blockchain sync with a spk based
+ /// blockchain client.
+ pub fn sync_revealed_spks_request(&self) -> SyncRequest {
+ let chain_tip = self.local_chain().tip();
+ self.spk_index().sync_revealed_spks_request(chain_tip)
+ }
+
+ /// Create a [`FullScanRequest`] for this wallet.
+ ///
+ /// This is the first step when performing a spk-based wallet full scan, the returned [`FullScanRequest`]
+ /// collects iterators for the wallet's keychain script pub keys needed to start a blockchain full scan
+ /// with a spk based blockchain client.
+ ///
+ /// This operation is generally only used when importing or restoring a previously used wallet
+ /// in which the list of used scripts is not known.
+ pub fn full_scan_request(
+ &self,
+ ) -> FullScanRequest>> {
+ let chain_tip = self.local_chain().tip();
+ self.spk_index().full_scan_request(chain_tip)
+ }
}
impl AsRef> for Wallet {
diff --git a/crates/bdk/tests/wallet.rs b/crates/bdk/tests/wallet.rs
index e367b0bb5..b31b44bb2 100644
--- a/crates/bdk/tests/wallet.rs
+++ b/crates/bdk/tests/wallet.rs
@@ -208,12 +208,12 @@ fn test_get_funded_wallet_sent_and_received() {
let mut tx_amounts: Vec<(Txid, (u64, u64))> = wallet
.transactions()
- .map(|ct| (ct.tx_node.txid, wallet.sent_and_received(ct.tx_node.tx)))
+ .map(|ct| (ct.tx_node.txid, wallet.sent_and_received(&ct.tx_node)))
.collect();
tx_amounts.sort_by(|a1, a2| a1.0.cmp(&a2.0));
let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- let (sent, received) = wallet.sent_and_received(tx);
+ let (sent, received) = wallet.sent_and_received(&tx);
// The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
// to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -227,7 +227,7 @@ fn test_get_funded_wallet_tx_fees() {
let (wallet, txid) = get_funded_wallet(get_test_wpkh());
let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- let tx_fee = wallet.calculate_fee(tx).expect("transaction fee");
+ let tx_fee = wallet.calculate_fee(&tx).expect("transaction fee");
// The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
// to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -240,7 +240,9 @@ fn test_get_funded_wallet_tx_fee_rate() {
let (wallet, txid) = get_funded_wallet(get_test_wpkh());
let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
- let tx_fee_rate = wallet.calculate_fee_rate(tx).expect("transaction fee rate");
+ let tx_fee_rate = wallet
+ .calculate_fee_rate(&tx)
+ .expect("transaction fee rate");
// The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
// to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -1307,7 +1309,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
.add_foreign_utxo(
utxo2.outpoint,
psbt::Input {
- non_witness_utxo: Some(tx1),
+ non_witness_utxo: Some(tx1.as_ref().clone()),
..Default::default()
},
satisfaction_weight
@@ -1320,7 +1322,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
.add_foreign_utxo(
utxo2.outpoint,
psbt::Input {
- non_witness_utxo: Some(tx2),
+ non_witness_utxo: Some(tx2.as_ref().clone()),
..Default::default()
},
satisfaction_weight
@@ -1384,7 +1386,7 @@ fn test_add_foreign_utxo_only_witness_utxo() {
let mut builder = builder.clone();
let tx2 = wallet2.get_tx(txid2).unwrap().tx_node.tx;
let psbt_input = psbt::Input {
- non_witness_utxo: Some(tx2.clone()),
+ non_witness_utxo: Some(tx2.as_ref().clone()),
..Default::default()
};
builder
@@ -3050,7 +3052,8 @@ fn test_taproot_sign_using_non_witness_utxo() {
let mut psbt = builder.finish().unwrap();
psbt.inputs[0].witness_utxo = None;
- psbt.inputs[0].non_witness_utxo = Some(wallet.get_tx(prev_txid).unwrap().tx_node.tx.clone());
+ psbt.inputs[0].non_witness_utxo =
+ Some(wallet.get_tx(prev_txid).unwrap().tx_node.as_ref().clone());
assert!(
psbt.inputs[0].non_witness_utxo.is_some(),
"Previous tx should be present in the database"
diff --git a/crates/bitcoind_rpc/tests/test_emitter.rs b/crates/bitcoind_rpc/tests/test_emitter.rs
index 2161db0df..97946da99 100644
--- a/crates/bitcoind_rpc/tests/test_emitter.rs
+++ b/crates/bitcoind_rpc/tests/test_emitter.rs
@@ -57,12 +57,15 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
}
assert_eq!(
- local_chain.blocks(),
- &exp_hashes
+ local_chain
+ .iter_checkpoints()
+ .map(|cp| (cp.height(), cp.hash()))
+ .collect::>(),
+ exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
- .collect(),
+ .collect::>(),
"final local_chain state is unexpected",
);
@@ -110,12 +113,15 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
}
assert_eq!(
- local_chain.blocks(),
- &exp_hashes
+ local_chain
+ .iter_checkpoints()
+ .map(|cp| (cp.height(), cp.hash()))
+ .collect::>(),
+ exp_hashes
.iter()
.enumerate()
.map(|(i, hash)| (i as u32, *hash))
- .collect(),
+ .collect::>(),
"final local_chain state is unexpected after reorg",
);
diff --git a/crates/chain/Cargo.toml b/crates/chain/Cargo.toml
index 0a77708a1..6c5a59915 100644
--- a/crates/chain/Cargo.toml
+++ b/crates/chain/Cargo.toml
@@ -15,7 +15,7 @@ readme = "README.md"
[dependencies]
# For no-std, remember to enable the bitcoin/no-std feature
bitcoin = { version = "0.30.0", default-features = false }
-serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
+serde_crate = { package = "serde", version = "1", optional = true, features = ["derive", "rc"] }
# Use hashbrown as a feature flag to have HashSet and HashMap from it.
hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
diff --git a/crates/chain/src/keychain/txout_index.rs b/crates/chain/src/keychain/txout_index.rs
index 79f98fad2..8cb8b3a0c 100644
--- a/crates/chain/src/keychain/txout_index.rs
+++ b/crates/chain/src/keychain/txout_index.rs
@@ -5,12 +5,15 @@ use crate::{
spk_iter::BIP32_MAX_INDEX,
SpkIterator, SpkTxOutIndex,
};
-use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
+use alloc::vec::Vec;
+use bitcoin::{OutPoint, Script, ScriptBuf, Transaction, TxOut, Txid};
use core::{
fmt::Debug,
ops::{Bound, RangeBounds},
};
+use crate::local_chain::CheckPoint;
+use crate::spk_client::{FullScanRequest, SyncRequest};
use crate::Append;
const DEFAULT_LOOKAHEAD: u32 = 25;
@@ -110,13 +113,13 @@ pub struct KeychainTxOutIndex {
lookahead: u32,
}
-impl Default for KeychainTxOutIndex {
+impl Default for KeychainTxOutIndex {
fn default() -> Self {
Self::new(DEFAULT_LOOKAHEAD)
}
}
-impl Indexer for KeychainTxOutIndex {
+impl Indexer for KeychainTxOutIndex {
type ChangeSet = super::ChangeSet;
fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
@@ -134,20 +137,20 @@ impl Indexer for KeychainTxOutIndex {
changeset
}
- fn initial_changeset(&self) -> Self::ChangeSet {
- super::ChangeSet(self.last_revealed.clone())
- }
-
fn apply_changeset(&mut self, changeset: Self::ChangeSet) {
self.apply_changeset(changeset)
}
+ fn initial_changeset(&self) -> Self::ChangeSet {
+ super::ChangeSet(self.last_revealed.clone())
+ }
+
fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
self.inner.is_relevant(tx)
}
}
-impl KeychainTxOutIndex {
+impl KeychainTxOutIndex {
/// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
///
/// The `lookahead` is the number of script pubkeys to derive and cache from the internal
@@ -169,7 +172,7 @@ impl KeychainTxOutIndex {
}
/// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
-impl KeychainTxOutIndex {
+impl KeychainTxOutIndex {
/// Return a reference to the internal [`SpkTxOutIndex`].
///
/// **WARNING:** The internal index will contain lookahead spks. Refer to
@@ -291,7 +294,7 @@ impl KeychainTxOutIndex {
}
}
-impl KeychainTxOutIndex {
+impl KeychainTxOutIndex {
/// Return a reference to the internal map of keychain to descriptors.
pub fn keychains(&self) -> &BTreeMap> {
&self.keychains
@@ -669,6 +672,43 @@ impl KeychainTxOutIndex {
.collect()
}
+ /// Create a [`SyncRequest`] for this [`KeychainTxOutIndex`] for all revealed spks.
+ ///
+ /// This is the first step when performing a spk-based wallet sync, the returned [`SyncRequest`] collects
+ /// all revealed script pub keys needed to start a blockchain sync with a spk based blockchain client. A
+ /// [`CheckPoint`] representing the current chain tip must be provided.
+ pub fn sync_revealed_spks_request(&self, chain_tip: CheckPoint) -> SyncRequest {
+ // Sync all revealed SPKs
+ let spks = self
+ .revealed_spks()
+ .map(|(_keychain, index, spk)| (index, ScriptBuf::from(spk)))
+ .collect::>();
+
+ let mut req = SyncRequest::new(chain_tip);
+ req.add_spks(spks);
+ req
+ }
+
+ /// Create a [`FullScanRequest`] for this [`KeychainTxOutIndex`].
+ ///
+ /// This is the first step when performing a spk-based full scan, the returned [`FullScanRequest`]
+ /// collects iterators for the index's keychain script pub keys to start a blockchain full scan with a
+ /// spk based blockchain client. A [`CheckPoint`] representing the current chain tip must be provided.
+ ///
+ /// This operation is generally only used when importing or restoring previously used keychains
+ /// in which the list of used scripts is not known.
+ pub fn full_scan_request(
+ &self,
+ chain_tip: CheckPoint,
+ ) -> FullScanRequest>> {
+ let spks_by_keychain: BTreeMap>> =
+ self.all_unbounded_spk_iters();
+
+ let mut req = FullScanRequest::new(chain_tip);
+ req.add_spks_by_keychain(spks_by_keychain);
+ req
+ }
+
/// Applies the derivation changeset to the [`KeychainTxOutIndex`], extending the number of
/// derived scripts per keychain, as specified in the `changeset`.
pub fn apply_changeset(&mut self, changeset: super::ChangeSet) {
diff --git a/crates/chain/src/lib.rs b/crates/chain/src/lib.rs
index 206566971..9d2cb06eb 100644
--- a/crates/chain/src/lib.rs
+++ b/crates/chain/src/lib.rs
@@ -52,6 +52,9 @@ mod spk_iter;
#[cfg(feature = "miniscript")]
pub use spk_iter::*;
+/// Helper types for use with spk-based blockchain clients.
+pub mod spk_client;
+
#[allow(unused_imports)]
#[macro_use]
extern crate alloc;
diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs
index 9be62dee3..5d6034ff3 100644
--- a/crates/chain/src/local_chain.rs
+++ b/crates/chain/src/local_chain.rs
@@ -34,6 +34,20 @@ struct CPInner {
prev: Option>,
}
+impl PartialEq for CheckPoint {
+ fn eq(&self, other: &Self) -> bool {
+ let mut self_cps = self.iter().map(|cp| cp.block_id());
+ let mut other_cps = other.iter().map(|cp| cp.block_id());
+ loop {
+ match (self_cps.next(), other_cps.next()) {
+ (Some(self_cp), Some(other_cp)) if self_cp == other_cp => continue,
+ (None, None) => break true,
+ _ => break false,
+ }
+ }
+ }
+}
+
impl CheckPoint {
/// Construct a new base block at the front of a linked list.
pub fn new(block: BlockId) -> Self {
@@ -148,6 +162,23 @@ impl CheckPoint {
pub fn iter(&self) -> CheckPointIter {
self.clone().into_iter()
}
+
+ /// Query for checkpoint at `height`.
+ ///
+ /// Returns `None` if checkpoint at `height` does not exist.
+ pub fn query(&self, height: u32) -> Option {
+ self.iter()
+ // optimization to avoid iterating the entire chain if we do not get a direct hit
+ .take_while(|cp| cp.height() >= height)
+ .find(|cp| cp.height() == height)
+ }
+
+ /// Query for checkpoint that is greater or equal to `height`.
+ ///
+ /// Returns `None` if no checkpoint has a height equal to or greater than `height`.
+ pub fn query_from(&self, height: u32) -> Option {
+ self.iter().take_while(|cp| cp.height() >= height).last()
+ }
}
/// Iterates over checkpoints backwards.
@@ -188,7 +219,7 @@ impl IntoIterator for CheckPoint {
/// Script-pubkey based syncing mechanisms may not introduce transactions in a chronological order
/// so some updates require introducing older blocks (to anchor older transactions). For
/// script-pubkey based syncing, `introduce_older_blocks` would typically be `true`.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub struct Update {
/// The update chain's new tip.
pub tip: CheckPoint,
@@ -202,21 +233,19 @@ pub struct Update {
}
/// This is a local implementation of [`ChainOracle`].
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
pub struct LocalChain {
tip: CheckPoint,
- index: BTreeMap,
-}
-
-impl PartialEq for LocalChain {
- fn eq(&self, other: &Self) -> bool {
- self.index == other.index
- }
}
+// TODO: Figure out whether we can get rid of this
impl From for BTreeMap {
fn from(value: LocalChain) -> Self {
- value.index
+ value
+ .tip
+ .iter()
+ .map(|cp| (cp.height(), cp.hash()))
+ .collect()
}
}
@@ -228,18 +257,16 @@ impl ChainOracle for LocalChain {
block: BlockId,
chain_tip: BlockId,
) -> Result, Self::Error> {
- if block.height > chain_tip.height {
- return Ok(None);
+ let chain_tip_cp = match self.tip.query(chain_tip.height) {
+ // we can only determine whether `block` is in chain of `chain_tip` if `chain_tip` can
+ // be identified in chain
+ Some(cp) if cp.hash() == chain_tip.hash => cp,
+ _ => return Ok(None),
+ };
+ match chain_tip_cp.query(block.height) {
+ Some(cp) => Ok(Some(cp.hash() == block.hash)),
+ None => Ok(None),
}
- Ok(
- match (
- self.index.get(&block.height),
- self.index.get(&chain_tip.height),
- ) {
- (Some(cp), Some(tip_cp)) => Some(*cp == block.hash && *tip_cp == chain_tip.hash),
- _ => None,
- },
- )
}
fn get_chain_tip(&self) -> Result {
@@ -250,7 +277,7 @@ impl ChainOracle for LocalChain {
impl LocalChain {
/// Get the genesis hash.
pub fn genesis_hash(&self) -> BlockHash {
- self.index.get(&0).copied().expect("must have genesis hash")
+ self.tip.query(0).expect("genesis must exist").hash()
}
/// Construct [`LocalChain`] from genesis `hash`.
@@ -259,7 +286,6 @@ impl LocalChain {
let height = 0;
let chain = Self {
tip: CheckPoint::new(BlockId { height, hash }),
- index: core::iter::once((height, hash)).collect(),
};
let changeset = chain.initial_changeset();
(chain, changeset)
@@ -276,7 +302,6 @@ impl LocalChain {
let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
chain.apply_changeset(&changeset)?;
- debug_assert!(chain._check_index_is_consistent_with_tip());
debug_assert!(chain._check_changeset_is_applied(&changeset));
Ok(chain)
@@ -284,18 +309,11 @@ impl LocalChain {
/// Construct a [`LocalChain`] from a given `checkpoint` tip.
pub fn from_tip(tip: CheckPoint) -> Result {
- let mut chain = Self {
- tip,
- index: BTreeMap::new(),
- };
- chain.reindex(0);
-
- if chain.index.get(&0).copied().is_none() {
+ let genesis_cp = tip.iter().last().expect("must have at least one element");
+ if genesis_cp.height() != 0 {
return Err(MissingGenesisError);
}
-
- debug_assert!(chain._check_index_is_consistent_with_tip());
- Ok(chain)
+ Ok(Self { tip })
}
/// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
@@ -303,12 +321,11 @@ impl LocalChain {
/// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are
/// all of the same chain.
pub fn from_blocks(blocks: BTreeMap) -> Result {
- if !blocks.contains_key(&0) {
+ if blocks.get(&0).is_none() {
return Err(MissingGenesisError);
}
let mut tip: Option = None;
-
for block in &blocks {
match tip {
Some(curr) => {
@@ -321,13 +338,9 @@ impl LocalChain {
}
}
- let chain = Self {
- index: blocks,
+ Ok(Self {
tip: tip.expect("already checked to have genesis"),
- };
-
- debug_assert!(chain._check_index_is_consistent_with_tip());
- Ok(chain)
+ })
}
/// Get the highest checkpoint.
@@ -494,9 +507,7 @@ impl LocalChain {
None => LocalChain::from_blocks(extension)?.tip(),
};
self.tip = new_tip;
- self.reindex(start_height);
- debug_assert!(self._check_index_is_consistent_with_tip());
debug_assert!(self._check_changeset_is_applied(changeset));
}
@@ -509,16 +520,16 @@ impl LocalChain {
///
/// Replacing the block hash of an existing checkpoint will result in an error.
pub fn insert_block(&mut self, block_id: BlockId) -> Result {
- if let Some(&original_hash) = self.index.get(&block_id.height) {
+ if let Some(original_cp) = self.tip.query(block_id.height) {
+ let original_hash = original_cp.hash();
if original_hash != block_id.hash {
return Err(AlterCheckPointError {
height: block_id.height,
original_hash,
update_hash: Some(block_id.hash),
});
- } else {
- return Ok(ChangeSet::default());
}
+ return Ok(ChangeSet::default());
}
let mut changeset = ChangeSet::default();
@@ -542,33 +553,41 @@ impl LocalChain {
/// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
/// genesis block.
pub fn disconnect_from(&mut self, block_id: BlockId) -> Result {
- if self.index.get(&block_id.height) != Some(&block_id.hash) {
- return Ok(ChangeSet::default());
- }
-
- let changeset = self
- .index
- .range(block_id.height..)
- .map(|(&height, _)| (height, None))
- .collect::();
- self.apply_changeset(&changeset).map(|_| changeset)
- }
-
- /// Reindex the heights in the chain from (and including) `from` height
- fn reindex(&mut self, from: u32) {
- let _ = self.index.split_off(&from);
- for cp in self.iter_checkpoints() {
- if cp.height() < from {
+ let mut remove_from = Option::::None;
+ let mut changeset = ChangeSet::default();
+ for cp in self.tip().iter() {
+ let cp_id = cp.block_id();
+ if cp_id.height < block_id.height {
break;
}
- self.index.insert(cp.height(), cp.hash());
+ changeset.insert(cp_id.height, None);
+ if cp_id == block_id {
+ remove_from = Some(cp);
+ }
}
+ self.tip = match remove_from.map(|cp| cp.prev()) {
+ // The checkpoint below the earliest checkpoint to remove will be the new tip.
+ Some(Some(new_tip)) => new_tip,
+ // If there is no checkpoint below the earliest checkpoint to remove, it means the
+ // "earliest checkpoint to remove" is the genesis block. We disallow removing the
+ // genesis block.
+ Some(None) => return Err(MissingGenesisError),
+ // If there is nothing to remove, we return an empty changeset.
+ None => return Ok(ChangeSet::default()),
+ };
+ Ok(changeset)
}
/// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to
/// recover the current chain.
pub fn initial_changeset(&self) -> ChangeSet {
- self.index.iter().map(|(k, v)| (*k, Some(*v))).collect()
+ self.tip
+ .iter()
+ .map(|cp| {
+ let block_id = cp.block_id();
+ (block_id.height, Some(block_id.hash))
+ })
+ .collect()
}
/// Iterate over checkpoints in descending height order.
@@ -578,28 +597,43 @@ impl LocalChain {
}
}
- /// Get a reference to the internal index mapping the height to block hash.
- pub fn blocks(&self) -> &BTreeMap {
- &self.index
- }
-
- fn _check_index_is_consistent_with_tip(&self) -> bool {
- let tip_history = self
- .tip
- .iter()
- .map(|cp| (cp.height(), cp.hash()))
- .collect::>();
- self.index == tip_history
- }
-
fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool {
- for (height, exp_hash) in changeset {
- if self.index.get(height) != exp_hash.as_ref() {
- return false;
+ let mut curr_cp = self.tip.clone();
+ for (height, exp_hash) in changeset.iter().rev() {
+ match curr_cp.query(*height) {
+ Some(query_cp) => {
+ if query_cp.height() != *height || Some(query_cp.hash()) != *exp_hash {
+ return false;
+ }
+ curr_cp = query_cp;
+ }
+ None => {
+ if exp_hash.is_some() {
+ return false;
+ }
+ }
}
}
true
}
+
+ /// Query for checkpoint at given `height` (if it exists).
+ ///
+ /// This is a shorthand for calling [`CheckPoint::query`] on the [`tip`].
+ ///
+ /// [`tip`]: LocalChain::tip
+ pub fn query(&self, height: u32) -> Option {
+ self.tip.query(height)
+ }
+
+ /// Query for checkpoint that is greater or equal to `height`.
+ ///
+ /// This is a shorthand for calling [`CheckPoint::query_from`] on the [`tip`].
+ ///
+ /// [`tip`]: LocalChain::tip
+ pub fn query_from(&self, height: u32) -> Option {
+ self.tip.query_from(height)
+ }
}
/// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
diff --git a/crates/chain/src/spk_client.rs b/crates/chain/src/spk_client.rs
new file mode 100644
index 000000000..e52a6f3d8
--- /dev/null
+++ b/crates/chain/src/spk_client.rs
@@ -0,0 +1,212 @@
+use crate::collections::BTreeMap;
+use crate::local_chain::CheckPoint;
+use crate::{local_chain, ConfirmationTimeHeightAnchor, TxGraph};
+use alloc::{boxed::Box, vec::Vec};
+use bitcoin::{OutPoint, ScriptBuf, Txid};
+use core::default::Default;
+use core::fmt::Debug;
+use std::sync::Arc;
+
+// Helper types for use with spk-based blockchain clients.
+
+type InspectSpkFn = Arc>;
+type InspectKeychainSpkFn = Arc>;
+type InspectTxidFn = Arc>;
+type InspectOutPointFn = Arc>;
+
+/// Data required to perform a spk-based blockchain client sync.
+///
+/// A client sync fetches relevant chain data for a known list of scripts, transaction ids and
+/// outpoints. The sync process also updates the chain from the given [`CheckPoint`].
+pub struct SyncRequest {
+ /// A checkpoint for the current chain [`LocalChain::tip`].
+ /// The sync process will return a new chain update that extends this tip.
+ ///
+ /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+ pub chain_tip: CheckPoint,
+ /// Transactions that spend from or to these indexed script pubkeys.
+ spks: Vec<(u32, ScriptBuf)>,
+ /// Transactions with these txids.
+ txids: Vec,
+ /// Transactions with these outpoints or spend from these outpoints.
+ outpoints: Vec,
+ /// An optional call-back function to inspect sync'd spks
+ inspect_spks: Option,
+ /// An optional call-back function to inspect sync'd txids
+ inspect_txids: Option,
+ /// An optional call-back function to inspect sync'd outpoints
+ inspect_outpoints: Option,
+}
+
+fn null_inspect_spks(_index: u32, _spk: &ScriptBuf) {}
+fn null_inspect_keychain_spks(_keychain: K, _index: u32, _spk: &ScriptBuf) {}
+fn null_inspect_txids(_txid: &Txid) {}
+fn null_inspect_outpoints(_outpoint: &OutPoint) {}
+
+impl SyncRequest {
+ /// Create a new [`SyncRequest`] from the current chain tip [`CheckPoint`].
+ pub fn new(chain_tip: CheckPoint) -> Self {
+ Self {
+ chain_tip,
+ spks: Default::default(),
+ txids: Default::default(),
+ outpoints: Default::default(),
+ inspect_spks: Default::default(),
+ inspect_txids: Default::default(),
+ inspect_outpoints: Default::default(),
+ }
+ }
+
+ /// Add [`ScriptBuf`]s to be sync'd with this request.
+ pub fn add_spks(&mut self, spks: impl IntoIterator- ) {
+ self.spks.extend(spks.into_iter())
+ }
+
+ /// Take the [`ScriptBuf`]s to be sync'd with this request.
+ pub fn take_spks(&mut self) -> impl Iterator
- {
+ let spks = core::mem::take(&mut self.spks);
+ let inspect = self
+ .inspect_spks
+ .take()
+ .unwrap_or(Arc::new(Box::new(null_inspect_spks)));
+ spks.into_iter()
+ .inspect(move |(index, spk)| inspect(*index, spk))
+ }
+
+ /// Add a function that will be called for each [`ScriptBuf`] sync'd in this request.
+ pub fn inspect_spks(&mut self, inspect: impl Fn(u32, &ScriptBuf) + Send + Sync + 'static) {
+ self.inspect_spks = Some(Arc::new(Box::new(inspect)))
+ }
+
+ /// Add [`Txid`]s to be sync'd with this request.
+ pub fn add_txids(&mut self, txids: impl IntoIterator
- ) {
+ self.txids.extend(txids.into_iter())
+ }
+
+ /// Take the [`Txid`]s to be sync'd with this request.
+ pub fn take_txids(&mut self) -> impl Iterator
- {
+ let txids = core::mem::take(&mut self.txids);
+ let inspect = self
+ .inspect_txids
+ .clone()
+ .unwrap_or(Arc::new(Box::new(null_inspect_txids)));
+ txids.into_iter().inspect(move |t| inspect(t))
+ }
+
+ /// Add a function that will be called for each [`Txid`] sync'd in this request.
+ pub fn inspect_txids(&mut self, inspect: impl Fn(&Txid) + Send + Sync + 'static) {
+ self.inspect_txids = Some(Arc::new(Box::new(inspect)))
+ }
+
+ /// Add [`OutPoint`]s to be sync'd with this request.
+ pub fn add_outpoints(&mut self, outpoints: impl IntoIterator
- ) {
+ self.outpoints.extend(outpoints.into_iter())
+ }
+
+ /// Take the [`OutPoint`]s to be sync'd with this request.
+ pub fn take_outpoints(&mut self) -> impl Iterator
- {
+ let outpoints = core::mem::take(&mut self.outpoints);
+ let inspect = self
+ .inspect_outpoints
+ .take()
+ .unwrap_or(Arc::new(Box::new(null_inspect_outpoints)));
+ outpoints.into_iter().inspect(move |o| inspect(o))
+ }
+
+ /// Add a function that will be called for each [`OutPoint`] sync'd in this request.
+ pub fn inspect_outpoints(&mut self, inspect: impl Fn(&OutPoint) + Send + Sync + 'static) {
+ self.inspect_outpoints = Some(Arc::new(Box::new(inspect)))
+ }
+}
+
+/// Data returned from a spk-based blockchain client sync.
+///
+/// See also [`SyncRequest`].
+pub struct SyncResult {
+ /// The update to apply to the receiving [`TxGraph`].
+ pub graph_update: TxGraph
,
+ /// The update to apply to the receiving [`LocalChain`](local_chain::LocalChain).
+ pub chain_update: local_chain::Update,
+}
+
+/// Data required to perform a spk-based blockchain client full scan.
+///
+/// A client full scan iterates through all the scripts for the given keychains, fetching relevant
+/// data until some stop gap number of scripts is found that have no data. This operation is
+/// generally only used when importing or restoring previously used keychains in which the list of
+/// used scripts is not known. The full scan process also updates the chain from the given [`CheckPoint`].
+pub struct FullScanRequest {
+ /// A checkpoint for the current [`LocalChain::tip`].
+ /// The full scan process will return a new chain update that extends this tip.
+ ///
+ /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+ pub chain_tip: CheckPoint,
+ /// Iterators of script pubkeys indexed by the keychain index.
+ spks_by_keychain: BTreeMap,
+ /// An optional call-back function to inspect scanned spks
+ inspect_spks: Option>,
+}
+
+/// Create a new [`FullScanRequest`] from the current chain tip [`CheckPoint`].
+impl + Send>
+ FullScanRequest
+{
+ /// Create a new [`FullScanRequest`] from the current chain tip [`CheckPoint`].
+ pub fn new(chain_tip: CheckPoint) -> Self {
+ Self {
+ chain_tip,
+ spks_by_keychain: Default::default(),
+ inspect_spks: Default::default(),
+ }
+ }
+
+ /// Add a map of keychains to tuples of index, [`ScriptBuf`] iterators to be scanned with this
+ /// request.
+ ///
+ /// Adding a map with a keychain that has already been added will overwrite the previously added
+ /// keychain [`ScriptBuf`] iterator.
+ pub fn add_spks_by_keychain(&mut self, spks_by_keychain: BTreeMap) {
+ self.spks_by_keychain.extend(spks_by_keychain)
+ }
+
+ /// Take the map of keychain, [`ScriptBuf`]s to be full scanned with this request.
+ pub fn take_spks_by_keychain(
+ &mut self,
+ ) -> BTreeMap<
+ K,
+ Box + Send> + Send>,
+ > {
+ let spks = core::mem::take(&mut self.spks_by_keychain);
+ let inspect = self
+ .inspect_spks
+ .clone()
+ .unwrap_or(Arc::new(Box::new(null_inspect_keychain_spks)));
+
+ spks.into_iter()
+ .map(move |(k, spk_iter)| {
+ let inspect = inspect.clone();
+ let keychain = k.clone();
+ let spk_iter_inspected =
+ Box::new(spk_iter.inspect(move |(i, spk)| inspect(keychain.clone(), *i, spk)));
+ (k, spk_iter_inspected)
+ })
+ .collect()
+ }
+
+ /// Add a function that will be called for each [`ScriptBuf`] scanned in this request.
+ pub fn inspect_spks(&mut self, inspect: impl Fn(K, u32, &ScriptBuf) + Send + Sync + 'static) {
+ self.inspect_spks = Some(Arc::new(Box::new(inspect)))
+ }
+}
+
+/// Data returned from a spk-based blockchain client full scan.
+///
+/// See also [`FullScanRequest`].
+pub struct FullScanResult {
+ /// The update to apply to the receiving [`TxGraph`].
+ pub graph_update: TxGraph,
+ /// The update to apply to the receiving [`LocalChain`](local_chain::LocalChain).
+ pub chain_update: local_chain::Update,
+ /// Last active indices for the corresponding keychains (`K`).
+ pub last_active_indices: BTreeMap,
+}
diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs
index 34cbccf5c..4a7538cab 100644
--- a/crates/chain/src/tx_graph.rs
+++ b/crates/chain/src/tx_graph.rs
@@ -1,26 +1,27 @@
//! Module for structures that store and traverse transactions.
//!
-//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of those transactions.
-//! `TxGraph` is *monotone* in that you can always insert a transaction -- it doesn't care whether that
-//! transaction is in the current best chain or whether it conflicts with any of the
-//! existing transactions or what order you insert the transactions. This means that you can always
-//! combine two [`TxGraph`]s together, without resulting in inconsistencies.
-//! Furthermore, there is currently no way to delete a transaction.
+//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of
+//! those transactions. `TxGraph` is *monotone* in that you can always insert a transaction -- it
+//! does not care whether that transaction is in the current best chain or whether it conflicts with
+//! any of the existing transactions or what order you insert the transactions. This means that you
+//! can always combine two [`TxGraph`]s together, without resulting in inconsistencies. Furthermore,
+//! there is currently no way to delete a transaction.
//!
-//! Transactions can be either whole or partial (i.e., transactions for which we only
-//! know some outputs, which we usually call "floating outputs"; these are usually inserted
-//! using the [`insert_txout`] method.).
+//! Transactions can be either whole or partial (i.e., transactions for which we only know some
+//! outputs, which we usually call "floating outputs"; these are usually inserted using the
+//! [`insert_txout`] method.).
//!
-//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the
-//! txid, the transaction (whole or partial), the blocks it's anchored in (see the [`Anchor`]
-//! documentation for more details), and the timestamp of the last time we saw
-//! the transaction as unconfirmed.
+//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the txid, the
+//! transaction (whole or partial), the blocks that it is anchored to (see the [`Anchor`]
+//! documentation for more details), and the timestamp of the last time we saw the transaction as
+//! unconfirmed.
//!
//! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
//! identifying and traversing conflicts and descendants of a given transaction. Some [`TxGraph`]
-//! methods only consider "canonical" (i.e., in the best chain or in mempool) transactions,
-//! we decide which transactions are canonical based on anchors `last_seen_unconfirmed`;
-//! see the [`try_get_chain_position`] documentation for more details.
+//! methods only consider transactions that are "canonical" (i.e., in the best chain or in mempool).
+//! We decide which transactions are canonical based on the transaction's anchors and the
+//! `last_seen` (as unconfirmed) timestamp; see the [`try_get_chain_position`] documentation for
+//! more details.
//!
//! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to
//! persistent storage, or to be applied to another [`TxGraph`].
@@ -30,10 +31,22 @@
//!
//! # Applying changes
//!
-//! Methods that apply changes to [`TxGraph`] will return [`ChangeSet`].
-//! [`ChangeSet`] can be applied back to a [`TxGraph`] or be used to inform persistent storage
+//! Methods that change the state of [`TxGraph`] will return [`ChangeSet`]s.
+//! [`ChangeSet`]s can be applied back to a [`TxGraph`] or be used to inform persistent storage
//! of the changes to [`TxGraph`].
//!
+//! # Generics
+//!
+//! Anchors are represented as generics within `TxGraph`. To make use of all functionality of the
+//! `TxGraph`, anchors (`A`) should implement [`Anchor`].
+//!
+//! Anchors are made generic so that different types of data can be stored with how a transaction is
+//! *anchored* to a given block. An example of this is storing a merkle proof of the transaction to
+//! the confirmation block - this can be done with a custom [`Anchor`] type. The minimal [`Anchor`]
+//! type would just be a [`BlockId`], which represents the height and hash of the block that
+//! the transaction is contained in. Note that a transaction can be contained in multiple
+//! conflicting blocks (by nature of the Bitcoin network).
+//!
//! ```
//! # use bdk_chain::BlockId;
//! # use bdk_chain::tx_graph::TxGraph;
@@ -76,10 +89,11 @@
//! [`insert_txout`]: TxGraph::insert_txout
use crate::{
- collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
- ChainOracle, ChainPosition, FullTxOut,
+ collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
+ FullTxOut,
};
use alloc::collections::vec_deque::VecDeque;
+use alloc::sync::Arc;
use alloc::vec::Vec;
use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
use core::fmt::{self, Formatter};
@@ -122,7 +136,7 @@ pub struct TxNode<'a, T, A> {
/// Txid of the transaction.
pub txid: Txid,
/// A partial or full representation of the transaction.
- pub tx: &'a T,
+ pub tx: T,
/// The blocks that the transaction is "anchored" in.
pub anchors: &'a BTreeSet<A>,
/// The last-seen unix timestamp of the transaction as unconfirmed.
@@ -133,7 +147,7 @@ impl<'a, T, A> Deref for TxNode<'a, T, A> {
type Target = T;
fn deref(&self) -> &Self::Target {
- self.tx
+ &self.tx
}
}
@@ -143,7 +157,7 @@ impl<'a, T, A> Deref for TxNode<'a, T, A> {
/// outputs).
#[derive(Clone, Debug, PartialEq)]
enum TxNodeInternal {
- Whole(Transaction),
+ Whole(Arc<Transaction>),
Partial(BTreeMap<u32, TxOut>),
}
@@ -198,6 +212,7 @@ impl TxGraph {
pub fn all_txouts(&self) -> impl Iterator- {
self.txs.iter().flat_map(|(txid, (tx, _, _))| match tx {
TxNodeInternal::Whole(tx) => tx
+ .as_ref()
.output
.iter()
.enumerate()
@@ -229,13 +244,13 @@ impl
TxGraph {
}
/// Iterate over all full transactions in the graph.
- pub fn full_txs(&self) -> impl Iterator- > {
+ pub fn full_txs(&self) -> impl Iterator
- , A>> {
self.txs
.iter()
.filter_map(|(&txid, (tx, anchors, last_seen))| match tx {
TxNodeInternal::Whole(tx) => Some(TxNode {
txid,
- tx,
+ tx: tx.clone(),
anchors,
last_seen_unconfirmed: *last_seen,
}),
@@ -248,16 +263,16 @@ impl
TxGraph {
/// Refer to [`get_txout`] for getting a specific [`TxOut`].
///
/// [`get_txout`]: Self::get_txout
- pub fn get_tx(&self, txid: Txid) -> Option<&Transaction> {
+ pub fn get_tx(&self, txid: Txid) -> Option<Arc<Transaction>> {
self.get_tx_node(txid).map(|n| n.tx)
}
/// Get a transaction node by txid. This only returns `Some` for full transactions.
- pub fn get_tx_node(&self, txid: Txid) -> Option> {
+ pub fn get_tx_node(&self, txid: Txid) -> Option<TxNode<'_, Arc<Transaction>, A>> {
match &self.txs.get(&txid)? {
(TxNodeInternal::Whole(tx), anchors, last_seen) => Some(TxNode {
txid,
- tx,
+ tx: tx.clone(),
anchors,
last_seen_unconfirmed: *last_seen,
}),
@@ -268,7 +283,7 @@ impl TxGraph {
/// Obtains a single tx output (if any) at the specified outpoint.
pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> {
match &self.txs.get(&outpoint.txid)?.0 {
- TxNodeInternal::Whole(tx) => tx.output.get(outpoint.vout as usize),
+ TxNodeInternal::Whole(tx) => tx.as_ref().output.get(outpoint.vout as usize),
TxNodeInternal::Partial(txouts) => txouts.get(&outpoint.vout),
}
}
@@ -279,6 +294,7 @@ impl TxGraph {
pub fn tx_outputs(&self, txid: Txid) -> Option> {
Some(match &self.txs.get(&txid)?.0 {
TxNodeInternal::Whole(tx) => tx
+ .as_ref()
.output
.iter()
.enumerate()
@@ -356,16 +372,15 @@ impl TxGraph {
&self,
txid: Txid,
) -> impl DoubleEndedIterator- )> + '_ {
- let start = OutPoint { txid, vout: 0 };
- let end = OutPoint {
- txid,
- vout: u32::MAX,
- };
+ let start = OutPoint::new(txid, 0);
+ let end = OutPoint::new(txid, u32::MAX);
self.spends
.range(start..=end)
.map(|(outpoint, spends)| (outpoint.vout, spends))
}
+}
+impl
TxGraph {
/// Creates an iterator that filters and maps ancestor transactions.
///
/// The iterator starts with the ancestors of the supplied `tx` (ancestor transactions of `tx`
@@ -379,13 +394,10 @@ impl TxGraph {
///
/// The supplied closure returns an `Option`, allowing the caller to map each `Transaction`
/// it visits and decide whether to visit ancestors.
- pub fn walk_ancestors<'g, F, O>(
- &'g self,
- tx: &'g Transaction,
- walk_map: F,
- ) -> TxAncestors<'g, A, F>
+ pub fn walk_ancestors<'g, T, F, O>(&'g self, tx: T, walk_map: F) -> TxAncestors<'g, A, F>
where
- F: FnMut(usize, &'g Transaction) -> Option + 'g,
+ T: Into<Arc<Transaction>>,
+ F: FnMut(usize, Arc<Transaction>) -> Option<O> + 'g,
{
TxAncestors::new_exclude_root(self, tx, walk_map)
}
@@ -406,7 +418,9 @@ impl TxGraph {
{
TxDescendants::new_exclude_root(self, txid, walk_map)
}
+}
+impl TxGraph {
/// Creates an iterator that both filters and maps conflicting transactions (this includes
/// descendants of directly-conflicting transactions, which are also considered conflicts).
///
@@ -419,7 +433,7 @@ impl TxGraph {
where
F: FnMut(usize, Txid) -> Option + 'g,
{
- let txids = self.direct_conflitcs(tx).map(|(_, txid)| txid);
+ let txids = self.direct_conflicts(tx).map(|(_, txid)| txid);
TxDescendants::from_multiple_include_root(self, txids, walk_map)
}
@@ -430,7 +444,7 @@ impl TxGraph {
/// Note that this only returns directly conflicting txids and won't include:
/// - descendants of conflicting transactions (which are technically also conflicting)
/// - transactions conflicting with the given transaction's ancestors
- pub fn direct_conflitcs<'g>(
+ pub fn direct_conflicts<'g>(
&'g self,
tx: &'g Transaction,
) -> impl Iterator- + '_ {
@@ -467,9 +481,7 @@ impl
TxGraph {
new_graph.apply_changeset(self.initial_changeset().map_anchors(f));
new_graph
}
-}
-impl TxGraph {
/// Construct a new [`TxGraph`] from a list of transactions.
pub fn new(txs: impl IntoIterator- ) -> Self {
let mut new = Self::default();
@@ -506,9 +518,10 @@ impl
TxGraph {
/// The [`ChangeSet`] returned will be empty if `tx` already exists.
pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet {
let mut update = Self::default();
- update
- .txs
- .insert(tx.txid(), (TxNodeInternal::Whole(tx), BTreeSet::new(), 0));
+ update.txs.insert(
+ tx.txid(),
+ (TxNodeInternal::Whole(tx.into()), BTreeSet::new(), 0),
+ );
self.apply_update(update)
}
@@ -567,7 +580,8 @@ impl TxGraph {
/// Applies [`ChangeSet`] to [`TxGraph`].
pub fn apply_changeset(&mut self, changeset: ChangeSet ) {
- for tx in changeset.txs {
+ for wrapped_tx in changeset.txs {
+ let tx = wrapped_tx.as_ref();
let txid = tx.txid();
tx.input
@@ -582,18 +596,20 @@ impl TxGraph {
match self.txs.get_mut(&txid) {
Some((tx_node @ TxNodeInternal::Partial(_), _, _)) => {
- *tx_node = TxNodeInternal::Whole(tx);
+ *tx_node = TxNodeInternal::Whole(wrapped_tx.clone());
}
Some((TxNodeInternal::Whole(tx), _, _)) => {
debug_assert_eq!(
- tx.txid(),
+ tx.as_ref().txid(),
txid,
"tx should produce txid that is same as key"
);
}
None => {
- self.txs
- .insert(txid, (TxNodeInternal::Whole(tx), BTreeSet::new(), 0));
+ self.txs.insert(
+ txid,
+ (TxNodeInternal::Whole(wrapped_tx), BTreeSet::new(), 0),
+ );
}
}
}
@@ -630,7 +646,7 @@ impl TxGraph {
/// The [`ChangeSet`] would be the set difference between `update` and `self` (transactions that
/// exist in `update` but not in `self`).
pub(crate) fn determine_changeset(&self, update: TxGraph ) -> ChangeSet {
- let mut changeset = ChangeSet::default();
+ let mut changeset = ChangeSet::<A>::default();
for (&txid, (update_tx_node, _, update_last_seen)) in &update.txs {
let prev_last_seen: u64 = match (self.txs.get(&txid), update_tx_node) {
@@ -680,69 +696,6 @@ impl TxGraph {
}
impl TxGraph {
- /// Find missing block heights of `chain`.
- ///
- /// This works by scanning through anchors, and seeing whether the anchor block of the anchor
- /// exists in the [`LocalChain`]. The returned iterator does not output duplicate heights.
- pub fn missing_heights<'a>(&'a self, chain: &'a LocalChain) -> impl Iterator- + 'a {
- // Map of txids to skip.
- //
- // Usually, if a height of a tx anchor is missing from the chain, we would want to return
- // this height in the iterator. The exception is when the tx is confirmed in chain. All the
- // other missing-height anchors of this tx can be skipped.
- //
- // * Some(true) => skip all anchors of this txid
- // * Some(false) => do not skip anchors of this txid
- // * None => we do not know whether we can skip this txid
- let mut txids_to_skip = HashMap::
::new();
-
- // Keeps track of the last height emitted so we don't double up.
- let mut last_height_emitted = Option::::None;
-
- self.anchors
- .iter()
- .filter(move |(_, txid)| {
- let skip = *txids_to_skip.entry(*txid).or_insert_with(|| {
- let tx_anchors = match self.txs.get(txid) {
- Some((_, anchors, _)) => anchors,
- None => return true,
- };
- let mut has_missing_height = false;
- for anchor_block in tx_anchors.iter().map(Anchor::anchor_block) {
- match chain.blocks().get(&anchor_block.height) {
- None => {
- has_missing_height = true;
- continue;
- }
- Some(chain_hash) => {
- if chain_hash == &anchor_block.hash {
- return true;
- }
- }
- }
- }
- !has_missing_height
- });
- #[cfg(feature = "std")]
- debug_assert!({
- println!("txid={} skip={}", txid, skip);
- true
- });
- !skip
- })
- .filter_map(move |(a, _)| {
- let anchor_block = a.anchor_block();
- if Some(anchor_block.height) != last_height_emitted
- && !chain.blocks().contains_key(&anchor_block.height)
- {
- last_height_emitted = Some(anchor_block.height);
- Some(anchor_block.height)
- } else {
- None
- }
- })
- }
-
/// Get the position of the transaction in `chain` with tip `chain_tip`.
///
/// Chain data is fetched from `chain`, a [`ChainOracle`] implementation.
@@ -791,10 +744,10 @@ impl TxGraph {
TxNodeInternal::Whole(tx) => {
// A coinbase tx that is not anchored in the best chain cannot be unconfirmed and
// should always be filtered out.
- if tx.is_coin_base() {
+ if tx.as_ref().is_coin_base() {
return Ok(None);
}
- tx
+ tx.clone()
}
TxNodeInternal::Partial(_) => {
// Partial transactions (outputs only) cannot have conflicts.
@@ -811,8 +764,8 @@ impl TxGraph {
// First of all, we retrieve all our ancestors. Since we're using `new_include_root`, the
// resulting array will also include `tx`
let unconfirmed_ancestor_txs =
- TxAncestors::new_include_root(self, tx, |_, ancestor_tx: &Transaction| {
- let tx_node = self.get_tx_node(ancestor_tx.txid())?;
+ TxAncestors::new_include_root(self, tx.clone(), |_, ancestor_tx: Arc| {
+ let tx_node = self.get_tx_node(ancestor_tx.as_ref().txid())?;
// We're filtering the ancestors to keep only the unconfirmed ones (= no anchors in
// the best chain)
for block in tx_node.anchors {
@@ -828,8 +781,10 @@ impl TxGraph {
// We determine our tx's last seen, which is the max between our last seen,
// and our unconf descendants' last seen.
- let unconfirmed_descendants_txs =
- TxDescendants::new_include_root(self, tx.txid(), |_, descendant_txid: Txid| {
+ let unconfirmed_descendants_txs = TxDescendants::new_include_root(
+ self,
+ tx.as_ref().txid(),
+ |_, descendant_txid: Txid| {
let tx_node = self.get_tx_node(descendant_txid)?;
// We're filtering the ancestors to keep only the unconfirmed ones (= no anchors in
// the best chain)
@@ -841,8 +796,9 @@ impl TxGraph {
}
}
Some(Ok(tx_node))
- })
- .collect::, C::Error>>()?;
+ },
+ )
+ .collect::, C::Error>>()?;
let tx_last_seen = unconfirmed_descendants_txs
.iter()
@@ -853,7 +809,8 @@ impl TxGraph {
// Now we traverse our ancestors and consider all their conflicts
for tx_node in unconfirmed_ancestor_txs {
// We retrieve all the transactions conflicting with this specific ancestor
- let conflicting_txs = self.walk_conflicts(tx_node.tx, |_, txid| self.get_tx_node(txid));
+ let conflicting_txs =
+ self.walk_conflicts(tx_node.tx.as_ref(), |_, txid| self.get_tx_node(txid));
// If a conflicting tx is in the best chain, or has `last_seen` higher than this ancestor, then
// this tx cannot exist in the best chain
@@ -867,7 +824,7 @@ impl TxGraph {
return Ok(None);
}
if conflicting_tx.last_seen_unconfirmed == *last_seen
- && conflicting_tx.txid() > tx.txid()
+ && conflicting_tx.as_ref().txid() > tx.as_ref().txid()
{
// Conflicting tx has priority if txid of conflicting tx > txid of original tx
return Ok(None);
@@ -960,7 +917,7 @@ impl TxGraph {
&'a self,
chain: &'a C,
chain_tip: BlockId,
- ) -> impl Iterator- , C::Error>> {
+ ) -> impl Iterator
- , A>, C::Error>> {
self.full_txs().filter_map(move |tx| {
self.try_get_chain_position(chain, chain_tip, tx.txid)
.map(|v| {
@@ -982,7 +939,7 @@ impl
TxGraph {
&'a self,
chain: &'a C,
chain_tip: BlockId,
- ) -> impl Iterator- > {
+ ) -> impl Iterator
- , A>> {
self.try_list_chain_txs(chain, chain_tip)
.map(|r| r.expect("oracle is infallible"))
}
@@ -1021,7 +978,7 @@ impl
TxGraph {
None => return Ok(None),
};
- let txout = match tx_node.tx.output.get(op.vout as usize) {
+ let txout = match tx_node.tx.as_ref().output.get(op.vout as usize) {
Some(txout) => txout.clone(),
None => return Ok(None),
};
@@ -1043,7 +1000,7 @@ impl TxGraph {
txout,
chain_position,
spent_by,
- is_on_coinbase: tx_node.tx.is_coin_base(),
+ is_on_coinbase: tx_node.tx.as_ref().is_coin_base(),
},
)))
},
@@ -1209,7 +1166,7 @@ impl TxGraph {
#[must_use]
pub struct ChangeSet {
/// Added transactions.
- pub txs: BTreeSet,
+ pub txs: BTreeSet<Arc<Transaction>>,
/// Added txouts.
pub txouts: BTreeMap,
/// Added anchors.
@@ -1247,8 +1204,6 @@ impl ChangeSet {
///
/// This is useful if you want to find which heights you need to fetch data about in order to
/// confirm or exclude these anchors.
- ///
- /// See also: [`TxGraph::missing_heights`]
pub fn anchor_heights(&self) -> impl Iterator- + '_
where
A: Anchor,
@@ -1263,24 +1218,6 @@ impl
ChangeSet {
!duplicate
})
}
-
- /// Returns an iterator for the [`anchor_heights`] in this changeset that are not included in
- /// `local_chain`. This tells you which heights you need to include in `local_chain` in order
- /// for it to conclusively act as a [`ChainOracle`] for the transaction anchors this changeset
- /// will add.
- ///
- /// [`ChainOracle`]: crate::ChainOracle
- /// [`anchor_heights`]: Self::anchor_heights
- pub fn missing_heights_from<'a>(
- &'a self,
- local_chain: &'a LocalChain,
- ) -> impl Iterator- + 'a
- where
- A: Anchor,
- {
- self.anchor_heights()
- .filter(move |height| !local_chain.blocks().contains_key(height))
- }
}
impl
Append for ChangeSet {
@@ -1345,7 +1282,7 @@ impl AsRef> for TxGraph {
pub struct TxAncestors<'g, A, F> {
graph: &'g TxGraph ,
visited: HashSet,
- queue: VecDeque<(usize, &'g Transaction)>,
+ queue: VecDeque<(usize, Arc<Transaction>)>,
filter_map: F,
}
@@ -1353,13 +1290,13 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
/// Creates a `TxAncestors` that includes the starting `Transaction` when iterating.
pub(crate) fn new_include_root(
graph: &'g TxGraph,
- tx: &'g Transaction,
+ tx: impl Into<Arc<Transaction>>,
filter_map: F,
) -> Self {
Self {
graph,
visited: Default::default(),
- queue: [(0, tx)].into(),
+ queue: [(0, tx.into())].into(),
filter_map,
}
}
@@ -1367,7 +1304,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
/// Creates a `TxAncestors` that excludes the starting `Transaction` when iterating.
pub(crate) fn new_exclude_root(
graph: &'g TxGraph,
- tx: &'g Transaction,
+ tx: impl Into<Arc<Transaction>>,
filter_map: F,
) -> Self {
let mut ancestors = Self {
@@ -1376,7 +1313,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
queue: Default::default(),
filter_map,
};
- ancestors.populate_queue(1, tx);
+ ancestors.populate_queue(1, tx.into());
ancestors
}
@@ -1389,12 +1326,13 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
filter_map: F,
) -> Self
where
- I: IntoIterator- ,
+ I: IntoIterator,
+ I::Item: Into
>,
{
Self {
graph,
visited: Default::default(),
- queue: txs.into_iter().map(|tx| (0, tx)).collect(),
+ queue: txs.into_iter().map(|tx| (0, tx.into())).collect(),
filter_map,
}
}
@@ -1408,7 +1346,8 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
filter_map: F,
) -> Self
where
- I: IntoIterator- ,
+ I: IntoIterator,
+ I::Item: Into
>,
{
let mut ancestors = Self {
graph,
@@ -1417,12 +1356,12 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
filter_map,
};
for tx in txs {
- ancestors.populate_queue(1, tx);
+ ancestors.populate_queue(1, tx.into());
}
ancestors
}
- fn populate_queue(&mut self, depth: usize, tx: &'g Transaction) {
+ fn populate_queue(&mut self, depth: usize, tx: Arc<Transaction>) {
let ancestors = tx
.input
.iter()
@@ -1436,7 +1375,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
impl<'g, A, F, O> Iterator for TxAncestors<'g, A, F>
where
- F: FnMut(usize, &'g Transaction) -> Option,
+ F: FnMut(usize, Arc<Transaction>) -> Option<O>,
{
type Item = O;
@@ -1445,7 +1384,7 @@ where
// we have exhausted all paths when queue is empty
let (ancestor_depth, tx) = self.queue.pop_front()?;
// ignore paths when user filters them out
- let item = match (self.filter_map)(ancestor_depth, tx) {
+ let item = match (self.filter_map)(ancestor_depth, tx.clone()) {
Some(item) => item,
None => continue,
};
diff --git a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs
index 41b1d4d3e..0fd2a71b8 100644
--- a/crates/chain/tests/test_indexed_tx_graph.rs
+++ b/crates/chain/tests/test_indexed_tx_graph.rs
@@ -1,13 +1,13 @@
#[macro_use]
mod common;
-use std::collections::BTreeSet;
+use std::{collections::BTreeSet, sync::Arc};
use bdk_chain::{
indexed_tx_graph::{self, IndexedTxGraph},
keychain::{self, Balance, KeychainTxOutIndex},
local_chain::LocalChain,
- tx_graph, BlockId, ChainPosition, ConfirmationHeightAnchor,
+ tx_graph, ChainPosition, ConfirmationHeightAnchor,
};
use bitcoin::{secp256k1::Secp256k1, OutPoint, Script, ScriptBuf, Transaction, TxIn, TxOut};
use miniscript::Descriptor;
@@ -66,7 +66,7 @@ fn insert_relevant_txs() {
let changeset = indexed_tx_graph::ChangeSet {
graph: tx_graph::ChangeSet {
- txs: txs.clone().into(),
+ txs: txs.iter().cloned().map(Arc::new).collect(),
..Default::default()
},
indexer: keychain::ChangeSet([((), 9_u32)].into()),
@@ -80,7 +80,6 @@ fn insert_relevant_txs() {
assert_eq!(graph.initial_changeset(), changeset,);
}
-#[test]
/// Ensure consistency IndexedTxGraph list_* and balance methods. These methods lists
/// relevant txouts and utxos from the information fetched from a ChainOracle (here a LocalChain).
///
@@ -108,7 +107,7 @@ fn insert_relevant_txs() {
///
/// Finally Add more blocks to local chain until tx1 coinbase maturity hits.
/// Assert maturity at coinbase maturity inflection height. Block height 98 and 99.
-
+#[test]
fn test_list_owned_txouts() {
// Create Local chains
let local_chain = LocalChain::from_blocks((0..150).map(|i| (i as u32, h!("random"))).collect())
@@ -213,10 +212,8 @@ fn test_list_owned_txouts() {
(
*tx,
local_chain
- .blocks()
- .get(&height)
- .cloned()
- .map(|hash| BlockId { height, hash })
+ .query(height)
+ .map(|cp| cp.block_id())
.map(|anchor_block| ConfirmationHeightAnchor {
anchor_block,
confirmation_height: anchor_block.height,
@@ -231,9 +228,8 @@ fn test_list_owned_txouts() {
|height: u32,
graph: &IndexedTxGraph>| {
let chain_tip = local_chain
- .blocks()
- .get(&height)
- .map(|&hash| BlockId { height, hash })
+ .query(height)
+ .map(|cp| cp.block_id())
.unwrap_or_else(|| panic!("block must exist at {}", height));
let txouts = graph
.graph()
diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs
index c1a1cd7f9..b601a17f9 100644
--- a/crates/chain/tests/test_local_chain.rs
+++ b/crates/chain/tests/test_local_chain.rs
@@ -528,6 +528,45 @@ fn checkpoint_from_block_ids() {
}
}
+#[test]
+fn checkpoint_query() {
+ struct TestCase {
+ chain: LocalChain,
+ /// The heights we want to call [`CheckPoint::query`] with, represented as an inclusive
+ /// range.
+ ///
+ /// If a [`CheckPoint`] exists at that height, we expect [`CheckPoint::query`] to return
+ /// it. If not, [`CheckPoint::query`] should return `None`.
+ query_range: (u32, u32),
+ }
+
+ let test_cases = [
+ TestCase {
+ chain: local_chain![(0, h!("_")), (1, h!("A"))],
+ query_range: (0, 2),
+ },
+ TestCase {
+ chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
+ query_range: (0, 3),
+ },
+ ];
+
+ for t in test_cases.into_iter() {
+ let tip = t.chain.tip();
+ for h in t.query_range.0..=t.query_range.1 {
+ let query_result = tip.query(h);
+ let exp_hash = t.chain.query(h).map(|cp| cp.hash());
+ match query_result {
+ Some(cp) => {
+ assert_eq!(Some(cp.hash()), exp_hash);
+ assert_eq!(cp.height(), h);
+ }
+ None => assert!(query_result.is_none()),
+ }
+ }
+ }
+}
+
#[test]
fn local_chain_apply_header_connected_to() {
fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
diff --git a/crates/chain/tests/test_tx_graph.rs b/crates/chain/tests/test_tx_graph.rs
index 37e8c7192..11ac8032a 100644
--- a/crates/chain/tests/test_tx_graph.rs
+++ b/crates/chain/tests/test_tx_graph.rs
@@ -13,6 +13,7 @@ use bitcoin::{
use common::*;
use core::iter;
use rand::RngCore;
+use std::sync::Arc;
use std::vec;
#[test]
@@ -119,7 +120,7 @@ fn insert_txouts() {
assert_eq!(
graph.insert_tx(update_txs.clone()),
ChangeSet {
- txs: [update_txs.clone()].into(),
+ txs: [Arc::new(update_txs.clone())].into(),
..Default::default()
}
);
@@ -143,7 +144,7 @@ fn insert_txouts() {
assert_eq!(
changeset,
ChangeSet {
- txs: [update_txs.clone()].into(),
+ txs: [Arc::new(update_txs.clone())].into(),
txouts: update_ops.clone().into(),
anchors: [(conf_anchor, update_txs.txid()), (unconf_anchor, h!("tx2"))].into(),
last_seen: [(h!("tx2"), 1000000)].into()
@@ -194,7 +195,7 @@ fn insert_txouts() {
assert_eq!(
graph.initial_changeset(),
ChangeSet {
- txs: [update_txs.clone()].into(),
+ txs: [Arc::new(update_txs.clone())].into(),
txouts: update_ops.into_iter().chain(original_ops).collect(),
anchors: [(conf_anchor, update_txs.txid()), (unconf_anchor, h!("tx2"))].into(),
last_seen: [(h!("tx2"), 1000000)].into()
@@ -276,7 +277,10 @@ fn insert_tx_can_retrieve_full_tx_from_graph() {
let mut graph = TxGraph::<()>::default();
let _ = graph.insert_tx(tx.clone());
- assert_eq!(graph.get_tx(tx.txid()), Some(&tx));
+ assert_eq!(
+ graph.get_tx(tx.txid()).map(|tx| tx.as_ref().clone()),
+ Some(tx)
+ );
}
#[test]
@@ -643,7 +647,7 @@ fn test_walk_ancestors() {
..common::new_tx(0)
};
- let mut graph = TxGraph::::new(vec![
+ let mut graph = TxGraph::::new([
tx_a0.clone(),
tx_b0.clone(),
tx_b1.clone(),
@@ -664,17 +668,17 @@ fn test_walk_ancestors() {
let ancestors = [
graph
- .walk_ancestors(&tx_c0, |depth, tx| Some((depth, tx)))
+ .walk_ancestors(tx_c0.clone(), |depth, tx| Some((depth, tx)))
.collect::>(),
graph
- .walk_ancestors(&tx_d0, |depth, tx| Some((depth, tx)))
+ .walk_ancestors(tx_d0.clone(), |depth, tx| Some((depth, tx)))
.collect::>(),
graph
- .walk_ancestors(&tx_e0, |depth, tx| Some((depth, tx)))
+ .walk_ancestors(tx_e0.clone(), |depth, tx| Some((depth, tx)))
.collect::>(),
// Only traverse unconfirmed ancestors of tx_e0 this time
graph
- .walk_ancestors(&tx_e0, |depth, tx| {
+ .walk_ancestors(tx_e0.clone(), |depth, tx| {
let tx_node = graph.get_tx_node(tx.txid())?;
for block in tx_node.anchors {
match local_chain.is_block_in_chain(block.anchor_block(), tip.block_id()) {
@@ -701,8 +705,14 @@ fn test_walk_ancestors() {
vec![(1, &tx_d1), (2, &tx_c2), (2, &tx_c3), (3, &tx_b2)],
];
- for (txids, expected_txids) in ancestors.iter().zip(expected_ancestors.iter()) {
- assert_eq!(txids, expected_txids);
+ for (txids, expected_txids) in ancestors.into_iter().zip(expected_ancestors) {
+ assert_eq!(
+ txids,
+ expected_txids
+ .into_iter()
+ .map(|(i, tx)| (i, Arc::new(tx.clone())))
+ .collect::>()
+ );
}
}
@@ -1048,139 +1058,6 @@ fn test_changeset_last_seen_append() {
}
}
-#[test]
-fn test_missing_blocks() {
- /// An anchor implementation for testing, made up of `(the_anchor_block, random_data)`.
- #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, core::hash::Hash)]
- struct TestAnchor(BlockId);
-
- impl Anchor for TestAnchor {
- fn anchor_block(&self) -> BlockId {
- self.0
- }
- }
-
- struct Scenario<'a> {
- name: &'a str,
- graph: TxGraph,
- chain: LocalChain,
- exp_heights: &'a [u32],
- }
-
- const fn new_anchor(height: u32, hash: BlockHash) -> TestAnchor {
- TestAnchor(BlockId { height, hash })
- }
-
- fn new_scenario<'a>(
- name: &'a str,
- graph_anchors: &'a [(Txid, TestAnchor)],
- chain: &'a [(u32, BlockHash)],
- exp_heights: &'a [u32],
- ) -> Scenario<'a> {
- Scenario {
- name,
- graph: {
- let mut g = TxGraph::default();
- for (txid, anchor) in graph_anchors {
- let _ = g.insert_anchor(*txid, anchor.clone());
- }
- g
- },
- chain: {
- let (mut c, _) = LocalChain::from_genesis_hash(h!("genesis"));
- for (height, hash) in chain {
- let _ = c.insert_block(BlockId {
- height: *height,
- hash: *hash,
- });
- }
- c
- },
- exp_heights,
- }
- }
-
- fn run(scenarios: &[Scenario]) {
- for scenario in scenarios {
- let Scenario {
- name,
- graph,
- chain,
- exp_heights,
- } = scenario;
-
- let heights = graph.missing_heights(chain).collect::>();
- assert_eq!(&heights, exp_heights, "scenario: {}", name);
- }
- }
-
- run(&[
- new_scenario(
- "2 txs with the same anchor (2:B) which is missing from chain",
- &[
- (h!("tx_1"), new_anchor(2, h!("B"))),
- (h!("tx_2"), new_anchor(2, h!("B"))),
- ],
- &[(1, h!("A")), (3, h!("C"))],
- &[2],
- ),
- new_scenario(
- "2 txs with different anchors at the same height, one of the anchors is missing",
- &[
- (h!("tx_1"), new_anchor(2, h!("B1"))),
- (h!("tx_2"), new_anchor(2, h!("B2"))),
- ],
- &[(1, h!("A")), (2, h!("B1"))],
- &[],
- ),
- new_scenario(
- "tx with 2 anchors of same height which are missing from the chain",
- &[
- (h!("tx"), new_anchor(3, h!("C1"))),
- (h!("tx"), new_anchor(3, h!("C2"))),
- ],
- &[(1, h!("A")), (4, h!("D"))],
- &[3],
- ),
- new_scenario(
- "tx with 2 anchors at the same height, chain has this height but does not match either anchor",
- &[
- (h!("tx"), new_anchor(4, h!("D1"))),
- (h!("tx"), new_anchor(4, h!("D2"))),
- ],
- &[(4, h!("D3")), (5, h!("E"))],
- &[],
- ),
- new_scenario(
- "tx with 2 anchors at different heights, one anchor exists in chain, should return nothing",
- &[
- (h!("tx"), new_anchor(3, h!("C"))),
- (h!("tx"), new_anchor(4, h!("D"))),
- ],
- &[(4, h!("D")), (5, h!("E"))],
- &[],
- ),
- new_scenario(
- "tx with 2 anchors at different heights, first height is already in chain with different hash, iterator should only return 2nd height",
- &[
- (h!("tx"), new_anchor(5, h!("E1"))),
- (h!("tx"), new_anchor(6, h!("F1"))),
- ],
- &[(4, h!("D")), (5, h!("E")), (7, h!("G"))],
- &[6],
- ),
- new_scenario(
- "tx with 2 anchors at different heights, neither height is in chain, both heights should be returned",
- &[
- (h!("tx"), new_anchor(3, h!("C"))),
- (h!("tx"), new_anchor(4, h!("D"))),
- ],
- &[(1, h!("A")), (2, h!("B"))],
- &[3, 4],
- ),
- ]);
-}
-
#[test]
/// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
/// even though the function is non-deterministic.
diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs
index 4d6e0dfa8..86e469d0c 100644
--- a/crates/esplora/src/async_ext.rs
+++ b/crates/esplora/src/async_ext.rs
@@ -1,5 +1,10 @@
+use std::collections::BTreeSet;
+use std::fmt::Debug;
+
use async_trait::async_trait;
use bdk_chain::collections::btree_map;
+use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
+use bdk_chain::Anchor;
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
collections::BTreeMap,
@@ -22,260 +27,258 @@ type Error = Box<esplora_client::Error>;
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
- /// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
- ///
- /// * `local_tip` is the previous tip of [`LocalChain::tip`].
- /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
- ///
- /// The result of this method can be applied to [`LocalChain::apply_update`].
- ///
- /// ## Consistency
- ///
- /// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
- /// during the call. The size of re-org we can tollerate is server dependent but will be at
- /// least 10.
- ///
- /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
- /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
- /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
- async fn update_local_chain(
- &self,
- local_tip: CheckPoint,
- request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
- ) -> Result<local_chain::Update, Error>;
-
- /// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
- /// returns a [`TxGraph`] and a map of last active indices.
+ /// Scan keychain scripts for transactions against Esplora, returning an update that can be
+ /// applied to the receiving structures.
///
- /// * `keychain_spks`: keychains that we want to scan transactions for
+ /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
+ /// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
+ /// make in parallel.
///
- /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
- /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
- /// parallel.
- async fn full_scan(
+ /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+ async fn full_scan<
+ K: Ord + Clone + Send + Debug + 'static,
+ I: Iterator<Item = (u32, ScriptBuf)> + Send,
+ >(
&self,
- keychain_spks: BTreeMap<
- K,
- impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
- >,
+ request: FullScanRequest<K, I>,
stop_gap: usize,
parallel_requests: usize,
- ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
+ ) -> Result<FullScanResult<K>, Error>;
/// Sync a set of scripts with the blockchain (via an Esplora client) for the data
/// specified and return a [`TxGraph`].
///
- /// * `misc_spks`: scripts that we want to sync transactions for
- /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
- /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
- /// want to include in the update
- ///
/// If the scripts to sync are unknown, such as when restoring or importing a keychain that
/// may include scripts that have been used, use [`full_scan`] with the keychain.
///
+ /// [`LocalChain::tip`]: local_chain::LocalChain::tip
/// [`full_scan`]: EsploraAsyncExt::full_scan
async fn sync(
&self,
- misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
- txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
- outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+ request: SyncRequest,
parallel_requests: usize,
- ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
+ ) -> Result<SyncResult, Error>;
}
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
- async fn update_local_chain(
+ async fn full_scan<
+ K: Ord + Clone + Send + Debug + 'static,
+ I: Iterator<Item = (u32, ScriptBuf)> + Send,
+ >(
&self,
- local_tip: CheckPoint,
- request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
- ) -> Result<local_chain::Update, Error> {
- // Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
- // consistent.
- let mut fetched_blocks = self
- .get_blocks(None)
- .await?
- .into_iter()
- .map(|b| (b.time.height, b.id))
- .collect::<BTreeMap<u32, BlockHash>>();
- let new_tip_height = fetched_blocks
- .keys()
- .last()
- .copied()
- .expect("must have atleast one block");
-
- // Fetch blocks of heights that the caller is interested in, skipping blocks that are
- // already fetched when constructing `fetched_blocks`.
- for height in request_heights {
- // do not fetch blocks higher than remote tip
- if height > new_tip_height {
- continue;
- }
- // only fetch what is missing
- if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
- // ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
- // with the chain at the time of `get_blocks` above (there could have been a deep
- // re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
- // not possible to have a re-org deeper than that.
- entry.insert(self.get_block_hash(height).await?);
- }
- }
-
- // Ensure `fetched_blocks` can create an update that connects with the original chain by
- // finding a "Point of Agreement".
- for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
- if height > new_tip_height {
- continue;
- }
-
- let fetched_hash = match fetched_blocks.entry(height) {
- btree_map::Entry::Occupied(entry) => *entry.get(),
- btree_map::Entry::Vacant(entry) => {
- *entry.insert(self.get_block_hash(height).await?)
- }
- };
-
- // We have found point of agreement so the update will connect!
- if fetched_hash == local_hash {
- break;
- }
- }
+ mut request: FullScanRequest<K, I>,
+ stop_gap: usize,
+ parallel_requests: usize,
+ ) -> Result<FullScanResult<K>, Error> {
+ let update_blocks = init_chain_update(self, &request.chain_tip).await?;
+ let (graph_update, last_active_indices) = full_scan_for_index_and_graph(
+ self,
+ request.take_spks_by_keychain(),
+ stop_gap,
+ parallel_requests,
+ )
+ .await?;
+ let chain_update = finalize_chain_update(
+ self,
+ &request.chain_tip,
+ graph_update.all_anchors(),
+ update_blocks,
+ )
+ .await?;
- Ok(local_chain::Update {
- tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
- .expect("must be in height order"),
- introduce_older_blocks: true,
+ Ok(FullScanResult {
+ graph_update,
+ chain_update,
+ last_active_indices,
})
}
- async fn full_scan