From 541d0cfbc66ef7a7f39ba0b5183d3b494513e423 Mon Sep 17 00:00:00 2001 From: xdustinface Date: Mon, 27 Apr 2026 22:01:24 +1000 Subject: [PATCH 1/2] feat: per-wallet filter scan and runtime wallet catch-up Filter matching and block processing now operate per wallet, so a wallet added at runtime catches up without forcing the already-synced wallets to reprocess anything. - `WalletInterface` restructured around per-wallet ops: `process_block_for_wallets`, `wallets_behind`, `monitored_addresses_for`, `wallet_synced_height`, and monotonic per-wallet height updates. Aggregate heights are derived (min of `synced_height`, max of `last_processed_height`) rather than stored. - `FiltersManager::scan_batch` matches each behind wallet's addresses only against filter heights it hasn't yet covered; already-synced wallets are skipped entirely. Matched blocks flow through `BlocksNeeded` carrying the per-block wallet set so `BlocksManager` processes each block only against the wallets whose filters matched. `FiltersBatch` records the scanned-wallet set so commit advances only their `synced_height`. - `FiltersManager::tick` detects when a wallet's `synced_height` sits below the current `committed_height` (a runtime add behind scan progress), clears in-flight pipeline state, lowers `committed_height` to the new aggregate floor, and re-enters `start_download` on the next 100ms tick. Runs in `Syncing`, `Synced`, and `WaitForEvents`. 
Based on: - #689 --- dash-spv-ffi/src/callbacks.rs | 88 +- dash-spv/src/sync/blocks/manager.rs | 122 +- dash-spv/src/sync/blocks/pipeline.rs | 214 ++- dash-spv/src/sync/blocks/sync_manager.rs | 17 +- dash-spv/src/sync/events.rs | 22 +- dash-spv/src/sync/filters/batch.rs | 40 +- .../src/sync/filters/block_match_tracker.rs | 268 ++++ dash-spv/src/sync/filters/manager.rs | 1212 +++++++++++++++-- dash-spv/src/sync/filters/mod.rs | 1 + dash-spv/src/sync/filters/sync_manager.rs | 48 +- dash-spv/src/sync/mempool/sync_manager.rs | 10 +- dash-spv/tests/dashd_sync/helpers.rs | 2 +- key-wallet-ffi/src/wallet_manager_tests.rs | 7 +- key-wallet-manager/Cargo.toml | 1 + .../examples/wallet_creation.rs | 7 +- key-wallet-manager/src/event_tests.rs | 4 +- key-wallet-manager/src/lib.rs | 59 +- key-wallet-manager/src/matching.rs | 35 +- key-wallet-manager/src/process_block.rs | 151 +- .../src/test_utils/mock_wallet.rs | 274 +++- key-wallet-manager/src/test_utils/mod.rs | 3 + key-wallet-manager/src/wallet_interface.rs | 71 +- key-wallet-manager/tests/integration_test.rs | 75 +- .../tests/spv_integration_tests.rs | 36 +- 24 files changed, 2439 insertions(+), 328 deletions(-) create mode 100644 dash-spv/src/sync/filters/block_match_tracker.rs diff --git a/dash-spv-ffi/src/callbacks.rs b/dash-spv-ffi/src/callbacks.rs index 5ef9103c1..31c098316 100644 --- a/dash-spv-ffi/src/callbacks.rs +++ b/dash-spv-ffi/src/callbacks.rs @@ -347,7 +347,7 @@ impl FFISyncEventCallbacks { } => { if let Some(cb) = self.on_blocks_needed { let ffi_blocks: Vec = blocks - .iter() + .keys() .map(|key| FFIBlockNeeded { height: key.height(), hash: *key.hash().as_byte_array(), @@ -361,15 +361,17 @@ impl FFISyncEventCallbacks { height, new_addresses, confirmed_txids, + .. 
} => { if let Some(cb) = self.on_block_processed { let hash_bytes = block_hash.as_byte_array(); let txid_bytes: Vec<[u8; 32]> = confirmed_txids.iter().map(|txid| *txid.as_byte_array()).collect(); + let total_new_addresses: usize = new_addresses.values().map(|v| v.len()).sum(); cb( *height, hash_bytes as *const [u8; 32], - new_addresses.len() as u32, + total_new_addresses as u32, txid_bytes.as_ptr(), txid_bytes.len() as u32, self.user_data, @@ -755,3 +757,85 @@ impl FFIWalletEventCallbacks { } } } + +#[cfg(test)] +mod tests { + use super::*; + use dashcore::hashes::Hash; + use dashcore::{Address, BlockHash, Network, Txid}; + use key_wallet_manager::{FilterMatchKey, WalletId}; + use std::collections::{BTreeMap, BTreeSet}; + use std::sync::atomic::{AtomicU32, Ordering}; + + /// `BlocksNeeded` dispatch must pass exactly one entry per + /// `FilterMatchKey` to the FFI callback (i.e. iterate keys, not + /// inflated by the per-block wallet attribution). + #[test] + fn test_blocks_needed_dispatch_passes_unique_keys_count() { + static COUNT: AtomicU32 = AtomicU32::new(u32::MAX); + extern "C" fn cb(_blocks: *const FFIBlockNeeded, count: u32, _user: *mut c_void) { + COUNT.store(count, Ordering::SeqCst); + } + + let callbacks = FFISyncEventCallbacks { + on_blocks_needed: Some(cb), + ..FFISyncEventCallbacks::default() + }; + + let mut blocks: BTreeMap> = BTreeMap::new(); + // Two distinct blocks, each attributed to two wallets. The dispatch + // must report 2 (unique keys), not 4. 
+ blocks.insert( + FilterMatchKey::new(10, BlockHash::from_byte_array([1u8; 32])), + BTreeSet::from([[1u8; 32], [2u8; 32]]), + ); + blocks.insert( + FilterMatchKey::new(20, BlockHash::from_byte_array([2u8; 32])), + BTreeSet::from([[1u8; 32], [2u8; 32]]), + ); + + callbacks.dispatch(&SyncEvent::BlocksNeeded { + blocks, + }); + assert_eq!(COUNT.load(Ordering::SeqCst), 2); + } + + /// `BlockProcessed` dispatch must report the total address count + /// summed across all per-wallet entries in the `new_addresses` map. + #[test] + fn test_block_processed_dispatch_sums_per_wallet_addresses() { + static NEW_ADDR_COUNT: AtomicU32 = AtomicU32::new(u32::MAX); + extern "C" fn cb( + _height: u32, + _hash: *const [u8; 32], + new_address_count: u32, + _txids: *const [u8; 32], + _txid_count: u32, + _user: *mut c_void, + ) { + NEW_ADDR_COUNT.store(new_address_count, Ordering::SeqCst); + } + + let callbacks = FFISyncEventCallbacks { + on_block_processed: Some(cb), + ..FFISyncEventCallbacks::default() + }; + + let addr_a = Address::dummy(Network::Regtest, 1); + let addr_b = Address::dummy(Network::Regtest, 2); + let addr_c = Address::dummy(Network::Regtest, 3); + let mut new_addresses: BTreeMap> = BTreeMap::new(); + // Wallet 1 contributes 2 new addresses, wallet 2 contributes 1. Total = 3. 
+ new_addresses.insert([1u8; 32], vec![addr_a, addr_b]); + new_addresses.insert([2u8; 32], vec![addr_c]); + + callbacks.dispatch(&SyncEvent::BlockProcessed { + block_hash: BlockHash::from_byte_array([7u8; 32]), + height: 100, + wallets: BTreeSet::new(), + new_addresses, + confirmed_txids: vec![Txid::from_byte_array([9u8; 32])], + }); + assert_eq!(NEW_ADDR_COUNT.load(Ordering::SeqCst), 3); + } +} diff --git a/dash-spv/src/sync/blocks/manager.rs b/dash-spv/src/sync/blocks/manager.rs index 45ffed61b..6f69e27dc 100644 --- a/dash-spv/src/sync/blocks/manager.rs +++ b/dash-spv/src/sync/blocks/manager.rs @@ -79,15 +79,17 @@ impl BlocksManager 0 { tracing::info!( "Found {} relevant transactions ({} new, {} existing) {} at height {}, new addresses: {}", @@ -96,7 +98,7 @@ impl BlocksManager BlocksManager = result.relevant_txids().cloned().collect(); // Collect new addresses for gap limit rescanning - let new_addresses: Vec<_> = result.new_addresses.into_iter().collect(); - if !new_addresses.is_empty() { + let new_addresses = result.new_addresses; + if new_addresses_total > 0 { tracing::debug!( - "Block {} generated {} new addresses for gap limit maintenance", + "Block {} generated {} new addresses for gap limit maintenance across {} wallets", height, + new_addresses_total, new_addresses.len() ); } @@ -124,6 +127,7 @@ impl BlocksManager; @@ -215,8 +219,8 @@ mod tests { let requests = network.request_sender(); let block_hash = dashcore::BlockHash::dummy(0); - let mut blocks = BTreeSet::new(); - blocks.insert(FilterMatchKey::new(100, block_hash)); + let mut blocks = BTreeMap::new(); + blocks.insert(FilterMatchKey::new(100, block_hash), BTreeSet::from([MOCK_WALLET_ID])); let event = SyncEvent::BlocksNeeded { blocks, }; @@ -227,4 +231,100 @@ mod tests { assert_eq!(manager.state(), SyncState::Syncing); assert!(events.is_empty()); } + + /// `process_buffered_blocks` must call `process_block_for_wallets` with + /// the exact wallet set carried in the pipeline so already-synced + /// 
wallets are not touched by routing logic. + #[tokio::test] + async fn test_process_buffered_blocks_routes_wallet_set() { + use dashcore::block::Header; + use dashcore::{Block, TxMerkleNode}; + use dashcore_hashes::Hash; + + let mut manager = create_test_manager().await; + manager.progress.set_state(SyncState::Syncing); + + let header = Header { + version: dashcore::blockdata::block::Version::from_consensus(1), + prev_blockhash: dashcore::BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0, + bits: dashcore::CompactTarget::from_consensus(0), + nonce: 0, + }; + let block = Block { + header, + txdata: vec![], + }; + manager.pipeline.add_from_storage(block.clone(), 100, BTreeSet::from([MOCK_WALLET_ID])); + + let events = manager.process_buffered_blocks().await.unwrap(); + assert!(matches!(events.first(), Some(SyncEvent::BlockProcessed { .. }))); + + // MOCK_WALLET_ID was in the routed set, so MockWallet recorded the + // block. (MockWallet::process_block_for_wallets returns early when + // its id is absent.) + let processed = manager.wallet.read().await.processed_blocks(); + let processed = processed.lock().await; + assert_eq!(processed.len(), 1); + assert_eq!(processed[0].1, 100); + } + + /// A wallet that is NOT in the pipeline's interested set must not be + /// routed the block. Two wallets are registered, but only `wallet_in` + /// appears in the routed set; the other wallet's processed log must + /// stay empty for that block. 
+ #[tokio::test] + async fn test_process_buffered_blocks_excludes_uninterested_wallet() { + use dashcore::block::Header; + use dashcore::{Block, TxMerkleNode}; + use dashcore_hashes::Hash; + use key_wallet_manager::test_utils::{MockWalletState, MultiMockWallet}; + use key_wallet_manager::WalletId; + + let storage = DiskStorageManager::with_temp_dir().await.unwrap(); + let multi = MultiMockWallet::new(); + let wallet_in: WalletId = [0xAA; 32]; + let wallet_out: WalletId = [0xBB; 32]; + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet(wallet_in, MockWalletState::default()); + w.insert_wallet(wallet_out, MockWalletState::default()); + } + let mut manager: BlocksManager< + PersistentBlockHeaderStorage, + PersistentBlockStorage, + MultiMockWallet, + > = BlocksManager::new(multi.clone(), storage.block_headers(), storage.blocks()).await; + manager.progress.set_state(SyncState::Syncing); + + let header = Header { + version: dashcore::blockdata::block::Version::from_consensus(1), + prev_blockhash: dashcore::BlockHash::all_zeros(), + merkle_root: TxMerkleNode::all_zeros(), + time: 0, + bits: dashcore::CompactTarget::from_consensus(0), + nonce: 0, + }; + let block = Block { + header, + txdata: vec![], + }; + // Only wallet_in is in the routed set. + manager.pipeline.add_from_storage(block.clone(), 100, BTreeSet::from([wallet_in])); + + let _ = manager.process_buffered_blocks().await.unwrap(); + + let processed = multi.read().await.processed(); + let processed = processed.lock().await; + // Exactly one entry, for wallet_in only. 
+ assert_eq!(processed.len(), 1); + assert_eq!(processed[0].0, wallet_in); + assert_eq!(processed[0].2, 100); + assert!( + !processed.iter().any(|(id, _, _)| *id == wallet_out), + "wallet_out was not in the routed set, must not be processed" + ); + } } diff --git a/dash-spv/src/sync/blocks/pipeline.rs b/dash-spv/src/sync/blocks/pipeline.rs index 2338841e8..fc6475ab7 100644 --- a/dash-spv/src/sync/blocks/pipeline.rs +++ b/dash-spv/src/sync/blocks/pipeline.rs @@ -11,7 +11,7 @@ use crate::network::RequestSender; use crate::sync::download_coordinator::{DownloadConfig, DownloadCoordinator}; use dashcore::blockdata::block::Block; use dashcore::BlockHash; -use key_wallet_manager::FilterMatchKey; +use key_wallet_manager::{FilterMatchKey, WalletId}; /// Maximum number of concurrent block downloads. const MAX_CONCURRENT_BLOCK_DOWNLOADS: usize = 20; @@ -36,6 +36,9 @@ pub(super) struct BlocksPipeline { downloaded: BTreeMap, /// Map hash -> height for looking up height when block arrives. hash_to_height: HashMap, + /// Per-block interested wallets, populated when the block is queued. + /// Only those wallets get the block processed. + hash_to_wallets: HashMap>, } impl std::fmt::Debug for BlocksPipeline { @@ -66,17 +69,25 @@ impl BlocksPipeline { pending_heights: BTreeSet::new(), downloaded: BTreeMap::new(), hash_to_height: HashMap::new(), + hash_to_wallets: HashMap::new(), } } - /// Queue blocks with their heights for download. - /// - /// This is the preferred method as it enables height-ordered processing. - pub(super) fn queue(&mut self, blocks: impl IntoIterator) { - for key in blocks { - self.coordinator.enqueue([*key.hash()]); - self.pending_heights.insert(key.height()); - self.hash_to_height.insert(*key.hash(), key.height()); + /// Queue blocks with their heights and per-block interested wallet sets. 
+ pub(super) fn queue( + &mut self, + blocks: impl IntoIterator)>, + ) { + for (key, wallets) in blocks { + let hash = *key.hash(); + let already_tracked = + self.hash_to_height.contains_key(&hash) || self.hash_to_wallets.contains_key(&hash); + if !already_tracked { + self.coordinator.enqueue([hash]); + self.pending_heights.insert(key.height()); + self.hash_to_height.insert(hash, key.height()); + } + self.hash_to_wallets.entry(hash).or_default().extend(wallets); } } @@ -141,12 +152,13 @@ impl BlocksPipeline { true } - /// Take the next block that's safe to process in height order. + /// Take the next block that's safe to process in height order, along with + /// the wallet set whose filters matched this block. /// /// Returns None if: /// - No downloaded blocks available, or /// - Waiting for a lower-height block still pending - pub(super) fn take_next_ordered_block(&mut self) -> Option<(Block, u32)> { + pub(super) fn take_next_ordered_block(&mut self) -> Option<(Block, u32, BTreeSet)> { let lowest_downloaded = *self.downloaded.keys().next()?; // Check if any pending blocks have lower heights @@ -156,15 +168,22 @@ impl BlocksPipeline { } } - // Safe to return this block let block = self.downloaded.remove(&lowest_downloaded).unwrap(); - Some((block, lowest_downloaded)) + let wallets = self.hash_to_wallets.remove(&block.block_hash()).unwrap_or_default(); + Some((block, lowest_downloaded, wallets)) } /// Add a block that was loaded from storage (skip download). /// /// Used when blocks are already persisted from a previous sync. 
- pub(super) fn add_from_storage(&mut self, block: Block, height: u32) { + pub(super) fn add_from_storage( + &mut self, + block: Block, + height: u32, + wallets: BTreeSet, + ) { + let hash = block.block_hash(); + self.hash_to_wallets.entry(hash).or_default().extend(wallets); self.downloaded.insert(height, block); } @@ -212,7 +231,7 @@ mod tests { fn test_queue_block() { let mut pipeline = BlocksPipeline::new(); let block = make_test_block(1); - pipeline.queue([FilterMatchKey::new(100, block.block_hash())]); + pipeline.queue([(FilterMatchKey::new(100, block.block_hash()), BTreeSet::new())]); assert_eq!(pipeline.coordinator.pending_count(), 1); assert!(!pipeline.is_complete()); @@ -226,9 +245,9 @@ mod tests { let block2 = make_test_block(2); let block3 = make_test_block(3); pipeline.queue([ - FilterMatchKey::new(100, block1.block_hash()), - FilterMatchKey::new(101, block2.block_hash()), - FilterMatchKey::new(102, block3.block_hash()), + (FilterMatchKey::new(100, block1.block_hash()), BTreeSet::new()), + (FilterMatchKey::new(101, block2.block_hash()), BTreeSet::new()), + (FilterMatchKey::new(102, block3.block_hash()), BTreeSet::new()), ]); assert_eq!(pipeline.coordinator.pending_count(), 3); @@ -245,7 +264,7 @@ mod tests { let hash = block.block_hash(); // Queue with height tracking - pipeline.queue([FilterMatchKey::new(100, block.block_hash())]); + pipeline.queue([(FilterMatchKey::new(100, block.block_hash()), BTreeSet::new())]); // Simulate sending via coordinator let hashes = pipeline.coordinator.take_pending(1); @@ -276,7 +295,7 @@ mod tests { // Queue more blocks than max concurrent for i in 0..=MAX_CONCURRENT_BLOCK_DOWNLOADS { let block = make_test_block(i as u8); - pipeline.queue([FilterMatchKey::new(i as u32, block.block_hash())]); + pipeline.queue([(FilterMatchKey::new(i as u32, block.block_hash()), BTreeSet::new())]); } // Take and mark as downloading up to limit @@ -301,6 +320,7 @@ mod tests { pending_heights: BTreeSet::new(), downloaded: BTreeMap::new(), 
hash_to_height: HashMap::new(), + hash_to_wallets: HashMap::new(), }; // Use coordinator directly to set up in-flight state @@ -328,7 +348,7 @@ mod tests { // Use add_from_storage to test ordering logic without network // Add block 2 first (out of order) - pipeline.add_from_storage(block2.clone(), 101); + pipeline.add_from_storage(block2.clone(), 101, BTreeSet::new()); // Also track height 100 as pending to simulate waiting pipeline.pending_heights.insert(100); @@ -337,15 +357,15 @@ mod tests { // Add block 1 pipeline.pending_heights.remove(&100); - pipeline.add_from_storage(block1.clone(), 100); + pipeline.add_from_storage(block1.clone(), 100, BTreeSet::new()); // Now block 1 is ready (lowest height) - let (block, height) = pipeline.take_next_ordered_block().unwrap(); + let (block, height, _) = pipeline.take_next_ordered_block().unwrap(); assert_eq!(height, 100); assert_eq!(block.block_hash(), hash1); // Block 2 is now ready - let (block, height) = pipeline.take_next_ordered_block().unwrap(); + let (block, height, _) = pipeline.take_next_ordered_block().unwrap(); assert_eq!(height, 101); assert_eq!(block.block_hash(), hash2); @@ -360,7 +380,7 @@ mod tests { // Add block at height 101, but height 100 is still pending pipeline.pending_heights.insert(100); - pipeline.add_from_storage(block2.clone(), 101); + pipeline.add_from_storage(block2.clone(), 101, BTreeSet::new()); // Cannot take block 2 - block at height 100 is still pending assert!(pipeline.take_next_ordered_block().is_none()); @@ -369,7 +389,7 @@ mod tests { pipeline.pending_heights.remove(&100); // Now block 2 is ready - let (_, height) = pipeline.take_next_ordered_block().unwrap(); + let (_, height, _) = pipeline.take_next_ordered_block().unwrap(); assert_eq!(height, 101); } @@ -379,11 +399,11 @@ mod tests { let block = make_test_block(1); let hash = block.block_hash(); - pipeline.add_from_storage(block.clone(), 100); + pipeline.add_from_storage(block.clone(), 100, BTreeSet::new()); 
assert_eq!(pipeline.downloaded.len(), 1); - let (taken_block, height) = pipeline.take_next_ordered_block().unwrap(); + let (taken_block, height, _) = pipeline.take_next_ordered_block().unwrap(); assert_eq!(height, 100); assert_eq!(taken_block.block_hash(), hash); } @@ -395,7 +415,7 @@ mod tests { // Adding to downloaded makes it incomplete let block = make_test_block(1); - pipeline.add_from_storage(block, 100); + pipeline.add_from_storage(block, 100, BTreeSet::new()); assert!(!pipeline.is_complete()); // Take the block @@ -416,13 +436,147 @@ mod tests { assert!(pipeline.is_complete()); } + #[test] + fn test_queue_propagates_wallet_set_through_take_next() { + // A block queued with a non-empty wallet set must yield that exact + // wallet set when taken in height order via `take_next_ordered_block`. + let mut pipeline = BlocksPipeline::new(); + let block = make_test_block(1); + let hash = block.block_hash(); + let wallets: BTreeSet = BTreeSet::from([[1u8; 32], [2u8; 32]]); + + pipeline.queue([(FilterMatchKey::new(100, hash), wallets.clone())]); + + // Drive the block through receive_block to land it in `downloaded`. + let hashes = pipeline.coordinator.take_pending(1); + pipeline.coordinator.mark_sent(&hashes); + assert!(pipeline.receive_block(&block)); + + let (taken_block, height, taken_wallets) = pipeline.take_next_ordered_block().unwrap(); + assert_eq!(taken_block.block_hash(), hash); + assert_eq!(height, 100); + assert_eq!(taken_wallets, wallets); + } + + #[test] + fn test_queue_merges_wallet_sets_for_repeat_hashes() { + // Queueing the same block hash twice with different wallet sets must + // produce the union when the block is later taken from the pipeline, + // and must not double-count it in the coordinator's pending state. 
+ let mut pipeline = BlocksPipeline::new(); + let block = make_test_block(1); + let hash = block.block_hash(); + let wallets_a: BTreeSet = BTreeSet::from([[1u8; 32]]); + let wallets_b: BTreeSet = BTreeSet::from([[2u8; 32], [3u8; 32]]); + + pipeline.queue([(FilterMatchKey::new(100, hash), wallets_a.clone())]); + assert_eq!(pipeline.coordinator.pending_count(), 1); + pipeline.queue([(FilterMatchKey::new(100, hash), wallets_b.clone())]); + // Re-queueing must not double the coordinator's pending count. + assert_eq!(pipeline.coordinator.pending_count(), 1); + + // Land the block in `downloaded` to retrieve it. + let hashes = pipeline.coordinator.take_pending(1); + assert_eq!(hashes.len(), 1); + pipeline.coordinator.mark_sent(&hashes); + assert!(pipeline.receive_block(&block)); + + let (_, _, taken_wallets) = pipeline.take_next_ordered_block().unwrap(); + let mut expected = wallets_a; + expected.extend(wallets_b); + assert_eq!(taken_wallets, expected); + } + + #[test] + fn test_queue_does_not_re_enqueue_in_flight_hash() { + // A late-arriving wallet match for a block already in flight must + // merge the wallet id without re-enqueueing the hash. Re-enqueueing + // would cause a duplicate request and corrupt the coordinator's + // pending/in-flight state. + let mut pipeline = BlocksPipeline::new(); + let block = make_test_block(1); + let hash = block.block_hash(); + let wallets_a: BTreeSet = BTreeSet::from([[1u8; 32]]); + let wallets_b: BTreeSet = BTreeSet::from([[2u8; 32]]); + + pipeline.queue([(FilterMatchKey::new(100, hash), wallets_a.clone())]); + // Move the hash to in-flight. + let hashes = pipeline.coordinator.take_pending(1); + pipeline.coordinator.mark_sent(&hashes); + assert_eq!(pipeline.coordinator.pending_count(), 0); + assert_eq!(pipeline.coordinator.active_count(), 1); + + // A second queue call for the same hash must not push it back to + // pending while it is in flight. 
+ pipeline.queue([(FilterMatchKey::new(100, hash), wallets_b.clone())]); + assert_eq!(pipeline.coordinator.pending_count(), 0); + assert_eq!(pipeline.coordinator.active_count(), 1); + + // Late wallet ids are still merged for when the block arrives. + assert!(pipeline.receive_block(&block)); + let (_, _, taken_wallets) = pipeline.take_next_ordered_block().unwrap(); + let mut expected = wallets_a; + expected.extend(wallets_b); + assert_eq!(taken_wallets, expected); + } + + #[test] + fn test_queue_does_not_re_enqueue_downloaded_hash() { + // A late-arriving wallet match for a block already received and sitting + // in `downloaded` (but not yet consumed by `take_next_ordered_block`) + // must merge the wallet id without re-enqueueing the hash. + let mut pipeline = BlocksPipeline::new(); + let block = make_test_block(1); + let hash = block.block_hash(); + let wallets_a: BTreeSet = BTreeSet::from([[1u8; 32]]); + let wallets_b: BTreeSet = BTreeSet::from([[2u8; 32]]); + + pipeline.queue([(FilterMatchKey::new(100, hash), wallets_a.clone())]); + let hashes = pipeline.coordinator.take_pending(1); + pipeline.coordinator.mark_sent(&hashes); + assert!(pipeline.receive_block(&block)); + assert_eq!(pipeline.downloaded.len(), 1); + assert_eq!(pipeline.coordinator.pending_count(), 0); + assert_eq!(pipeline.coordinator.active_count(), 0); + + // Late-arriving match for the same hash must not re-enqueue. + pipeline.queue([(FilterMatchKey::new(100, hash), wallets_b.clone())]); + assert_eq!(pipeline.coordinator.pending_count(), 0); + assert_eq!(pipeline.coordinator.active_count(), 0); + assert_eq!(pipeline.downloaded.len(), 1); + + // Late wallet ids are still merged for when the block is taken. 
+ let (_, _, taken_wallets) = pipeline.take_next_ordered_block().unwrap(); + let mut expected = wallets_a; + expected.extend(wallets_b); + assert_eq!(taken_wallets, expected); + } + + #[test] + fn test_add_from_storage_merges_wallet_sets() { + // The `add_from_storage` path must merge wallet sets for repeat + // additions of the same block hash, matching `queue`'s semantics. + let mut pipeline = BlocksPipeline::new(); + let block = make_test_block(1); + let wallets_a: BTreeSet = BTreeSet::from([[1u8; 32]]); + let wallets_b: BTreeSet = BTreeSet::from([[2u8; 32]]); + + pipeline.add_from_storage(block.clone(), 100, wallets_a.clone()); + pipeline.add_from_storage(block.clone(), 100, wallets_b.clone()); + + let (_, _, taken_wallets) = pipeline.take_next_ordered_block().unwrap(); + let mut expected = wallets_a; + expected.extend(wallets_b); + assert_eq!(taken_wallets, expected); + } + #[test] fn test_receive_block_duplicate() { let mut pipeline = BlocksPipeline::new(); let block = make_test_block(1); // Queue and mark as sent via coordinator - pipeline.queue([FilterMatchKey::new(100, block.block_hash())]); + pipeline.queue([(FilterMatchKey::new(100, block.block_hash()), BTreeSet::new())]); let hashes = pipeline.coordinator.take_pending(1); pipeline.coordinator.mark_sent(&hashes); diff --git a/dash-spv/src/sync/blocks/sync_manager.rs b/dash-spv/src/sync/blocks/sync_manager.rs index c4566a7b1..5b02650b8 100644 --- a/dash-spv/src/sync/blocks/sync_manager.rs +++ b/dash-spv/src/sync/blocks/sync_manager.rs @@ -10,7 +10,8 @@ use crate::types::HashedBlock; use crate::SyncError; use async_trait::async_trait; use dashcore::network::message::NetworkMessage; -use key_wallet_manager::WalletInterface; +use key_wallet_manager::{FilterMatchKey, WalletId, WalletInterface}; +use std::collections::BTreeSet; #[async_trait] impl SyncManager @@ -115,10 +116,10 @@ impl SyncM tracing::debug!("Blocks needed: {} blocks", blocks.len()); - let mut to_download = Vec::new(); + let mut to_download: 
Vec<(FilterMatchKey, BTreeSet)> = Vec::new(); let block_storage = self.block_storage.read().await; - for key in blocks { + for (key, wallets) in blocks { // Check if block is already stored (from previous sync) if let Ok(Some(hashed_block)) = block_storage.load_block(key.height()).await { if hashed_block.hash() != key.hash() { @@ -135,13 +136,17 @@ impl SyncM ))); } // Block loaded from storage, add to pipeline for processing - self.pipeline.add_from_storage(hashed_block.block().clone(), key.height()); + self.pipeline.add_from_storage( + hashed_block.block().clone(), + key.height(), + wallets.clone(), + ); self.progress.add_from_storage(1); continue; } - // Block not in storage, queue for download with height - to_download.push(key.clone()); + // Block not in storage, queue for download with height + wallets + to_download.push((key.clone(), wallets.clone())); } drop(block_storage); diff --git a/dash-spv/src/sync/events.rs b/dash-spv/src/sync/events.rs index a5ed9734d..86fe6ec76 100644 --- a/dash-spv/src/sync/events.rs +++ b/dash-spv/src/sync/events.rs @@ -2,8 +2,8 @@ use crate::sync::ManagerIdentifier; use dashcore::ephemerealdata::chain_lock::ChainLock; use dashcore::ephemerealdata::instant_lock::InstantLock; use dashcore::{Address, BlockHash, Txid}; -use key_wallet_manager::FilterMatchKey; -use std::collections::BTreeSet; +use key_wallet_manager::{FilterMatchKey, WalletId}; +use std::collections::{BTreeMap, BTreeSet}; /// Events that managers can emit and subscribe to. /// @@ -80,11 +80,15 @@ pub enum SyncEvent { /// Filters matched the wallet, blocks need downloading. /// + /// Each block is tagged with the wallets whose addresses matched its filter, + /// so the block is processed only for those wallets. 
+ /// /// Emitted by: `FiltersManager` /// Consumed by: `BlocksManager` BlocksNeeded { - /// Blocks to download (sorted by height) - blocks: BTreeSet, + /// Blocks to download (height-ordered by `FilterMatchKey`), each + /// associated with the wallet ids that need it. + blocks: BTreeMap>, }, /// Block downloaded and processed through wallet. @@ -97,8 +101,11 @@ pub enum SyncEvent { block_hash: BlockHash, /// Height of the processed block height: u32, - /// New addresses discovered from wallet gap limit maintenance - new_addresses: Vec
, + /// Wallets the block was actually processed for. + wallets: BTreeSet, + /// New addresses discovered from wallet gap limit maintenance, attributed + /// to the wallet that produced them. + new_addresses: BTreeMap>, /// Transaction IDs confirmed in this block that are relevant to the wallet confirmed_txids: Vec, }, @@ -213,7 +220,8 @@ impl SyncEvent { new_addresses, .. } => { - format!("BlockProcessed(height={}, new_addrs={})", height, new_addresses.len()) + let total: usize = new_addresses.values().map(|v| v.len()).sum(); + format!("BlockProcessed(height={}, new_addrs={})", height, total) } SyncEvent::MasternodeStateUpdated { height, diff --git a/dash-spv/src/sync/filters/batch.rs b/dash-spv/src/sync/filters/batch.rs index 75ed22025..02b80be12 100644 --- a/dash-spv/src/sync/filters/batch.rs +++ b/dash-spv/src/sync/filters/batch.rs @@ -1,7 +1,7 @@ use dashcore::bip158::BlockFilter; use dashcore::Address; -use key_wallet_manager::FilterMatchKey; -use std::collections::{HashMap, HashSet}; +use key_wallet_manager::{FilterMatchKey, WalletId}; +use std::collections::{BTreeSet, HashMap, HashSet}; /// A completed batch of compact block filters ready for verification. /// @@ -24,8 +24,14 @@ pub(super) struct FiltersBatch { pending_blocks: u32, /// Whether rescan has been completed for this batch. rescan_complete: bool, - /// Addresses discovered during block processing that need rescan. - collected_addresses: HashSet
, + /// Wallets that were behind for this batch's height range at scan time and + /// therefore need their `synced_height` advanced when the batch commits. + /// Already-synced wallets must not be touched. + scanned_wallets: BTreeSet, + /// Addresses discovered during block processing that still need rescan, + /// attributed per wallet so we can rerun matching only against the wallet + /// that produced each new address. + collected_addresses: HashMap>, } impl FiltersBatch { @@ -43,7 +49,8 @@ impl FiltersBatch { scanned: false, pending_blocks: 0, rescan_complete: false, - collected_addresses: HashSet::new(), + scanned_wallets: BTreeSet::new(), + collected_addresses: HashMap::new(), } } /// Start height of this batch (inclusive). @@ -100,13 +107,26 @@ impl FiltersBatch { self.rescan_complete = true; } /// Add addresses discovered during block processing for later rescan. - pub(super) fn add_addresses(&mut self, addresses: impl IntoIterator) { - self.collected_addresses.extend(addresses); - } - /// Take collected addresses for rescan, leaving the set empty. - pub(super) fn take_collected_addresses(&mut self) -> HashSet
{ + pub(super) fn add_addresses_for_wallet( + &mut self, + wallet_id: WalletId, + addresses: impl IntoIterator, + ) { + self.collected_addresses.entry(wallet_id).or_default().extend(addresses); + } + /// Take collected per-wallet addresses for rescan, leaving the map empty. + pub(super) fn take_collected_addresses(&mut self) -> HashMap> { std::mem::take(&mut self.collected_addresses) } + /// Record the set of wallets that were behind for this batch at scan time. + pub(super) fn set_scanned_wallets(&mut self, wallets: BTreeSet) { + self.scanned_wallets = wallets; + } + /// Wallets that were behind at scan time and must have their synced_height + /// advanced when this batch commits. + pub(super) fn scanned_wallets(&self) -> &BTreeSet { + &self.scanned_wallets + } } impl PartialEq for FiltersBatch { diff --git a/dash-spv/src/sync/filters/block_match_tracker.rs b/dash-spv/src/sync/filters/block_match_tracker.rs new file mode 100644 index 000000000..6f9301491 --- /dev/null +++ b/dash-spv/src/sync/filters/block_match_tracker.rs @@ -0,0 +1,268 @@ +//! Per-block tracking state used by `FiltersManager` while filter matches +//! flow through the block download and apply pipeline. +//! +//! Owns two related maps: +//! +//! - `blocks_remaining`: in-flight matched blocks awaiting `BlockProcessed`, +//! keyed by block hash. The associated `(height, batch_start)` lets the +//! `BlockProcessed` handler decrement the right batch's `pending_blocks`. +//! - `processed_blocks_per_wallet`: which wallets have already had each +//! processed block applied to their state, keyed by height (so commit-time +//! pruning is one `split_off` call) then by hash. Lets a runtime-added +//! wallet still receive a block that was previously processed for another +//! wallet only: the gate is per-wallet, not global. +//! +//! These two maps are coupled: every call site that consults one consults the +//! other, and the lifecycle (track on filter match, record on +//! 
`BlockProcessed`, prune on commit, clear on reset) is shared. Splitting +//! them out keeps `FiltersManager` focused on batch orchestration. + +use std::collections::{BTreeMap, BTreeSet, HashMap}; + +use dashcore::BlockHash; +use key_wallet_manager::{FilterMatchKey, WalletId}; + +/// Result of recording a filter match for a block against a candidate wallet +/// set. The wallet set carried by `NewlyTracked` and `InFlight` is the +/// residual after subtracting wallets that have already had this block +/// processed. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) enum BlockTrackResult { + /// Block was newly tracked for the residual wallets. Caller should emit a + /// `BlocksNeeded` event with this set and account for the block in the + /// batch's `pending_blocks` count. + NewlyTracked { + wallets: BTreeSet, + }, + /// Block is already in flight. Caller should still emit a `BlocksNeeded` + /// event with the residual wallets so the `BlocksPipeline` merges them + /// into the pending wallet set, but must NOT increment the batch's + /// `pending_blocks` count (already counted on first match). + InFlight { + wallets: BTreeSet, + }, + /// All candidate wallets already have this block applied. Caller skips it. + AlreadyProcessed, +} + +/// Per-block tracking state for matched blocks flowing through the filter → +/// block → wallet pipeline. See module-level docs for the invariants. +#[derive(Debug, Default)] +pub(super) struct BlockMatchTracker { + /// In-flight matched blocks awaiting `BlockProcessed`. Maps + /// `block_hash → (height, batch_start)` so the `BlockProcessed` handler + /// can decrement the right batch's `pending_blocks` count. + blocks_remaining: BTreeMap, + /// Per-(height, hash) record of which wallets have had this block + /// applied. Bounded by `prune_at_or_below` after every commit, since + /// below `committed_height` a new wallet can only re-enter via the `tick` + /// rescan trigger which calls `clear` outright. 
+ processed_blocks_per_wallet: BTreeMap>>, +} + +impl BlockMatchTracker { + /// Create an empty tracker. + pub(super) fn new() -> Self { + Self::default() + } + + /// Track a filter match for a block against a candidate wallet set, + /// returning only the wallets that still need the block applied. See + /// `BlockTrackResult` for per-case caller responsibilities. + pub(super) fn track( + &mut self, + key: &FilterMatchKey, + batch_start: u32, + candidate_wallets: BTreeSet, + ) -> BlockTrackResult { + let processed = self.already_processed_wallets(key); + let residual: BTreeSet = + candidate_wallets.difference(&processed).copied().collect(); + if residual.is_empty() { + return BlockTrackResult::AlreadyProcessed; + } + if self.blocks_remaining.contains_key(key.hash()) { + return BlockTrackResult::InFlight { + wallets: residual, + }; + } + self.blocks_remaining.insert(*key.hash(), (key.height(), batch_start)); + BlockTrackResult::NewlyTracked { + wallets: residual, + } + } + + /// Record that `wallets` have had the block at `(height, hash)` applied + /// to their state. Idempotent: existing entries merge, never shrink. + pub(super) fn record_processed( + &mut self, + height: u32, + hash: BlockHash, + wallets: &BTreeSet, + ) { + if wallets.is_empty() { + return; + } + self.processed_blocks_per_wallet + .entry(height) + .or_default() + .entry(hash) + .or_default() + .extend(wallets.iter().copied()); + } + + /// Remove the in-flight entry for `hash`, returning its + /// `(height, batch_start)` if it was tracked. + pub(super) fn finish_in_flight(&mut self, hash: &BlockHash) -> Option<(u32, u32)> { + self.blocks_remaining.remove(hash) + } + + /// Drop every per-wallet processing record at or below `height`. Called + /// after `try_commit_batches` advances `committed_height`: below the new + /// committed height a new wallet can only re-enter via the `tick` rescan + /// trigger, which already wipes the map outright via `clear`. 
+ pub(super) fn prune_at_or_below(&mut self, height: u32) { + self.processed_blocks_per_wallet = + self.processed_blocks_per_wallet.split_off(&(height + 1)); + } + + /// True when there is no in-flight or processed-record state. + pub(super) fn is_empty(&self) -> bool { + self.blocks_remaining.is_empty() && self.processed_blocks_per_wallet.is_empty() + } + + /// Drop all in-flight and processed-record state. + pub(super) fn clear(&mut self) { + self.blocks_remaining.clear(); + self.processed_blocks_per_wallet.clear(); + } + + /// Wallets that have already had this block applied to their state. + fn already_processed_wallets(&self, key: &FilterMatchKey) -> BTreeSet { + self.processed_blocks_per_wallet + .get(&key.height()) + .and_then(|m| m.get(key.hash())) + .cloned() + .unwrap_or_default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn hash_n(n: u8) -> BlockHash { + dashcore::block::Header::dummy(n as u32).block_hash() + } + + /// `track` walks through the full state machine: NewlyTracked on first + /// match, InFlight on re-match while the block is awaiting processing, + /// NewlyTracked again for a residual wallet after the first wallet's + /// processing is recorded, and AlreadyProcessed once every candidate is + /// covered. + #[test] + fn track_state_machine() { + let mut tracker = BlockMatchTracker::new(); + let hash = hash_n(0); + let key = FilterMatchKey::new(100, hash); + let wallet_a: WalletId = [0xA1; 32]; + let wallet_b: WalletId = [0xB2; 32]; + + // First match for {A}: nothing tracked yet, helper records the block. + assert_eq!( + tracker.track(&key, 0, BTreeSet::from([wallet_a])), + BlockTrackResult::NewlyTracked { + wallets: BTreeSet::from([wallet_a]) + } + ); + assert_eq!(tracker.finish_in_flight(&hash), Some((100, 0))); + // Put it back in flight to continue the scenario. + assert!(matches!( + tracker.track(&key, 0, BTreeSet::from([wallet_a])), + BlockTrackResult::NewlyTracked { .. 
} + )); + + // Re-match for {A} while still in flight: residual is {A}, InFlight. + assert_eq!( + tracker.track(&key, 0, BTreeSet::from([wallet_a])), + BlockTrackResult::InFlight { + wallets: BTreeSet::from([wallet_a]) + } + ); + + // Block is delivered and processed for {A}. + assert!(tracker.finish_in_flight(&hash).is_some()); + tracker.record_processed(100, hash, &BTreeSet::from([wallet_a])); + + // Late-added B's filter matches the same block: residual is {B} and + // it gets re-queued via NewlyTracked. + assert_eq!( + tracker.track(&key, 5000, BTreeSet::from([wallet_a, wallet_b])), + BlockTrackResult::NewlyTracked { + wallets: BTreeSet::from([wallet_b]) + } + ); + + // After B is processed, both wallets are covered: AlreadyProcessed. + assert!(tracker.finish_in_flight(&hash).is_some()); + tracker.record_processed(100, hash, &BTreeSet::from([wallet_b])); + assert_eq!( + tracker.track(&key, 5000, BTreeSet::from([wallet_a, wallet_b])), + BlockTrackResult::AlreadyProcessed + ); + } + + /// `prune_at_or_below` drops every entry at or below the given height + /// while retaining strictly higher entries. Idempotent under repeated + /// calls with the same threshold. + #[test] + fn prune_at_or_below_drops_low_entries() { + let mut tracker = BlockMatchTracker::new(); + let wallet: WalletId = [0xFA; 32]; + let h_low = hash_n(1); + let h_mid = hash_n(2); + let h_high = hash_n(3); + + tracker.record_processed(2500, h_low, &BTreeSet::from([wallet])); + tracker.record_processed(4999, h_mid, &BTreeSet::from([wallet])); + tracker.record_processed(7500, h_high, &BTreeSet::from([wallet])); + + tracker.prune_at_or_below(4999); + + // Entries at or below 4999 are gone, the 7500 entry survives. 
+ let key_low = FilterMatchKey::new(2500, h_low); + let key_mid = FilterMatchKey::new(4999, h_mid); + let key_high = FilterMatchKey::new(7500, h_high); + assert!(tracker.already_processed_wallets(&key_low).is_empty()); + assert!(tracker.already_processed_wallets(&key_mid).is_empty()); + assert!(tracker.already_processed_wallets(&key_high).contains(&wallet)); + + // Repeat call is a no-op. + tracker.prune_at_or_below(4999); + assert!(tracker.already_processed_wallets(&key_high).contains(&wallet)); + } + + /// `is_empty` and `clear` cover both maps together: populating either + /// flips `is_empty`, and `clear` returns to the initial state. + #[test] + fn is_empty_and_clear_cover_both_maps() { + let mut tracker = BlockMatchTracker::new(); + let wallet: WalletId = [0xCC; 32]; + let hash = hash_n(0); + let key = FilterMatchKey::new(100, hash); + + assert!(tracker.is_empty()); + + // Only blocks_remaining populated. + tracker.track(&key, 0, BTreeSet::from([wallet])); + assert!(!tracker.is_empty()); + tracker.clear(); + assert!(tracker.is_empty()); + + // Only processed_blocks_per_wallet populated. + tracker.record_processed(100, hash, &BTreeSet::from([wallet])); + assert!(!tracker.is_empty()); + tracker.clear(); + assert!(tracker.is_empty()); + } +} diff --git a/dash-spv/src/sync/filters/manager.rs b/dash-spv/src/sync/filters/manager.rs index d800d0f95..76ea261ce 100644 --- a/dash-spv/src/sync/filters/manager.rs +++ b/dash-spv/src/sync/filters/manager.rs @@ -4,13 +4,14 @@ //! and matches against wallet to identify blocks for download. //! Emits FiltersStored, FiltersSyncComplete and BlocksNeeded events. 
-use std::collections::{btree_map, BTreeMap, BTreeSet, HashMap, HashSet}; +use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet}; use std::sync::Arc; use dashcore::bip158::BlockFilter; -use dashcore::{Address, BlockHash}; +use dashcore::Address; use super::batch::FiltersBatch; +use super::block_match_tracker::{BlockMatchTracker, BlockTrackResult}; use super::pipeline::FiltersPipeline; use crate::error::SyncResult; use crate::network::RequestSender; @@ -22,7 +23,7 @@ use crate::validation::{FilterValidationInput, FilterValidator, Validator}; use crate::sync::progress::ProgressPercentage; use dashcore::hash_types::FilterHeader; use key_wallet_manager::WalletInterface; -use key_wallet_manager::{check_compact_filters_for_addresses, FilterMatchKey}; +use key_wallet_manager::{check_compact_filters_for_addresses, FilterMatchKey, WalletId}; use tokio::sync::RwLock; /// Batch size for processing filters. @@ -66,11 +67,10 @@ pub struct FiltersManager< pub(super) active_batches: BTreeMap, /// Current block height being processed (for progress tracking). processing_height: u32, - /// Blocks remaining that need to be processed. - /// Maps block_hash -> (height, batch_start) for batch association. - pub(super) blocks_remaining: BTreeMap, - /// Block hashes that have been matched and queued for download. - pub(super) filters_matched: HashSet, + /// Per-block tracking state for matched blocks: in-flight blocks awaiting + /// `BlockProcessed` and the per-wallet record of which wallets already + /// have a given processed block applied. 
+ pub(super) tracker: BlockMatchTracker, } impl @@ -114,16 +114,14 @@ impl bool { self.active_batches.is_empty() - && self.blocks_remaining.is_empty() - && self.filters_matched.is_empty() + && self.tracker.is_empty() && self.pending_batches.is_empty() && self.filter_pipeline.is_idle() } @@ -453,16 +451,16 @@ impl = self @@ -473,7 +471,7 @@ impl self.progress.committed_height() { self.progress.update_committed_height(end); - self.wallet.write().await.update_synced_height(end); + let scanned_wallets = batch.scanned_wallets().clone(); + if !scanned_wallets.is_empty() { + let mut wallet = self.wallet.write().await; + for wallet_id in &scanned_wallets { + wallet.update_wallet_synced_height(wallet_id, end); + } + } } + // Drop processed-wallet records for the committed range. Below the + // new committed_height a new wallet can only get here via the + // `tick` rescan trigger, which already wipes the map via + // `clear_in_flight_state`, so older entries can never be consulted. + self.tracker.prune_at_or_below(end); self.processing_height = end + 1; tracing::info!( @@ -581,22 +592,24 @@ impl, + new_addresses: &HashMap>, ) -> SyncResult> { if new_addresses.is_empty() { return Ok(vec![]); } - let Some(batch) = self.active_batches.get_mut(&batch_start) else { + let Some(batch) = self.active_batches.get(&batch_start) else { return Ok(vec![]); }; tracing::info!( - "Rescan filters ({}-{}) for {} new addresses", + "Rescan filters ({}-{}) for new addresses across {} wallets", batch.start_height(), batch.end_height(), new_addresses.len() @@ -605,37 +618,63 @@ impl = { + let wallet = self.wallet.read().await; + new_addresses.keys().map(|id| (*id, wallet.wallet_synced_height(id))).collect() + }; + + let mut block_to_wallets: BTreeMap> = BTreeMap::new(); + for (wallet_id, addresses) in new_addresses { + if addresses.is_empty() { + continue; + } + let addresses_vec: Vec<_> = addresses.iter().cloned().collect(); + let min_synced = synced_heights.get(wallet_id).copied().unwrap_or(0); + 
let matches = + check_compact_filters_for_addresses(batch_filters, addresses_vec, min_synced); + for key in matches { + block_to_wallets.entry(key).or_default().insert(*wallet_id); + } + } - // Match filters against new addresses only - let addresses_vec: Vec<_> = new_addresses.into_iter().collect(); - let matches = check_compact_filters_for_addresses(batch.filters(), addresses_vec); let mut events = Vec::new(); - let mut blocks_needed = BTreeSet::new(); + let mut blocks_needed: BTreeMap> = BTreeMap::new(); let mut new_blocks_count = 0; - if !matches.is_empty() { - self.progress.add_matched(matches.len() as u32); + if !block_to_wallets.is_empty() { + self.progress.add_matched(block_to_wallets.len() as u32); } - for key in matches { - // Skip blocks that were already matched (even if already processed) - if self.filters_matched.contains(key.hash()) { - continue; - } - // Queue blocks discovered by rescan for download - if let btree_map::Entry::Vacant(e) = self.blocks_remaining.entry(*key.hash()) { - e.insert((key.height(), batch_start)); - self.filters_matched.insert(*key.hash()); - blocks_needed.insert(key); - new_blocks_count += 1; + for (key, wallets) in block_to_wallets { + match self.tracker.track(&key, batch_start, wallets) { + BlockTrackResult::NewlyTracked { + wallets, + } => { + blocks_needed.insert(key, wallets); + new_blocks_count += 1; + } + BlockTrackResult::InFlight { + wallets, + } => { + // Block already on its way; merge late wallet ids into the + // pipeline's pending wallet set via a fresh BlocksNeeded. + blocks_needed.insert(key, wallets); + } + BlockTrackResult::AlreadyProcessed => {} } } - // Update batch pending_blocks count + // Update batch pending_blocks count for the genuinely new entries only. 
if new_blocks_count > 0 { if let Some(batch) = self.active_batches.get_mut(&batch_start) { batch.set_pending_blocks(batch.pending_blocks() + new_blocks_count); } tracing::info!("Rescan found {} additional blocks", new_blocks_count); + } + if !blocks_needed.is_empty() { events.push(SyncEvent::BlocksNeeded { blocks: blocks_needed, }); @@ -644,68 +683,163 @@ impl SyncResult> { let mut events = Vec::new(); - let Some(batch) = self.active_batches.get_mut(&batch_start) else { - tracing::debug!("scan_batch: batch {} not found", batch_start); - return Ok(events); + let (batch_end, filters_empty) = { + let Some(batch) = self.active_batches.get_mut(&batch_start) else { + tracing::debug!("scan_batch: batch {} not found", batch_start); + return Ok(events); + }; + + tracing::debug!( + "scan_batch: batch {}-{} has {} filters", + batch.start_height(), + batch.end_height(), + batch.filters().len() + ); + + batch.mark_scanned(); + (batch.end_height(), batch.filters().is_empty()) }; - tracing::debug!( - "scan_batch: batch {}-{} has {} filters", - batch.start_height(), - batch.end_height(), - batch.filters().len() - ); + // Snapshot per-wallet state for the wallets behind this batch's range. + // A wallet whose `synced_height >= batch_end` is fully covered and is + // skipped entirely, its addresses never even get tested against these + // filters. + let wallet = self.wallet.read().await; + let behind = wallet.wallets_behind(batch_end); + let mut wallet_states: Vec<(WalletId, u32, Vec
)> = Vec::new(); + for wallet_id in &behind { + let synced = wallet.wallet_synced_height(wallet_id); + let addresses = wallet.monitored_addresses_for(wallet_id); + if !addresses.is_empty() { + wallet_states.push((*wallet_id, synced, addresses)); + } + } + drop(wallet); - batch.mark_scanned(); + // Every behind wallet's coverage advances to `batch_end` once this + // batch commits. That includes wallets without any monitored + // addresses: they have nothing to match against these filters, so the + // batch fully accounts for their range and their `synced_height` must + // advance to keep `wallets_behind` from listing them on every future + // batch. + let scanned_wallets: BTreeSet = behind.clone(); - // Get all filters in the batch - if batch.filters().is_empty() { + if let Some(batch) = self.active_batches.get_mut(&batch_start) { + batch.set_scanned_wallets(scanned_wallets); + } + + if filters_empty { tracing::debug!("scan_batch: batch filters are empty, returning early"); return Ok(events); } - // Match against wallet's current addresses - let wallet = self.wallet.read().await; - let addresses = wallet.monitored_addresses(); - let matches = check_compact_filters_for_addresses(batch.filters(), addresses); - drop(wallet); + if wallet_states.is_empty() { + // No addresses to scan, but `scanned_wallets` was still recorded + // so any zero-address behind wallets advance at commit. + tracing::debug!("scan_batch: no behind wallets with monitored addresses"); + return Ok(events); + } + + // Single-pass union-then-attribute: build the union of all addresses + // across behind wallets, run the filters once, then for each matched + // block re-test per-wallet scripts to attribute the match correctly. + let union_addresses: Vec
= + wallet_states.iter().flat_map(|(_, _, addrs)| addrs.iter().cloned()).collect(); + let min_synced = wallet_states.iter().map(|(_, synced, _)| *synced).min().unwrap_or(0); + + let block_to_wallets = { + let Some(batch) = self.active_batches.get(&batch_start) else { + return Ok(events); + }; + let batch_filters = batch.filters(); + + let matches = + check_compact_filters_for_addresses(batch_filters, union_addresses, min_synced); + let mut block_to_wallets: BTreeMap> = + BTreeMap::new(); + for key in matches { + let Some(filter) = batch_filters.get(&key) else { + tracing::warn!( + "skipping unmatched filter key at height {}: hash {}", + key.height(), + key.hash() + ); + continue; + }; + for (wallet_id, wallet_synced, addresses) in &wallet_states { + if key.height() <= *wallet_synced { + continue; + } + let scripts: Vec> = + addresses.iter().map(|a| a.script_pubkey().to_bytes()).collect(); + let matched = match filter + .match_any(key.hash(), scripts.iter().map(|v| v.as_slice())) + { + Ok(matched) => matched, + Err(e) => { + tracing::warn!( + "filter match_any error during attribution at height {}: {}; treating as non-match", + key.height(), + e + ); + false + } + }; + if matched { + block_to_wallets.entry(key.clone()).or_default().insert(*wallet_id); + } + } + } + block_to_wallets + }; tracing::info!( - "Batch {}-{}: found {} matching blocks", - batch.start_height(), - batch.end_height(), - matches.len() + "Batch {}-{}: found {} matching blocks across {} behind wallets", + batch_start, + batch_end, + block_to_wallets.len(), + wallet_states.len() ); - if matches.is_empty() { + if block_to_wallets.is_empty() { return Ok(events); } - self.progress.add_matched(matches.len() as u32); + self.progress.add_matched(block_to_wallets.len() as u32); - // Filter out already-processed blocks and track the new ones - let mut blocks_needed = BTreeSet::new(); + // Either (re)queue the block via `BlocksNeeded` or skip if every + // candidate wallet already has it processed. 
In-flight blocks still + // re-emit so the BlocksPipeline merges any late-arriving wallet ids. + let mut blocks_needed: BTreeMap> = BTreeMap::new(); let mut new_blocks_count = 0; - for key in matches { - if self.filters_matched.contains(key.hash()) { - continue; - } - if self.blocks_remaining.contains_key(key.hash()) { - continue; + for (key, wallets) in block_to_wallets { + match self.tracker.track(&key, batch_start, wallets) { + BlockTrackResult::NewlyTracked { + wallets, + } => { + blocks_needed.insert(key, wallets); + new_blocks_count += 1; + } + BlockTrackResult::InFlight { + wallets, + } => { + blocks_needed.insert(key, wallets); + } + BlockTrackResult::AlreadyProcessed => {} } - self.blocks_remaining.insert(*key.hash(), (key.height(), batch_start)); - self.filters_matched.insert(*key.hash()); - blocks_needed.insert(key); - new_blocks_count += 1; } - // Update batch pending_blocks count - if let Some(batch) = self.active_batches.get_mut(&batch_start) { - batch.set_pending_blocks(batch.pending_blocks() + new_blocks_count); + // Update batch pending_blocks count for the genuinely new entries only. 
+ if new_blocks_count > 0 { + if let Some(batch) = self.active_batches.get_mut(&batch_start) { + batch.set_pending_blocks(batch.pending_blocks() + new_blocks_count); + } } if !blocks_needed.is_empty() { @@ -773,9 +907,13 @@ mod tests { PersistentFilterHeaderStorage, PersistentFilterStorage, StorageManager, }; use crate::sync::{ManagerIdentifier, SyncManagerProgress}; + use dashcore::bip158::BlockFilter; use dashcore::Header; + use dashcore::{Block, Network, Transaction}; use dashcore_hashes::Hash; - use key_wallet_manager::test_utils::MockWallet; + use key_wallet_manager::test_utils::{ + MockWallet, MockWalletState, MultiMockWallet, MOCK_WALLET_ID, + }; use tokio::sync::mpsc::unbounded_channel; type TestFiltersManager = FiltersManager< @@ -784,6 +922,12 @@ mod tests { PersistentFilterStorage, MockWallet, >; + type MultiTestFiltersManager = FiltersManager< + PersistentBlockHeaderStorage, + PersistentFilterHeaderStorage, + PersistentFilterStorage, + MultiMockWallet, + >; type TestSyncManager = dyn SyncManager; async fn create_test_manager() -> TestFiltersManager { @@ -798,6 +942,30 @@ mod tests { .await } + async fn create_multi_test_manager( + wallet: Arc>, + ) -> MultiTestFiltersManager { + let storage = DiskStorageManager::with_temp_dir().await.unwrap(); + FiltersManager::new( + wallet, + storage.block_headers(), + storage.filter_headers(), + storage.filters(), + ) + .await + } + + /// Build a real `BlockFilter` for a single-output block paying `address`. 
+ fn filter_for_address( + height: u32, + address: &dashcore::Address, + ) -> (FilterMatchKey, BlockFilter) { + let tx = Transaction::dummy(address, 0..0, &[height as u64]); + let block = Block::dummy(height, vec![tx]); + let filter = BlockFilter::dummy(&block); + (FilterMatchKey::new(height, block.block_hash()), filter) + } + #[tokio::test] async fn test_filters_manager_new() { let manager = create_test_manager().await; @@ -816,7 +984,7 @@ mod tests { // Set wallet committed height via last_processed_height (MockWallet default delegates) let mut wallet = MockWallet::new(); - wallet.update_last_processed_height(50); + wallet.update_wallet_synced_height(&MOCK_WALLET_ID, 50); let wallet = Arc::new(RwLock::new(wallet)); // Pre-populate filter storage with filters at heights 1..=100 @@ -928,6 +1096,618 @@ mod tests { manager.try_commit_batches().await.unwrap(); assert_eq!(manager.active_batches.len(), 0); assert_eq!(manager.progress.committed_height(), 4999); + // No wallets were recorded as scanned for this batch, so the per-wallet + // synced_height stays at its initial value. + assert_eq!(manager.wallet.read().await.wallet_synced_height(&MOCK_WALLET_ID), 0); + } + + #[tokio::test] + async fn test_batch_commit_advances_only_scanned_wallets() { + let mut manager = create_test_manager().await; + manager.set_state(SyncState::Syncing); + + // First batch records MOCK_WALLET_ID as scanned, so its synced_height + // advances to the batch end on commit. 
+ let mut batch1 = FiltersBatch::new(0, 4999, HashMap::new()); + batch1.set_pending_blocks(0); + batch1.mark_scanned(); + batch1.mark_rescan_complete(); + batch1.set_scanned_wallets(BTreeSet::from([MOCK_WALLET_ID])); + manager.active_batches.insert(0, batch1); + + manager.try_commit_batches().await.unwrap(); + assert_eq!(manager.progress.committed_height(), 4999); + assert_eq!(manager.wallet.read().await.wallet_synced_height(&MOCK_WALLET_ID), 4999); + + // Second batch leaves scanned_wallets empty (nothing to scan in this + // range), so the per-wallet synced_height stays put even though the + // committed_height advances. + let mut batch2 = FiltersBatch::new(5000, 9999, HashMap::new()); + batch2.set_pending_blocks(0); + batch2.mark_scanned(); + batch2.mark_rescan_complete(); + manager.active_batches.insert(5000, batch2); + + manager.try_commit_batches().await.unwrap(); + assert_eq!(manager.progress.committed_height(), 9999); + assert_eq!(manager.wallet.read().await.wallet_synced_height(&MOCK_WALLET_ID), 4999); + } + + /// Two wallets in the same batch: only the wallet recorded in + /// `scanned_wallets` advances, the other stays put even after commit. + #[tokio::test] + async fn test_batch_commit_advances_only_recorded_wallet_with_two_wallets() { + let wallet_a: WalletId = [0xAA; 32]; + let wallet_b: WalletId = [0xBB; 32]; + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet(wallet_a, MockWalletState::default()); + w.insert_wallet(wallet_b, MockWalletState::default()); + } + let mut manager = create_multi_test_manager(multi.clone()).await; + manager.set_state(SyncState::Syncing); + + // Batch records only wallet_a as scanned. wallet_b is excluded. 
+ let mut batch = FiltersBatch::new(0, 4999, HashMap::new()); + batch.set_pending_blocks(0); + batch.mark_scanned(); + batch.mark_rescan_complete(); + batch.set_scanned_wallets(BTreeSet::from([wallet_a])); + manager.active_batches.insert(0, batch); + + manager.try_commit_batches().await.unwrap(); + assert_eq!(manager.progress.committed_height(), 4999); + assert_eq!(multi.read().await.wallet_synced_height(&wallet_a), 4999); + assert_eq!(multi.read().await.wallet_synced_height(&wallet_b), 0); + } + + /// `scan_batch` with two wallets at different `synced_height` values: + /// only the wallet whose synced_height is below the matching block's + /// height should be attributed. + #[tokio::test] + async fn test_scan_batch_attributes_per_wallet_height() { + let wallet_low: WalletId = [0x01; 32]; + let wallet_high: WalletId = [0x02; 32]; + let address_low = dashcore::Address::dummy(Network::Regtest, 1); + let address_high = dashcore::Address::dummy(Network::Regtest, 2); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + // wallet_low is behind: synced_height=10, will see filters above 10. + w.insert_wallet( + wallet_low, + MockWalletState { + addresses: vec![address_low.clone()], + synced_height: 10, + last_processed_height: 10, + }, + ); + // wallet_high is mostly synced: synced_height=50, only sees > 50. + w.insert_wallet( + wallet_high, + MockWalletState { + addresses: vec![address_high.clone()], + synced_height: 50, + last_processed_height: 50, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + // Build a batch with three filters: at 30 paying wallet_low's address, + // at 60 paying wallet_high's address, at 70 paying wallet_low's address. 
+ let mut filters: HashMap = HashMap::new(); + let (key_30, f_30) = filter_for_address(30, &address_low); + let (key_60, f_60) = filter_for_address(60, &address_high); + let (key_70, f_70) = filter_for_address(70, &address_low); + filters.insert(key_30.clone(), f_30); + filters.insert(key_60.clone(), f_60); + filters.insert(key_70.clone(), f_70); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + let events = manager.scan_batch(0).await.unwrap(); + + // Find the BlocksNeeded event. + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("BlocksNeeded event"); + + // Block at 30 only attributable to wallet_low (height <= wallet_high.synced) + let attr_30 = blocks.get(&key_30).expect("entry for height 30"); + assert!(attr_30.contains(&wallet_low)); + assert!(!attr_30.contains(&wallet_high)); + + // Block at 60 only attributable to wallet_high (matches its address); + // wallet_low's address does not match so it shouldn't be there either. + let attr_60 = blocks.get(&key_60).expect("entry for height 60"); + assert!(attr_60.contains(&wallet_high)); + assert!(!attr_60.contains(&wallet_low)); + + // Block at 70 only attributable to wallet_low: matches wallet_low's + // address, and wallet_high's address does not match this filter. + let attr_70 = blocks.get(&key_70).expect("entry for height 70"); + assert!(attr_70.contains(&wallet_low)); + assert!(!attr_70.contains(&wallet_high)); + } + + /// `rescan_batch` with multiple wallets in `addresses_by_wallet`: + /// each wallet's new addresses are matched independently and the + /// attribution is correct in the emitted `BlocksNeeded`. 
+ #[tokio::test] + async fn test_rescan_batch_attributes_per_wallet_addresses() { + let wallet_a: WalletId = [0x0A; 32]; + let wallet_b: WalletId = [0x0B; 32]; + let address_a = dashcore::Address::dummy(Network::Regtest, 11); + let address_b = dashcore::Address::dummy(Network::Regtest, 22); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet(wallet_a, MockWalletState::default()); + w.insert_wallet(wallet_b, MockWalletState::default()); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + let mut filters: HashMap = HashMap::new(); + let (key_a, f_a) = filter_for_address(15, &address_a); + let (key_b, f_b) = filter_for_address(25, &address_b); + filters.insert(key_a.clone(), f_a); + filters.insert(key_b.clone(), f_b); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + + let mut new_addresses: HashMap> = HashMap::new(); + new_addresses.insert(wallet_a, HashSet::from([address_a])); + new_addresses.insert(wallet_b, HashSet::from([address_b])); + + let events = manager.rescan_batch(0, &new_addresses).await.unwrap(); + + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("BlocksNeeded event"); + + let attr_a = blocks.get(&key_a).expect("entry for wallet_a's match"); + assert!(attr_a.contains(&wallet_a)); + assert!(!attr_a.contains(&wallet_b)); + + let attr_b = blocks.get(&key_b).expect("entry for wallet_b's match"); + assert!(attr_b.contains(&wallet_b)); + assert!(!attr_b.contains(&wallet_a)); + } + + /// `rescan_batch` honours each wallet's own `synced_height`: a new + /// address belonging to a wallet that has already advanced past a height + /// must not produce a `BlocksNeeded` for that height, even when the + /// filter for that height matches the new address. 
Two wallets at + /// different heights are exercised so that both the include-above and + /// skip-below paths run. + #[tokio::test] + async fn test_rescan_batch_skips_below_per_wallet_synced_height() { + let wallet_low: WalletId = [0xA1; 32]; + let wallet_high: WalletId = [0xA2; 32]; + let address_low = dashcore::Address::dummy(Network::Regtest, 41); + let address_high = dashcore::Address::dummy(Network::Regtest, 42); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet( + wallet_low, + MockWalletState { + addresses: vec![], + synced_height: 20, + last_processed_height: 20, + }, + ); + w.insert_wallet( + wallet_high, + MockWalletState { + addresses: vec![], + synced_height: 60, + last_processed_height: 60, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + // Filters at 30 (matches wallet_low) and 70 (matches wallet_high). + // For wallet_low (synced=20), height 30 is fresh and 70 is also fresh + // since 70 > 20. For wallet_high (synced=60), height 30 is below its + // synced_height so it must be skipped, while 70 is fresh. + let (key_30, f_30) = filter_for_address(30, &address_low); + let (key_70, f_70) = filter_for_address(70, &address_high); + let mut filters: HashMap = HashMap::new(); + filters.insert(key_30.clone(), f_30); + filters.insert(key_70.clone(), f_70); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + + // wallet_high also "discovers" address_low to demonstrate that even + // when a new address would match a low height, the per-wallet + // synced_height filter prevents emitting it. 
+ let mut new_addresses: HashMap> = HashMap::new(); + new_addresses.insert(wallet_low, HashSet::from([address_low.clone()])); + new_addresses.insert(wallet_high, HashSet::from([address_low.clone(), address_high])); + + let events = manager.rescan_batch(0, &new_addresses).await.unwrap(); + + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("BlocksNeeded event"); + + // wallet_low must see height 30, wallet_high must NOT (synced=60>30). + let attr_30 = blocks.get(&key_30).expect("entry at height 30 for wallet_low"); + assert!(attr_30.contains(&wallet_low)); + assert!(!attr_30.contains(&wallet_high)); + + // wallet_high must see height 70 since 70 > 60. + let attr_70 = blocks.get(&key_70).expect("entry at height 70 for wallet_high"); + assert!(attr_70.contains(&wallet_high)); + } + + /// `scan_batch` for a behind wallet with no monitored addresses still + /// records the wallet in `scanned_wallets` so its `synced_height` + /// advances at commit. Otherwise zero-address wallets would be listed by + /// `wallets_behind` on every batch forever. + #[tokio::test] + async fn test_scan_batch_advances_zero_address_wallet() { + let wallet_id: WalletId = [0xCC; 32]; + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet(wallet_id, MockWalletState::default()); + } + let mut manager = create_multi_test_manager(multi.clone()).await; + manager.set_state(SyncState::Syncing); + + // Batch with one filter at height 50 (irrelevant: wallet has no addresses). 
+ let mut filters: HashMap = HashMap::new(); + let throwaway_address = dashcore::Address::dummy(Network::Regtest, 99); + let (key, filter) = filter_for_address(50, &throwaway_address); + filters.insert(key, filter); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + let events = manager.scan_batch(0).await.unwrap(); + assert!(events.is_empty(), "no addresses should mean no BlocksNeeded events"); + + // Mark batch ready so commit can run, then commit. + if let Some(b) = manager.active_batches.get_mut(&0) { + b.set_pending_blocks(0); + b.mark_rescan_complete(); + } + manager.try_commit_batches().await.unwrap(); + + // Wallet had no addresses, but it was behind, so its synced_height + // advances to the batch end after commit. + assert_eq!(multi.read().await.wallet_synced_height(&wallet_id), 99); + } + + /// `scan_batch` after a runtime-added wallet whose address matches a + /// block already in flight must re-emit `BlocksNeeded` so the + /// `BlocksPipeline` merges the new wallet id into the pending set. + #[tokio::test] + async fn test_scan_batch_in_flight_re_emits_for_late_wallet() { + let wallet_id: WalletId = [0xDD; 32]; + let address = dashcore::Address::dummy(Network::Regtest, 7); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet( + wallet_id, + MockWalletState { + addresses: vec![address.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + // One matching filter at height 40. 
+ let (key_40, f_40) = filter_for_address(40, &address); + let mut filters: HashMap = HashMap::new(); + filters.insert(key_40.clone(), f_40); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + // Pre-seed the tracker so `tracker.track` returns InFlight. + manager.tracker.track(&key_40, 0, BTreeSet::from([wallet_id])); + + let events = manager.scan_batch(0).await.unwrap(); + + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("InFlight path must still emit BlocksNeeded for wallet-set merge"); + let attribution = blocks.get(&key_40).expect("entry for the in-flight block"); + assert!(attribution.contains(&wallet_id)); + } + + /// `scan_batch` `AlreadyProcessed` path: when every candidate wallet has + /// already had this block processed, the block is skipped (no + /// `BlocksNeeded`). 
+ #[tokio::test] + async fn test_scan_batch_already_processed_is_skipped() { + let wallet_id: WalletId = [0xEE; 32]; + let address = dashcore::Address::dummy(Network::Regtest, 8); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet( + wallet_id, + MockWalletState { + addresses: vec![address.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + let (key_40, f_40) = filter_for_address(40, &address); + let mut filters: HashMap = HashMap::new(); + filters.insert(key_40.clone(), f_40); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + // Pre-record processing for the only candidate wallet so the residual + // is empty and `tracker.track` returns `AlreadyProcessed`. + manager.tracker.record_processed(40, *key_40.hash(), &BTreeSet::from([wallet_id])); + + let events = manager.scan_batch(0).await.unwrap(); + let has_blocks_needed = events.iter().any(|e| matches!(e, SyncEvent::BlocksNeeded { .. })); + assert!(!has_blocks_needed, "AlreadyProcessed must not emit BlocksNeeded"); + } + + /// `scan_batch` for a wallet added at runtime whose address matches a + /// block already processed for another wallet must re-emit `BlocksNeeded` + /// with only the late wallet in the attribution set so the block reloads + /// from storage and applies for the late wallet without disturbing the + /// already-processed one. 
+ #[tokio::test] + async fn test_scan_batch_late_wallet_recovers_already_processed_block() { + let early: WalletId = [0xE1; 32]; + let late: WalletId = [0xE2; 32]; + let address = dashcore::Address::dummy(Network::Regtest, 9); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet( + early, + MockWalletState { + addresses: vec![address.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + w.insert_wallet( + late, + MockWalletState { + addresses: vec![address.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + let (key_40, f_40) = filter_for_address(40, &address); + let mut filters: HashMap = HashMap::new(); + filters.insert(key_40.clone(), f_40); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + // The early wallet has already had this block applied. The late + // wallet has not. Both wallets' addresses match the filter at 40. 
+ manager.tracker.record_processed(40, *key_40.hash(), &BTreeSet::from([early])); + + let events = manager.scan_batch(0).await.unwrap(); + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("late wallet must trigger a BlocksNeeded re-emit"); + let attribution = blocks.get(&key_40).expect("entry for the recovered block"); + assert!(attribution.contains(&late), "late wallet must receive the block"); + assert!( + !attribution.contains(&early), + "early wallet was already processed for this block, must be excluded" + ); + } + + /// `try_commit_batches` prunes `processed_blocks_per_wallet` entries at + /// or below the new committed_height, since they cannot be reached again + /// without `clear_in_flight_state` wiping the map outright. + #[tokio::test] + async fn test_commit_prunes_processed_blocks_per_wallet() { + let mut manager = create_test_manager().await; + manager.set_state(SyncState::Syncing); + + let wallet_id: WalletId = [0xFA; 32]; + let hash_in = dashcore::block::Header::dummy(0).block_hash(); + let hash_out = dashcore::block::Header::dummy(1).block_hash(); + let key_in = FilterMatchKey::new(2500, hash_in); + let key_out = FilterMatchKey::new(7500, hash_out); + manager.tracker.record_processed(2500, hash_in, &BTreeSet::from([wallet_id])); + manager.tracker.record_processed(7500, hash_out, &BTreeSet::from([wallet_id])); + + // Batch 0..=4999 is ready to commit; pruning drops the 2500 entry but + // keeps the 7500 entry which sits above the new committed_height. 
+ let mut batch = FiltersBatch::new(0, 4999, HashMap::new()); + batch.set_pending_blocks(0); + batch.mark_scanned(); + batch.mark_rescan_complete(); + manager.active_batches.insert(0, batch); + + manager.try_commit_batches().await.unwrap(); + + assert_eq!(manager.progress.committed_height(), 4999); + // The 2500 record is gone: a fresh `track` for the same wallet + // re-tracks the block instead of returning `AlreadyProcessed`. + assert!(matches!( + manager.tracker.track(&key_in, 0, BTreeSet::from([wallet_id])), + BlockTrackResult::NewlyTracked { .. } + )); + // The 7500 record survives above the committed height. + assert_eq!( + manager.tracker.track(&key_out, 0, BTreeSet::from([wallet_id])), + BlockTrackResult::AlreadyProcessed + ); + } + + /// `tick` rescan with a wallet that has a non-zero `synced_height`: the + /// batch must start at `synced_height + 1`, not at genesis. + #[tokio::test] + async fn test_tick_rescans_from_wallet_synced_height_not_genesis() { + let mut manager = create_test_manager().await; + + // Wallet sits at synced_height=150, manager committed at 300, so + // the wallet falls behind and the rescan trigger fires. + manager.wallet.write().await.update_wallet_synced_height(&MOCK_WALLET_ID, 150); + manager.set_state(SyncState::Synced); + manager.progress.update_committed_height(300); + manager.progress.update_stored_height(300); + manager.progress.update_filter_header_tip_height(300); + manager.progress.update_target_height(300); + + // Headers must exist in storage so start_download can resolve them. + let headers = dashcore::block::Header::dummy_batch(0..301); + manager.header_storage.write().await.store_headers(&headers).await.unwrap(); + + let (tx, _rx) = unbounded_channel(); + let _ = manager.tick(&RequestSender::new(tx)).await.unwrap(); + + // Batch must start at 151, not at 0. 
+ assert!(manager.active_batches.contains_key(&151)); + assert!(!manager.active_batches.contains_key(&0)); + } + + /// scan_batch's union-then-attribute pass must not falsely attribute a + /// block to a wallet whose own address does not actually match the + /// filter, even if the union pass picked up the block. + #[tokio::test] + async fn test_scan_batch_attribution_excludes_non_matching_wallet() { + let wallet_a: WalletId = [0xAA; 32]; + let wallet_b: WalletId = [0xBB; 32]; + let address_a = dashcore::Address::dummy(Network::Regtest, 31); + let address_b = dashcore::Address::dummy(Network::Regtest, 32); + + let multi = MultiMockWallet::new(); + let multi = Arc::new(RwLock::new(multi)); + { + let mut w = multi.write().await; + w.insert_wallet( + wallet_a, + MockWalletState { + addresses: vec![address_a.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + w.insert_wallet( + wallet_b, + MockWalletState { + addresses: vec![address_b.clone()], + synced_height: 0, + last_processed_height: 0, + }, + ); + } + let mut manager = create_multi_test_manager(multi).await; + manager.set_state(SyncState::Syncing); + + // Filter at height 40 only matches address_a. address_b is in the + // union but does not match this specific filter, so the attribution + // pass must exclude wallet_b. 
+ let (key_40, f_40) = filter_for_address(40, &address_a); + let mut filters: HashMap = HashMap::new(); + filters.insert(key_40.clone(), f_40); + + let mut batch = FiltersBatch::new(0, 99, filters); + batch.mark_verified(); + manager.active_batches.insert(0, batch); + manager.progress.update_stored_height(99); + + let events = manager.scan_batch(0).await.unwrap(); + let blocks = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("BlocksNeeded event"); + let attribution = blocks.get(&key_40).expect("entry for the matching block"); + assert!(attribution.contains(&wallet_a)); + assert!(!attribution.contains(&wallet_b)); } #[tokio::test] @@ -960,22 +1740,79 @@ mod tests { let mut manager = create_test_manager().await; manager.set_state(SyncState::Syncing); - // Add blocks from different batches + let wallet: WalletId = [1; 32]; let hash1 = dashcore::block::Header::dummy(0).block_hash(); let hash2 = dashcore::block::Header::dummy(1).block_hash(); - manager.blocks_remaining.insert(hash1, (100, 0)); // batch 0 - manager.blocks_remaining.insert(hash2, (5100, 5000)); // batch 5000 + // Track blocks from two different batches. + manager.tracker.track(&FilterMatchKey::new(100, hash1), 0, BTreeSet::from([wallet])); + manager.tracker.track(&FilterMatchKey::new(5100, hash2), 5000, BTreeSet::from([wallet])); - // Verify batch association - assert_eq!(manager.blocks_remaining.get(&hash1), Some(&(100, 0))); - assert_eq!(manager.blocks_remaining.get(&hash2), Some(&(5100, 5000))); + // Each block round-trips its (height, batch_start) on `finish_in_flight`. 
+ assert_eq!(manager.tracker.finish_in_flight(&hash1), Some((100, 0))); + assert_eq!(manager.tracker.finish_in_flight(&hash2), Some((5100, 5000))); + } + + #[tokio::test] + async fn test_track_block_match_per_wallet_residual() { + let mut manager = create_test_manager().await; + let hash = dashcore::block::Header::dummy(0).block_hash(); + let key = FilterMatchKey::new(100, hash); + let wallet_a: WalletId = [0xA1; 32]; + let wallet_b: WalletId = [0xB2; 32]; + + // First match for {A}: nothing tracked yet, helper records the block. + assert_eq!( + manager.tracker.track(&key, 0, BTreeSet::from([wallet_a])), + BlockTrackResult::NewlyTracked { + wallets: BTreeSet::from([wallet_a]) + } + ); + + // Second match for {A} while still in flight: residual is {A} (no + // processing has been recorded yet), so InFlight re-emits to merge + // late-arriving wallet ids into the pipeline's pending set. + assert_eq!( + manager.tracker.track(&key, 0, BTreeSet::from([wallet_a])), + BlockTrackResult::InFlight { + wallets: BTreeSet::from([wallet_a]) + } + ); + + // Block is delivered and processed for {A}. Round-trip the (height, + // batch_start) tuple while removing the in-flight entry, then record + // the processing. + assert_eq!(manager.tracker.finish_in_flight(&hash), Some((100, 0))); + manager.tracker.record_processed(100, hash, &BTreeSet::from([wallet_a])); + + // Late-added wallet B's filter matches the same block. A is already + // processed, B is not — residual is {B} and it gets re-queued via + // NewlyTracked so the block reloads from storage and applies for B + // only. + assert_eq!( + manager.tracker.track(&key, 5000, BTreeSet::from([wallet_a, wallet_b])), + BlockTrackResult::NewlyTracked { + wallets: BTreeSet::from([wallet_b]) + } + ); + assert_eq!(manager.tracker.finish_in_flight(&hash), Some((100, 5000))); + + // After B is also processed, a third match including only A and B + // returns AlreadyProcessed since both are covered. 
+ manager.tracker.record_processed(100, hash, &BTreeSet::from([wallet_b])); + assert_eq!( + manager.tracker.track(&key, 5000, BTreeSet::from([wallet_a, wallet_b])), + BlockTrackResult::AlreadyProcessed + ); + assert!(manager.tracker.finish_in_flight(&hash).is_none()); } #[tokio::test] async fn test_is_idle() { let mut manager = create_test_manager().await; let hash = dashcore::block::Header::dummy(0).block_hash(); + let key = FilterMatchKey::new(100, hash); + let wallet_id: WalletId = [0xCC; 32]; // Fresh manager is idle assert!(manager.is_idle()); @@ -985,13 +1822,13 @@ mod tests { assert!(!manager.is_idle()); manager.active_batches.clear(); - manager.blocks_remaining.insert(hash, (0, 0)); + manager.tracker.track(&key, 0, BTreeSet::from([wallet_id])); assert!(!manager.is_idle()); - manager.blocks_remaining.clear(); + manager.tracker.clear(); - manager.filters_matched.insert(hash); + manager.tracker.record_processed(100, hash, &BTreeSet::from([wallet_id])); assert!(!manager.is_idle()); - manager.filters_matched.clear(); + manager.tracker.clear(); manager.pending_batches.insert(FiltersBatch::new(0, 999, HashMap::new())); assert!(!manager.is_idle()); @@ -1003,8 +1840,8 @@ mod tests { // Populate all fields, then clear_in_flight_state restores idleness manager.active_batches.insert(0, FiltersBatch::new(0, 999, HashMap::new())); - manager.blocks_remaining.insert(hash, (0, 0)); - manager.filters_matched.insert(hash); + manager.tracker.track(&key, 0, BTreeSet::from([wallet_id])); + manager.tracker.record_processed(100, hash, &BTreeSet::from([wallet_id])); manager.pending_batches.insert(FiltersBatch::new(1000, 1999, HashMap::new())); manager.filter_pipeline.init(2000, 2999); assert!(!manager.is_idle()); @@ -1026,13 +1863,15 @@ mod tests { // Add addresses using test utility let addr1 = dashcore::Address::dummy(Network::Testnet, 1); let addr2 = dashcore::Address::dummy(Network::Testnet, 2); + let wallet_id: WalletId = [7; 32]; - batch.add_addresses([addr1.clone(), 
addr2.clone()]); + batch.add_addresses_for_wallet(wallet_id, [addr1.clone(), addr2.clone()]); let collected = batch.take_collected_addresses(); - assert_eq!(collected.len(), 2); - assert!(collected.contains(&addr1)); - assert!(collected.contains(&addr2)); + let for_wallet = collected.get(&wallet_id).expect("wallet entry"); + assert_eq!(for_wallet.len(), 2); + assert!(for_wallet.contains(&addr1)); + assert!(for_wallet.contains(&addr2)); // After take, should be empty assert!(batch.take_collected_addresses().is_empty()); @@ -1044,7 +1883,7 @@ mod tests { assert_eq!(manager.state(), SyncState::WaitForEvents); // Wallet committed to height 100, so scan_start will be 101 - manager.wallet.write().await.update_last_processed_height(100); + manager.wallet.write().await.update_wallet_synced_height(&MOCK_WALLET_ID, 100); // Filter headers only reached 50, so it's below scan_start manager.progress.update_filter_header_tip_height(50); // Chain tip higher so the Synced early-return is not taken @@ -1121,7 +1960,7 @@ mod tests { // Simulate restart where everything is already synced but state is WaitForEvents. // committed == stored == filter_header_tip — start_download detects synced state. manager.set_state(SyncState::WaitForEvents); - manager.wallet.write().await.update_last_processed_height(100); + manager.wallet.write().await.update_wallet_synced_height(&MOCK_WALLET_ID, 100); manager.progress.update_committed_height(100); manager.progress.update_stored_height(100); manager.progress.update_filter_header_tip_height(100); @@ -1167,4 +2006,177 @@ mod tests { assert_eq!(manager.state(), SyncState::Synced); assert!(events.is_empty()); } + + /// A wallet whose `synced_height` sits below the manager's `committed_height` + /// must trigger a rescan from the wallet's height. This simulates a wallet + /// being added at runtime behind current scan progress.
+ #[tokio::test] + async fn test_tick_rescans_when_wallet_falls_behind_committed() { + let mut manager = create_test_manager().await; + + // Set up a single address on the wallet and a real matching filter at + // height 50 so scan_batch can emit a `BlocksNeeded` for it on rescan. + let address = dashcore::Address::dummy(Network::Regtest, 7); + manager.wallet.write().await.set_addresses(vec![address.clone()]); + + // Build matching block + filter at height 50. + let tx = Transaction::dummy(&address, 0..0, &[50u64]); + let block_at_50 = Block::dummy(50, vec![tx]); + let filter_at_50 = BlockFilter::dummy(&block_at_50); + + // Headers must form a contiguous range so the storage segment is + // fully populated. Only the height-50 entry needs to be the real + // header; the rest are dummies and never get matched against. + let mut headers: Vec = dashcore::block::Header::dummy_batch(0..201); + headers[50] = block_at_50.header; + manager.header_storage.write().await.store_headers(&headers).await.unwrap(); + + // Persist a filter at every height in 0..=100 so `load_filters` over + // the initial batch range succeeds. Non-matching heights get a + // throwaway filter, only height 50 gets the address-matching one. + let mut filter_store = manager.filter_storage.write().await; + let dummy_filter = BlockFilter::new(&[0u8; 32]); + for h in 0..=100u32 { + if h == 50 { + filter_store.store_filter(h, &filter_at_50.content).await.unwrap(); + } else { + filter_store.store_filter(h, &dummy_filter.content).await.unwrap(); + } + } + drop(filter_store); + + // Manager believes filters are committed up to 100. Filter headers + // and target are pinned at 100 too so start_download immediately + // scans the freshly created batch instead of waiting for downloads. 
+ manager.set_state(SyncState::Synced); + manager.progress.update_committed_height(100); + manager.progress.update_stored_height(100); + manager.progress.update_filter_header_tip_height(100); + manager.progress.update_target_height(100); + + // Pre-populate in-flight state so we can verify clear_in_flight_state runs. + manager.active_batches.insert(101, FiltersBatch::new(101, 200, HashMap::new())); + let stale_hash = dashcore::block::Header::dummy(0).block_hash(); + let stale_key = FilterMatchKey::new(150, stale_hash); + manager.tracker.record_processed(150, stale_hash, &BTreeSet::from([MOCK_WALLET_ID])); + manager.filter_pipeline.init(101, 200); + + // MockWallet defaults to synced_height=0, so wallets_behind(100) = {MOCK_WALLET_ID}. + assert_eq!(manager.wallet.read().await.synced_height(), 0); + + let (tx, _rx) = unbounded_channel(); + let requests = RequestSender::new(tx); + + // Sanity: the pre-populated stale processed record is present, so + // `track` for the same wallet would short-circuit to AlreadyProcessed. + assert_eq!( + manager.tracker.track(&stale_key, 0, BTreeSet::from([MOCK_WALLET_ID])), + BlockTrackResult::AlreadyProcessed + ); + // Undo the side effect of the probing `track` so the original + // processed record is the only state present going into `tick`. + manager.tracker.clear(); + manager.tracker.record_processed(150, stale_hash, &BTreeSet::from([MOCK_WALLET_ID])); + + let events = manager.tick(&requests).await.unwrap(); + + // Old in-flight state was cleared and a fresh batch was created at scan_start=0. + assert!(!manager.active_batches.contains_key(&101)); + assert!(manager.active_batches.contains_key(&0)); + // The stale pre-populated record was wiped by `clear_in_flight_state`: + // a fresh `track` for the same wallet now returns `NewlyTracked`. + assert!(matches!( + manager.tracker.track(&stale_key, 0, BTreeSet::from([MOCK_WALLET_ID])), + BlockTrackResult::NewlyTracked { .. 
} + )); + + // start_download set committed_height to scan_start - 1 = 0. + assert_eq!(manager.progress.committed_height(), 0); + assert_eq!(manager.state(), SyncState::Syncing); + + // Verify a `BlocksNeeded` event was emitted that includes MOCK_WALLET_ID + // for the matching block at height 50. + let blocks_needed = events + .iter() + .find_map(|e| match e { + SyncEvent::BlocksNeeded { + blocks, + } => Some(blocks), + _ => None, + }) + .expect("BlocksNeeded event from rescan"); + let key_50 = FilterMatchKey::new(50, block_at_50.block_hash()); + let attribution = blocks_needed.get(&key_50).expect("entry for matching block 50"); + assert!(attribution.contains(&MOCK_WALLET_ID)); + } + + /// When every managed wallet is at or beyond `committed_height`, the rescan + /// trigger must not fire even though the aggregate `synced_height` could + /// otherwise look stale. + #[tokio::test] + async fn test_tick_does_not_rescan_when_no_wallets_behind() { + let mut manager = create_test_manager().await; + + // Wallet at synced_height=200, manager committed at 100 → no wallets behind. + manager.wallet.write().await.update_wallet_synced_height(&MOCK_WALLET_ID, 200); + + manager.set_state(SyncState::Synced); + manager.progress.update_committed_height(100); + manager.progress.update_stored_height(100); + manager.progress.update_filter_header_tip_height(200); + manager.progress.update_target_height(200); + + let (tx, _rx) = unbounded_channel(); + let requests = RequestSender::new(tx); + + let events = manager.tick(&requests).await.unwrap(); + + assert!(events.is_empty()); + assert_eq!(manager.progress.committed_height(), 100); + assert_eq!(manager.state(), SyncState::Synced); + assert!(manager.active_batches.is_empty()); + } + + /// `committed_height = 0` on a fresh manager must not falsely trip the + /// rescan trigger. `wallets_behind(0)` returns an empty set since heights + /// are unsigned, so no wallet can be strictly less than 0. 
+ #[tokio::test] + async fn test_tick_does_not_rescan_at_genesis_committed() { + let mut manager = create_test_manager().await; + // Default state: committed_height=0, wallet synced_height=0, state=WaitForEvents. + assert_eq!(manager.progress.committed_height(), 0); + assert_eq!(manager.state(), SyncState::WaitForEvents); + + let (tx, _rx) = unbounded_channel(); + let requests = RequestSender::new(tx); + + let events = manager.tick(&requests).await.unwrap(); + + assert!(events.is_empty()); + assert!(manager.is_idle()); + assert_eq!(manager.state(), SyncState::WaitForEvents); + } + + /// The rescan trigger only fires in `Syncing | Synced | WaitForEvents`. + /// `WaitingForConnections` must be skipped since we're not actively syncing. + #[tokio::test] + async fn test_tick_does_not_rescan_in_waiting_for_connections() { + let mut manager = create_test_manager().await; + manager.set_state(SyncState::WaitingForConnections); + manager.progress.update_committed_height(100); + + // Wallet behind committed — would normally trip the trigger. + assert!(!manager.wallet.read().await.wallets_behind(100).is_empty()); + + let (tx, _rx) = unbounded_channel(); + let requests = RequestSender::new(tx); + + let events = manager.tick(&requests).await.unwrap(); + + assert!(events.is_empty()); + // committed_height not lowered, no batches created. 
+ assert_eq!(manager.progress.committed_height(), 100); + assert_eq!(manager.state(), SyncState::WaitingForConnections); + assert!(manager.active_batches.is_empty()); + } } diff --git a/dash-spv/src/sync/filters/mod.rs b/dash-spv/src/sync/filters/mod.rs index a930e87da..e65bb1b30 100644 --- a/dash-spv/src/sync/filters/mod.rs +++ b/dash-spv/src/sync/filters/mod.rs @@ -1,5 +1,6 @@ mod batch; mod batch_tracker; +mod block_match_tracker; mod manager; mod pipeline; mod progress; diff --git a/dash-spv/src/sync/filters/sync_manager.rs b/dash-spv/src/sync/filters/sync_manager.rs index 45b341465..dadf490e8 100644 --- a/dash-spv/src/sync/filters/sync_manager.rs +++ b/dash-spv/src/sync/filters/sync_manager.rs @@ -41,8 +41,7 @@ impl< fn clear_in_flight_state(&mut self) { self.active_batches.clear(); - self.blocks_remaining.clear(); - self.filters_matched.clear(); + self.tracker.clear(); self.pending_batches.clear(); self.filter_pipeline = FiltersPipeline::new(); } @@ -156,12 +155,17 @@ impl< SyncEvent::BlockProcessed { block_hash, height, + wallets, new_addresses, .. } => { + // Record per-wallet processing so a future scan can give a + // late-added wallet its own pass at this block via the + // `tracker.track` residual. + self.tracker.record_processed(*height, *block_hash, wallets); + // Check if this block is part of our tracked blocks - if let Some((_, batch_start)) = self.blocks_remaining.remove(block_hash) { - // Decrement this batch's pending_blocks count + if let Some((_, batch_start)) = self.tracker.finish_in_flight(block_hash) { if let Some(batch) = self.active_batches.get_mut(&batch_start) { batch.decrement_pending_blocks(); tracing::debug!( @@ -173,16 +177,16 @@ impl< ); } - // Collect new addresses in the batch for deferred rescan at commit time. - // This batches rescans for efficiency and ensures all blocks from - // a BlocksNeeded event are processed before triggering new rescans. 
- if !new_addresses.is_empty() { + // Collect per-wallet new addresses for deferred rescan at commit time. + for (wallet_id, addrs) in new_addresses { + if addrs.is_empty() { + continue; + } if let Some(batch) = self.active_batches.get_mut(&batch_start) { - batch.add_addresses(new_addresses.iter().cloned()); + batch.add_addresses_for_wallet(*wallet_id, addrs.iter().cloned()); } } - // Try to commit/scan/create batches return self.try_process_batch().await; } } @@ -194,6 +198,30 @@ impl< } async fn tick(&mut self, requests: &RequestSender) -> SyncResult> { + // Detect a wallet that was added behind our scan progress and rescan + // from its `synced_height`. Reset committed_height to the lowest + // synced_height across the stale wallets only, so already-synced + // wallets are not re-scanned from scratch. + if matches!(self.state(), SyncState::Syncing | SyncState::Synced | SyncState::WaitForEvents) + { + let committed = self.progress.committed_height(); + let wallet_read = self.wallet.read().await; + let behind = wallet_read.wallets_behind(committed); + let stale_min_synced = + behind.iter().map(|id| wallet_read.wallet_synced_height(id)).min(); + drop(wallet_read); + if let Some(stale_min_synced) = stale_min_synced { + tracing::info!( + "Wallet synced_height {} fell below filter committed_height {}, restarting scan", + stale_min_synced, + committed + ); + self.clear_in_flight_state(); + self.progress.update_committed_height(stale_min_synced); + return self.start_download(requests).await; + } + } + // TODO: Get rid of the send pending in here? Or decouple it from the header storage? 
// Run tick when Syncing OR when Synced with pending work (new blocks arriving) let has_pending_work = !self.active_batches.is_empty(); diff --git a/dash-spv/src/sync/mempool/sync_manager.rs b/dash-spv/src/sync/mempool/sync_manager.rs index 806905224..aef25554f 100644 --- a/dash-spv/src/sync/mempool/sync_manager.rs +++ b/dash-spv/src/sync/mempool/sync_manager.rs @@ -194,6 +194,7 @@ mod tests { use crate::test_utils::test_socket_address; use dashcore::hashes::Hash; use key_wallet_manager::test_utils::MockWallet; + use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use tokio::sync::{mpsc, RwLock}; @@ -388,7 +389,8 @@ mod tests { let event = SyncEvent::BlockProcessed { block_hash: dashcore::BlockHash::all_zeros(), height: 1001, - new_addresses: vec![], + wallets: BTreeSet::new(), + new_addresses: BTreeMap::new(), confirmed_txids: txids.clone(), }; let events = manager.handle_sync_event(&event, &requests).await.unwrap(); @@ -573,7 +575,8 @@ mod tests { let event = SyncEvent::BlockProcessed { block_hash: dashcore::BlockHash::all_zeros(), height: 1001, - new_addresses: vec![], + wallets: BTreeSet::new(), + new_addresses: BTreeMap::new(), confirmed_txids: vec![dashcore::Txid::all_zeros()], }; manager.handle_sync_event(&event, &requests).await.unwrap(); @@ -599,7 +602,8 @@ mod tests { let event = SyncEvent::BlockProcessed { block_hash: dashcore::BlockHash::all_zeros(), height: 1001, - new_addresses: vec![], + wallets: BTreeSet::new(), + new_addresses: BTreeMap::new(), confirmed_txids: vec![], }; manager.handle_sync_event(&event, &requests).await.unwrap(); diff --git a/dash-spv/tests/dashd_sync/helpers.rs b/dash-spv/tests/dashd_sync/helpers.rs index 99069b04b..b90a9a187 100644 --- a/dash-spv/tests/dashd_sync/helpers.rs +++ b/dash-spv/tests/dashd_sync/helpers.rs @@ -98,7 +98,7 @@ pub(super) fn is_progress_event(event: &SyncEvent) -> bool { SyncEvent::BlockProcessed { new_addresses, .. 
- } => !new_addresses.is_empty(), + } => new_addresses.values().any(|v| !v.is_empty()), _ => false, } } diff --git a/key-wallet-ffi/src/wallet_manager_tests.rs b/key-wallet-ffi/src/wallet_manager_tests.rs index 3d062021a..9cb6ed66f 100644 --- a/key-wallet-ffi/src/wallet_manager_tests.rs +++ b/key-wallet-ffi/src/wallet_manager_tests.rs @@ -6,7 +6,7 @@ mod tests { use crate::error::{FFIError, FFIErrorCode}; use crate::{wallet, wallet_manager}; use dash_network::ffi::FFINetwork; - use key_wallet_manager::WalletInterface; + use key_wallet_manager::{WalletId, WalletInterface}; use std::ffi::{CStr, CString}; use std::ptr; use std::slice; @@ -442,13 +442,14 @@ mod tests { let height = unsafe { wallet_manager::wallet_manager_current_height(manager, error) }; assert_eq!(height, 0); - // Updating last-processed height without wallets is a no-op + // Updating last-processed height for an unknown wallet is a no-op. + let unknown_wallet: WalletId = [0xff; 32]; let new_height = 12345; unsafe { let manager_ref = &*manager; manager_ref.runtime.block_on(async { let mut manager_guard = manager_ref.manager.write().await; - manager_guard.update_last_processed_height(new_height); + manager_guard.update_wallet_last_processed_height(&unknown_wallet, new_height); }); } diff --git a/key-wallet-manager/Cargo.toml b/key-wallet-manager/Cargo.toml index 46e201200..b54f43418 100644 --- a/key-wallet-manager/Cargo.toml +++ b/key-wallet-manager/Cargo.toml @@ -18,6 +18,7 @@ key-wallet = { path = "../key-wallet", default-features = false } dashcore = { path = "../dash" } async-trait = "0.1" tokio = { version = "1", features = ["macros", "rt", "sync"] } +tracing = "0.1" zeroize = { version = "1.8", features = ["derive"] } rayon = { version = "1.11", optional = true } bincode = { version = "2.0.1", optional = true } diff --git a/key-wallet-manager/examples/wallet_creation.rs b/key-wallet-manager/examples/wallet_creation.rs index d11abe13b..fd82448b8 100644 --- 
a/key-wallet-manager/examples/wallet_creation.rs +++ b/key-wallet-manager/examples/wallet_creation.rs @@ -144,8 +144,11 @@ fn main() { println!(" Current last-processed height (Testnet): {:?}", manager.last_processed_height()); - // Update last-processed height across all managed wallets - manager.update_last_processed_height(850_000); + // Advance every wallet's last-processed height through the per-wallet API. + let wallet_ids: Vec<_> = manager.list_wallets().into_iter().copied().collect(); + for wallet_id in &wallet_ids { + manager.update_wallet_last_processed_height(wallet_id, 850_000); + } println!(" Updated last-processed height to: {:?}", manager.last_processed_height()); println!("\n=== Summary ==="); diff --git a/key-wallet-manager/src/event_tests.rs b/key-wallet-manager/src/event_tests.rs index 3e851cad7..21b206bce 100644 --- a/key-wallet-manager/src/event_tests.rs +++ b/key-wallet-manager/src/event_tests.rs @@ -7,6 +7,7 @@ use dashcore::hash_types::CycleHash; use dashcore::hashes::Hash; use dashcore::BlockHash; use key_wallet::transaction_checking::BlockInfo; +use std::collections::BTreeSet; // --------------------------------------------------------------------------- // Lifecycle flow tests @@ -484,7 +485,8 @@ async fn test_process_block_emits_events() { txdata: vec![tx], }; - let result = manager.process_block(&block, 1000).await; + let wallets = BTreeSet::from([wallet_id]); + let result = manager.process_block_for_wallets(&block, 1000, &wallets).await; assert_eq!(result.new_txids.len(), 1); let events = drain_events(&mut rx); diff --git a/key-wallet-manager/src/lib.rs b/key-wallet-manager/src/lib.rs index d1ddeab02..c6a6e94b9 100644 --- a/key-wallet-manager/src/lib.rs +++ b/key-wallet-manager/src/lib.rs @@ -33,7 +33,7 @@ use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoIn use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; use key_wallet::{AccountType, Address, ExtendedPrivKey, Mnemonic, Network, Wallet}; use 
key_wallet::{ExtendedPubKey, WalletCoreBalance}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::str::FromStr; use tokio::sync::broadcast; @@ -72,8 +72,9 @@ pub struct CheckTransactionsResult { pub affected_wallets: Vec, /// Set to false if the transaction was already stored and is being re-processed (e.g., during rescan) pub is_new_transaction: bool, - /// New addresses generated during gap limit maintenance - pub new_addresses: Vec
, + /// New addresses generated during gap limit maintenance, attributed to the + /// wallet that produced them. + pub new_addresses: BTreeMap>, /// Total value received across all wallets pub total_received: u64, /// Total value sent across all wallets @@ -82,6 +83,13 @@ pub struct CheckTransactionsResult { pub involved_addresses: Vec
, } +impl CheckTransactionsResult { + /// Iterate over every newly generated address regardless of wallet attribution. + pub(crate) fn all_new_addresses(&self) -> impl Iterator { + self.new_addresses.values().flatten() + } +} + /// High-level wallet manager that manages multiple wallets /// /// Each wallet can contain multiple accounts following BIP44 standard. @@ -450,16 +458,33 @@ impl WalletManager { update_state_if_found: bool, update_balance: bool, ) -> CheckTransactionsResult { - let mut result = CheckTransactionsResult::default(); + let wallet_ids: BTreeSet = self.wallets.keys().cloned().collect(); + self.check_transaction_in_wallets( + tx, + context, + &wallet_ids, + update_state_if_found, + update_balance, + ) + .await + } - // We need to iterate carefully since we're mutating - let wallet_ids: Vec = self.wallets.keys().cloned().collect(); + /// Check a transaction against the given subset of wallets and update their states if relevant. + pub(crate) async fn check_transaction_in_wallets( + &mut self, + tx: &Transaction, + context: TransactionContext, + wallet_ids: &BTreeSet, + update_state_if_found: bool, + update_balance: bool, + ) -> CheckTransactionsResult { + let mut result = CheckTransactionsResult::default(); for wallet_id in wallet_ids { // Get mutable references to both wallet and wallet_info // We need to use split borrowing to get around Rust's borrow checker - let wallet_opt = self.wallets.get_mut(&wallet_id); - let wallet_info_opt = self.wallet_infos.get_mut(&wallet_id); + let wallet_opt = self.wallets.get_mut(wallet_id); + let wallet_info_opt = self.wallet_infos.get_mut(wallet_id); if let (Some(wallet), Some(wallet_info)) = (wallet_opt, wallet_info_opt) { let check_result = wallet_info @@ -472,15 +497,12 @@ impl WalletManager { ) .await; - // If the transaction is relevant if check_result.is_relevant { - result.affected_wallets.push(wallet_id); - // If any wallet reports this as new, mark result as new + 
result.affected_wallets.push(*wallet_id); if check_result.is_new_transaction { result.is_new_transaction = true; } - // Aggregate totals and involved addresses across wallets result.total_received = result.total_received.saturating_add(check_result.total_received); result.total_sent = result.total_sent.saturating_add(check_result.total_sent); @@ -493,16 +515,15 @@ impl WalletManager { if check_result.is_new_transaction { for (account_index, record) in check_result.new_records { let event = WalletEvent::TransactionReceived { - wallet_id, + wallet_id: *wallet_id, account_index, record: Box::new(record), }; let _ = self.event_sender.send(event); } } else if check_result.state_modified { - // Known transaction whose state was modified (confirmation or IS-lock). let event = WalletEvent::TransactionStatusChanged { - wallet_id, + wallet_id: *wallet_id, txid: tx.txid(), status: context.clone(), }; @@ -510,7 +531,13 @@ impl WalletManager { } } - result.new_addresses.extend(check_result.new_addresses); + if !check_result.new_addresses.is_empty() { + result + .new_addresses + .entry(*wallet_id) + .or_default() + .extend(check_result.new_addresses); + } } } diff --git a/key-wallet-manager/src/matching.rs b/key-wallet-manager/src/matching.rs index 0b61289f8..acc73865a 100644 --- a/key-wallet-manager/src/matching.rs +++ b/key-wallet-manager/src/matching.rs @@ -27,18 +27,33 @@ impl FilterMatchKey { } /// Check compact filters for addresses and return the keys that matched. +/// +/// Entries with `key.height() <= min_height` are skipped. Pass `0` to test +/// every filter in the input. pub fn check_compact_filters_for_addresses( input: &HashMap, addresses: Vec
, + min_height: CoreBlockHeight, ) -> BTreeSet { let script_pubkey_bytes: Vec> = addresses.iter().map(|address| address.script_pubkey().to_bytes()).collect(); let match_filter = |(key, filter): (&FilterMatchKey, &BlockFilter)| { - filter - .match_any(key.hash(), script_pubkey_bytes.iter().map(|v| v.as_slice())) - .unwrap_or(false) - .then_some(key.clone()) + if key.height() <= min_height { + return None; + } + match filter.match_any(key.hash(), script_pubkey_bytes.iter().map(|v| v.as_slice())) { + Ok(true) => Some(key.clone()), + Ok(false) => None, + Err(e) => { + tracing::warn!( + "filter match_any error at height {}: {}; treating as non-match", + key.height(), + e + ); + None + } + } }; #[cfg(feature = "parallel-filters")] @@ -60,7 +75,7 @@ mod tests { #[test] fn test_empty_input_returns_empty() { - let result = check_compact_filters_for_addresses(&HashMap::new(), vec![]); + let result = check_compact_filters_for_addresses(&HashMap::new(), vec![], 0); assert!(result.is_empty()); } @@ -75,7 +90,7 @@ mod tests { let mut input = HashMap::new(); input.insert(key.clone(), filter); - let output = check_compact_filters_for_addresses(&input, vec![]); + let output = check_compact_filters_for_addresses(&input, vec![], 0); assert!(!output.contains(&key)); } @@ -90,7 +105,7 @@ mod tests { let mut input = HashMap::new(); input.insert(key.clone(), filter); - let output = check_compact_filters_for_addresses(&input, vec![address]); + let output = check_compact_filters_for_addresses(&input, vec![address], 0); assert!(output.contains(&key)); } @@ -107,7 +122,7 @@ mod tests { let mut input = HashMap::new(); input.insert(key.clone(), filter); - let output = check_compact_filters_for_addresses(&input, vec![address]); + let output = check_compact_filters_for_addresses(&input, vec![address], 0); assert!(!output.contains(&key)); } @@ -137,7 +152,7 @@ mod tests { input.insert(key_2.clone(), filter_2); input.insert(key_3.clone(), filter_3); - let output = 
check_compact_filters_for_addresses(&input, vec![address_1, address_2]); + let output = check_compact_filters_for_addresses(&input, vec![address_1, address_2], 0); assert_eq!(output.len(), 2); assert!(output.contains(&key_1)); assert!(output.contains(&key_2)); @@ -160,7 +175,7 @@ mod tests { input.insert(key, filter); } - let output = check_compact_filters_for_addresses(&input, vec![address]); + let output = check_compact_filters_for_addresses(&input, vec![address], 0); // Verify output is sorted by height (ascending) let heights_out: Vec = output.iter().map(|k| k.height()).collect(); diff --git a/key-wallet-manager/src/process_block.rs b/key-wallet-manager/src/process_block.rs index 7f6ceb23e..6f232ea93 100644 --- a/key-wallet-manager/src/process_block.rs +++ b/key-wallet-manager/src/process_block.rs @@ -1,5 +1,5 @@ use crate::wallet_interface::{BlockProcessingResult, MempoolTransactionResult, WalletInterface}; -use crate::{WalletEvent, WalletManager}; +use crate::{WalletEvent, WalletId, WalletManager}; use async_trait::async_trait; use core::fmt::Write as _; use dashcore::ephemerealdata::instant_lock::InstantLock; @@ -7,24 +7,27 @@ use dashcore::prelude::CoreBlockHeight; use dashcore::{Address, Block, Transaction}; use key_wallet::transaction_checking::{BlockInfo, TransactionContext}; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; +use std::collections::BTreeSet; use tokio::sync::broadcast; #[async_trait] impl WalletInterface for WalletManager { - async fn process_block( + async fn process_block_for_wallets( &mut self, block: &Block, height: CoreBlockHeight, + wallets: &BTreeSet, ) -> BlockProcessingResult { let mut result = BlockProcessingResult::default(); + if wallets.is_empty() { + return result; + } let info = BlockInfo::new(height, block.block_hash(), block.header.time); - // Process each transaction using the base manager for tx in &block.txdata { let context = TransactionContext::InBlock(info); - let check_result 
= - self.check_transaction_in_all_wallets(tx, context, true, false).await; + self.check_transaction_in_wallets(tx, context, wallets, true, false).await; if !check_result.affected_wallets.is_empty() { if check_result.is_new_transaction { @@ -34,10 +37,27 @@ impl WalletInterface for WalletM } } - result.new_addresses.extend(check_result.new_addresses); + for (wallet_id, addrs) in check_result.new_addresses { + result.new_addresses.entry(wallet_id).or_default().extend(addrs); + } } - self.update_last_processed_height(height); + // For each processed wallet: advance last-processed height monotonically + // and refresh the cached balance so it reflects any UTXO changes from + // this block. Rescan blocks at heights below the wallet's current + // checkpoint must not drag the height backwards, but they still need a + // balance refresh because UTXOs were added or removed. + let snapshot = self.snapshot_balances(); + for wallet_id in wallets { + if let Some(info) = self.wallet_infos.get_mut(wallet_id) { + if height > info.last_processed_height() { + info.update_last_processed_height(height); + } else { + info.update_balance(); + } + } + } + self.emit_balance_changes(&snapshot); result } @@ -72,12 +92,13 @@ impl WalletInterface for WalletM } self.emit_balance_changes(&snapshot); + let new_addresses: Vec
= check_result.all_new_addresses().cloned().collect(); MempoolTransactionResult { is_relevant, net_amount, is_outgoing: net_amount < 0, addresses: check_result.involved_addresses, - new_addresses: check_result.new_addresses, + new_addresses, } } @@ -85,6 +106,10 @@ impl WalletInterface for WalletM self.monitored_addresses() } + fn monitored_addresses_for(&self, wallet_id: &WalletId) -> Vec
{ + self.wallet_infos.get(wallet_id).map(|info| info.monitored_addresses()).unwrap_or_default() + } + fn watched_outpoints(&self) -> Vec { self.watched_outpoints() } @@ -101,24 +126,47 @@ impl WalletInterface for WalletM self.wallet_infos.values().map(|info| info.last_processed_height()).max().unwrap_or(0) } - fn update_last_processed_height(&mut self, height: CoreBlockHeight) { - let snapshot = self.snapshot_balances(); + fn synced_height(&self) -> CoreBlockHeight { + self.wallet_infos.values().map(|info| info.synced_height()).min().unwrap_or(0) + } - for (_wallet_id, info) in self.wallet_infos.iter_mut() { - info.update_last_processed_height(height); - } + fn wallets_behind(&self, height: CoreBlockHeight) -> BTreeSet { + self.wallet_infos + .iter() + .filter_map(|(id, info)| { + if info.synced_height() < height { + Some(*id) + } else { + None + } + }) + .collect() + } - self.emit_balance_changes(&snapshot); + fn wallet_synced_height(&self, wallet_id: &WalletId) -> CoreBlockHeight { + self.wallet_infos.get(wallet_id).map(|info| info.synced_height()).unwrap_or(0) } - fn synced_height(&self) -> CoreBlockHeight { - self.wallet_infos.values().map(|info| info.synced_height()).min().unwrap_or(0) + fn update_wallet_synced_height(&mut self, wallet_id: &WalletId, height: CoreBlockHeight) { + if let Some(info) = self.wallet_infos.get_mut(wallet_id) { + if height > info.synced_height() { + info.update_synced_height(height); + } + } } - fn update_synced_height(&mut self, height: CoreBlockHeight) { - for (_wallet_id, info) in self.wallet_infos.iter_mut() { - info.update_synced_height(height); + fn update_wallet_last_processed_height( + &mut self, + wallet_id: &WalletId, + height: CoreBlockHeight, + ) { + let snapshot = self.snapshot_balances(); + if let Some(info) = self.wallet_infos.get_mut(wallet_id) { + if height > info.last_processed_height() { + info.update_last_processed_height(height); + } } + self.emit_balance_changes(&snapshot); } fn subscribe_events(&self) -> 
broadcast::Receiver { @@ -193,10 +241,11 @@ mod tests { BlockHash, Network, OutPoint, ScriptBuf, TxIn, TxMerkleNode, TxOut, Txid, Witness, }; use key_wallet::account::StandardAccountType; + use key_wallet::mnemonic::Language; use key_wallet::wallet::initialization::WalletAccountCreationOptions; use key_wallet::wallet::managed_wallet_info::transaction_building::AccountTypePreference; use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; - use key_wallet::AccountType; + use key_wallet::{AccountType, Mnemonic}; fn make_block(txdata: Vec) -> Block { Block { @@ -215,15 +264,9 @@ mod tests { #[tokio::test] async fn test_last_processed_height() { let mut manager: WalletManager = WalletManager::new(Network::Testnet); - // Initial state - assert_eq!(manager.last_processed_height(), 0); - // Updating last-processed height without wallets is a no-op - manager.update_last_processed_height(1000); assert_eq!(manager.last_processed_height(), 0); - // Still a no-op without wallets - manager.update_last_processed_height(5000); - assert_eq!(manager.last_processed_height(), 0); - manager.update_last_processed_height(10); + let unknown: WalletId = [0xff; 32]; + manager.update_wallet_last_processed_height(&unknown, 1000); assert_eq!(manager.last_processed_height(), 0); } @@ -277,12 +320,13 @@ mod tests { #[tokio::test] async fn test_process_block_emits_balance_updated() { - let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); let tx = create_tx_paying_to(&addr, 0xcc); let block = make_block(vec![tx]); let mut rx = manager.subscribe_events(); - manager.process_block(&block, 100).await; + let wallets = BTreeSet::from([wallet_id]); + manager.process_block_for_wallets(&block, 100, &wallets).await; let mut found = false; while let Ok(event) = rx.try_recv() { @@ -299,6 +343,38 @@ mod tests { assert!(found, "should emit BalanceUpdated for block processing"); } + #[tokio::test] + async fn 
test_process_block_for_wallets_only_touches_listed() { + let (mut manager, wallet_id1, _) = setup_manager_with_wallet(); + let mnemonic2 = Mnemonic::generate(12, Language::English).unwrap(); + let wallet_id2 = manager + .create_wallet_from_mnemonic( + &mnemonic2.to_string(), + "", + 0, + WalletAccountCreationOptions::Default, + ) + .unwrap(); + + let block = make_block(vec![]); + + let only_w1 = BTreeSet::from([wallet_id1]); + manager.process_block_for_wallets(&block, 200, &only_w1).await; + assert_eq!(manager.get_wallet_info(&wallet_id1).unwrap().last_processed_height(), 200); + assert_eq!(manager.get_wallet_info(&wallet_id2).unwrap().last_processed_height(), 0); + + let only_w2 = BTreeSet::from([wallet_id2]); + manager.process_block_for_wallets(&block, 300, &only_w2).await; + assert_eq!(manager.get_wallet_info(&wallet_id1).unwrap().last_processed_height(), 200); + assert_eq!(manager.get_wallet_info(&wallet_id2).unwrap().last_processed_height(), 300); + + // Empty wallet set is a no-op even though the height is past both wallets. + let none = BTreeSet::new(); + manager.process_block_for_wallets(&block, 1000, &none).await; + assert_eq!(manager.get_wallet_info(&wallet_id1).unwrap().last_processed_height(), 200); + assert_eq!(manager.get_wallet_info(&wallet_id2).unwrap().last_processed_height(), 300); + } + #[tokio::test] async fn test_mempool_transaction_result_contains_wallet_effect_data() { let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); @@ -382,9 +458,13 @@ mod tests { assert_eq!(manager.monitor_revision(), expected_rev, "after get_change_address"); } - // update_last_processed_height does NOT bump - manager.update_last_processed_height(1000); - assert_eq!(manager.monitor_revision(), expected_rev, "after update_last_processed_height"); + // `update_wallet_last_processed_height` does not bump the monitor revision. 
+ manager.update_wallet_last_processed_height(&wallet_id, 1000); + assert_eq!( + manager.monitor_revision(), + expected_rev, + "after update_wallet_last_processed_height" + ); // process_mempool_transaction bumps from UTXO changes and possibly // new addresses generated via gap limit maintenance @@ -406,11 +486,12 @@ mod tests { "after process_instant_send_lock" ); - // process_block bumps from UTXO changes and possibly new addresses + // process_block_for_wallets bumps from UTXO changes and possibly new addresses let rev_before_block = manager.monitor_revision(); let tx2 = create_tx_paying_to(&addr, 0xd1); let block = make_block(vec![tx2]); - let _result = manager.process_block(&block, 100).await; + let block_wallets = BTreeSet::from([wallet_id]); + let _result = manager.process_block_for_wallets(&block, 100, &block_wallets).await; assert!( manager.monitor_revision() > rev_before_block, "block with tx paying to our address should bump revision (UTXO added)" diff --git a/key-wallet-manager/src/test_utils/mock_wallet.rs b/key-wallet-manager/src/test_utils/mock_wallet.rs index 180e064ac..bd16d60d6 100644 --- a/key-wallet-manager/src/test_utils/mock_wallet.rs +++ b/key-wallet-manager/src/test_utils/mock_wallet.rs @@ -1,18 +1,27 @@ -use crate::{BlockProcessingResult, MempoolTransactionResult, WalletEvent, WalletInterface}; +use crate::{ + BlockProcessingResult, MempoolTransactionResult, WalletEvent, WalletId, WalletInterface, +}; use dashcore::ephemerealdata::instant_lock::InstantLock; use dashcore::prelude::CoreBlockHeight; use dashcore::{Address, Block, OutPoint, Transaction, Txid}; use key_wallet::transaction_checking::TransactionContext; +use std::collections::BTreeSet; use std::sync::Arc; use tokio::sync::{broadcast, Mutex}; // Type alias for captured IS lock payloads type InstantLockCaptures = Arc)>>>; +/// Default wallet ID used by `MockWallet` and `NonMatchingMockWallet` for tests +/// that don't care about per-wallet attribution. 
+pub const MOCK_WALLET_ID: WalletId = [0u8; 32]; + pub struct MockWallet { + wallet_id: WalletId, processed_blocks: Arc>>, processed_transactions: Arc>>, last_processed_height: CoreBlockHeight, + synced_height: CoreBlockHeight, event_sender: broadcast::Sender, /// When true, process_mempool_transaction returns is_relevant=true. mempool_relevant: bool, @@ -45,9 +54,11 @@ impl MockWallet { pub fn new() -> Self { let (event_sender, _) = broadcast::channel(16); Self { + wallet_id: MOCK_WALLET_ID, processed_blocks: Arc::new(Mutex::new(Vec::new())), processed_transactions: Arc::new(Mutex::new(Vec::new())), last_processed_height: 0, + synced_height: 0, event_sender, mempool_relevant: false, addresses: Vec::new(), @@ -61,6 +72,11 @@ impl MockWallet { } } + /// Override the wallet id used for per-wallet API surfaces. + pub fn set_wallet_id(&mut self, wallet_id: WalletId) { + self.wallet_id = wallet_id; + } + /// Configure whether mempool transactions are reported as relevant. pub fn set_mempool_relevant(&mut self, relevant: bool) { self.mempool_relevant = relevant; @@ -108,14 +124,25 @@ impl MockWallet { #[async_trait::async_trait] impl WalletInterface for MockWallet { - async fn process_block(&mut self, block: &Block, height: u32) -> BlockProcessingResult { + async fn process_block_for_wallets( + &mut self, + block: &Block, + height: u32, + wallets: &BTreeSet, + ) -> BlockProcessingResult { + if !wallets.contains(&self.wallet_id) { + return BlockProcessingResult::default(); + } let mut processed = self.processed_blocks.lock().await; processed.push((block.block_hash(), height)); + if height > self.last_processed_height { + self.last_processed_height = height; + } BlockProcessingResult { new_txids: block.txdata.iter().map(|tx| tx.txid()).collect(), existing_txids: Vec::new(), - new_addresses: Vec::new(), + new_addresses: Default::default(), } } @@ -152,6 +179,14 @@ impl WalletInterface for MockWallet { self.addresses.clone() } + fn monitored_addresses_for(&self, wallet_id: 
&WalletId) -> Vec
{ + if wallet_id == &self.wallet_id { + self.addresses.clone() + } else { + Vec::new() + } + } + fn watched_outpoints(&self) -> Vec { self.outpoints.clone() } @@ -160,8 +195,40 @@ impl WalletInterface for MockWallet { self.last_processed_height } - fn update_last_processed_height(&mut self, height: CoreBlockHeight) { - self.last_processed_height = height; + fn synced_height(&self) -> CoreBlockHeight { + self.synced_height + } + + fn wallets_behind(&self, height: CoreBlockHeight) -> BTreeSet { + if self.synced_height < height { + BTreeSet::from([self.wallet_id]) + } else { + BTreeSet::new() + } + } + + fn wallet_synced_height(&self, wallet_id: &WalletId) -> CoreBlockHeight { + if wallet_id == &self.wallet_id { + self.synced_height + } else { + 0 + } + } + + fn update_wallet_synced_height(&mut self, wallet_id: &WalletId, height: CoreBlockHeight) { + if wallet_id == &self.wallet_id && height > self.synced_height { + self.synced_height = height; + } + } + + fn update_wallet_last_processed_height( + &mut self, + wallet_id: &WalletId, + height: CoreBlockHeight, + ) { + if wallet_id == &self.wallet_id && height > self.last_processed_height { + self.last_processed_height = height; + } } fn monitor_revision(&self) -> u64 { @@ -189,7 +256,9 @@ impl WalletInterface for MockWallet { /// Mock wallet that returns false for filter checks pub struct NonMatchingMockWallet { + wallet_id: WalletId, last_processed_height: CoreBlockHeight, + synced_height: CoreBlockHeight, event_sender: broadcast::Sender, } @@ -203,7 +272,9 @@ impl NonMatchingMockWallet { pub fn new() -> Self { let (event_sender, _) = broadcast::channel(16); Self { + wallet_id: MOCK_WALLET_ID, last_processed_height: 0, + synced_height: 0, event_sender, } } @@ -211,7 +282,15 @@ impl NonMatchingMockWallet { #[async_trait::async_trait] impl WalletInterface for NonMatchingMockWallet { - async fn process_block(&mut self, _block: &Block, _height: u32) -> BlockProcessingResult { + async fn process_block_for_wallets( + &mut 
self, + _block: &Block, + height: u32, + wallets: &BTreeSet, + ) -> BlockProcessingResult { + if wallets.contains(&self.wallet_id) && height > self.last_processed_height { + self.last_processed_height = height; + } BlockProcessingResult::default() } @@ -227,6 +306,10 @@ impl WalletInterface for NonMatchingMockWallet { Vec::new() } + fn monitored_addresses_for(&self, _wallet_id: &WalletId) -> Vec
{ + Vec::new() + } + fn watched_outpoints(&self) -> Vec { Vec::new() } @@ -235,8 +318,40 @@ impl WalletInterface for NonMatchingMockWallet { self.last_processed_height } - fn update_last_processed_height(&mut self, height: CoreBlockHeight) { - self.last_processed_height = height; + fn synced_height(&self) -> CoreBlockHeight { + self.synced_height + } + + fn wallets_behind(&self, height: CoreBlockHeight) -> BTreeSet { + if self.synced_height < height { + BTreeSet::from([self.wallet_id]) + } else { + BTreeSet::new() + } + } + + fn wallet_synced_height(&self, wallet_id: &WalletId) -> CoreBlockHeight { + if wallet_id == &self.wallet_id { + self.synced_height + } else { + 0 + } + } + + fn update_wallet_synced_height(&mut self, wallet_id: &WalletId, height: CoreBlockHeight) { + if wallet_id == &self.wallet_id && height > self.synced_height { + self.synced_height = height; + } + } + + fn update_wallet_last_processed_height( + &mut self, + wallet_id: &WalletId, + height: CoreBlockHeight, + ) { + if wallet_id == &self.wallet_id && height > self.last_processed_height { + self.last_processed_height = height; + } } fn subscribe_events(&self) -> broadcast::Receiver { @@ -247,3 +362,146 @@ impl WalletInterface for NonMatchingMockWallet { "NonMatchingWallet (test implementation)".to_string() } } + +/// Per-wallet state held inside `MultiMockWallet`. +#[derive(Default)] +pub struct MockWalletState { + pub addresses: Vec
, + pub synced_height: CoreBlockHeight, + pub last_processed_height: CoreBlockHeight, +} + +/// Multi-wallet mock that holds independent state for several wallet IDs, +/// enabling tests that exercise per-wallet attribution paths. +pub struct MultiMockWallet { + wallets: std::collections::BTreeMap, + event_sender: broadcast::Sender, + /// Track every block processed for assertions. + processed: Arc>>, +} + +impl Default for MultiMockWallet { + fn default() -> Self { + Self::new() + } +} + +impl MultiMockWallet { + pub fn new() -> Self { + let (event_sender, _) = broadcast::channel(16); + Self { + wallets: std::collections::BTreeMap::new(), + event_sender, + processed: Arc::new(Mutex::new(Vec::new())), + } + } + + /// Insert or replace a wallet's state. + pub fn insert_wallet(&mut self, wallet_id: WalletId, state: MockWalletState) { + self.wallets.insert(wallet_id, state); + } + + /// Mutable access to a wallet's state, panicking if absent. + pub fn wallet_mut(&mut self, wallet_id: &WalletId) -> &mut MockWalletState { + self.wallets.get_mut(wallet_id).expect("wallet present") + } + + pub fn processed(&self) -> Arc>> { + self.processed.clone() + } +} + +#[async_trait::async_trait] +impl WalletInterface for MultiMockWallet { + async fn process_block_for_wallets( + &mut self, + block: &Block, + height: CoreBlockHeight, + wallets: &BTreeSet, + ) -> BlockProcessingResult { + let hash = block.block_hash(); + let mut processed = self.processed.lock().await; + for wallet_id in wallets { + if let Some(state) = self.wallets.get_mut(wallet_id) { + processed.push((*wallet_id, hash, height)); + if height > state.last_processed_height { + state.last_processed_height = height; + } + } + } + BlockProcessingResult::default() + } + + async fn process_mempool_transaction( + &mut self, + _tx: &Transaction, + _instant_lock: Option, + ) -> MempoolTransactionResult { + MempoolTransactionResult::default() + } + + fn monitored_addresses(&self) -> Vec
{ + self.wallets.values().flat_map(|s| s.addresses.iter().cloned()).collect() + } + + fn monitored_addresses_for(&self, wallet_id: &WalletId) -> Vec
{ + self.wallets.get(wallet_id).map(|s| s.addresses.clone()).unwrap_or_default() + } + + fn watched_outpoints(&self) -> Vec { + Vec::new() + } + + fn last_processed_height(&self) -> CoreBlockHeight { + self.wallets.values().map(|s| s.last_processed_height).max().unwrap_or(0) + } + + fn synced_height(&self) -> CoreBlockHeight { + self.wallets.values().map(|s| s.synced_height).min().unwrap_or(0) + } + + fn wallets_behind(&self, height: CoreBlockHeight) -> BTreeSet { + self.wallets + .iter() + .filter_map(|(id, s)| { + if s.synced_height < height { + Some(*id) + } else { + None + } + }) + .collect() + } + + fn wallet_synced_height(&self, wallet_id: &WalletId) -> CoreBlockHeight { + self.wallets.get(wallet_id).map(|s| s.synced_height).unwrap_or(0) + } + + fn update_wallet_synced_height(&mut self, wallet_id: &WalletId, height: CoreBlockHeight) { + if let Some(state) = self.wallets.get_mut(wallet_id) { + if height > state.synced_height { + state.synced_height = height; + } + } + } + + fn update_wallet_last_processed_height( + &mut self, + wallet_id: &WalletId, + height: CoreBlockHeight, + ) { + if let Some(state) = self.wallets.get_mut(wallet_id) { + if height > state.last_processed_height { + state.last_processed_height = height; + } + } + } + + fn subscribe_events(&self) -> broadcast::Receiver { + self.event_sender.subscribe() + } + + async fn describe(&self) -> String { + "MultiMockWallet (test implementation)".to_string() + } +} diff --git a/key-wallet-manager/src/test_utils/mod.rs b/key-wallet-manager/src/test_utils/mod.rs index 108a02fd5..252be4387 100644 --- a/key-wallet-manager/src/test_utils/mod.rs +++ b/key-wallet-manager/src/test_utils/mod.rs @@ -1,4 +1,7 @@ mod mock_wallet; pub use mock_wallet::MockWallet; +pub use mock_wallet::MockWalletState; +pub use mock_wallet::MultiMockWallet; pub use mock_wallet::NonMatchingMockWallet; +pub use mock_wallet::MOCK_WALLET_ID; diff --git a/key-wallet-manager/src/wallet_interface.rs 
b/key-wallet-manager/src/wallet_interface.rs index 90e01e80d..e2e7a8f89 100644 --- a/key-wallet-manager/src/wallet_interface.rs +++ b/key-wallet-manager/src/wallet_interface.rs @@ -2,11 +2,12 @@ //! //! This module defines the trait that SPV clients use to interact with wallets. -use crate::WalletEvent; +use crate::{WalletEvent, WalletId}; use async_trait::async_trait; use dashcore::ephemerealdata::instant_lock::InstantLock; use dashcore::prelude::CoreBlockHeight; use dashcore::{Address, Block, OutPoint, Transaction, Txid}; +use std::collections::{BTreeMap, BTreeSet}; use tokio::sync::broadcast; /// Result of processing a block through the wallet @@ -16,8 +17,8 @@ pub struct BlockProcessingResult { pub new_txids: Vec, /// Transaction IDs that were already in wallet history pub existing_txids: Vec, - /// New addresses generated during gap limit maintenance - pub new_addresses: Vec
, + /// New addresses generated per wallet during gap-limit maintenance. + pub new_addresses: BTreeMap>, } /// Result of processing a mempool transaction through the wallet @@ -45,18 +46,27 @@ impl BlockProcessingResult { pub fn relevant_tx_count(&self) -> usize { self.new_txids.len() + self.existing_txids.len() } + + /// Iterate over every newly generated address regardless of wallet attribution. + pub fn all_new_addresses(&self) -> impl Iterator { + self.new_addresses.values().flatten() + } } /// Trait for wallet implementations to receive SPV events #[async_trait] pub trait WalletInterface: Send + Sync + 'static { - /// Called when a new block is received that may contain relevant transactions. - /// Returns processing result including relevant transactions and any new addresses - /// generated during gap limit maintenance. - async fn process_block( + /// Process a block, but only against the listed wallets. Implementations + /// must update the per-wallet `last_processed_height` for each wallet in + /// `wallets` once the block is applied to its state. + /// + /// Pass the result of `wallets_behind(height)` for the canonical "scan + /// only the wallets that need this block" semantics. + async fn process_block_for_wallets( &mut self, block: &Block, height: CoreBlockHeight, + wallets: &BTreeSet, ) -> BlockProcessingResult; /// Called when a transaction is seen in the mempool. @@ -71,6 +81,9 @@ pub trait WalletInterface: Send + Sync + 'static { /// Get all addresses the wallet is monitoring for incoming transactions fn monitored_addresses(&self) -> Vec
; + /// Get monitored addresses for a specific wallet. + fn monitored_addresses_for(&self, wallet_id: &WalletId) -> Vec
; + /// Get all outpoints the wallet is watching (unspent outputs). /// Used for bloom filter construction to detect spends of our UTXOs. fn watched_outpoints(&self) -> Vec; @@ -88,23 +101,37 @@ pub trait WalletInterface: Send + Sync + 'static { /// Return the last fully processed height of the wallet. fn last_processed_height(&self) -> CoreBlockHeight; - /// Update the wallet's last processed height. This also triggers balance updates. - fn update_last_processed_height(&mut self, height: CoreBlockHeight); - - /// Return the height at which filter scanning was last committed. - /// Defaults to `last_processed_height()` for implementations that don't separate these concepts. - // TODO: This can probably somehow be combined with last_processed_height(). - fn synced_height(&self) -> CoreBlockHeight { - self.last_processed_height() + /// Return the lowest committed sync checkpoint across all managed wallets. + /// Filter scanning resumes from this height. A new wallet added behind this + /// drags the value down and triggers a rescan. + fn synced_height(&self) -> CoreBlockHeight; + + /// Return the wallet IDs whose `synced_height` is strictly less than `height`, + /// i.e. the wallets that still need filter coverage at that height. + fn wallets_behind(&self, height: CoreBlockHeight) -> BTreeSet; + + /// Return the wallet IDs that still need filter coverage at heights up to + /// and including `height`. Equivalent to `wallets_behind(height + 1)` but + /// expresses the inclusive intent at the call site, so callers don't have + /// to compensate the strict-less-than semantics with `+ 1`. + fn wallets_not_yet_at(&self, height: CoreBlockHeight) -> BTreeSet { + self.wallets_behind(height.saturating_add(1)) } - /// Update the committed synced height. Call when a height is fully processed - /// (including any rescans for newly discovered addresses). 
- fn update_synced_height(&mut self, height: CoreBlockHeight) { - if height > self.last_processed_height() { - self.update_last_processed_height(height); - } - } + /// Return the per-wallet committed sync checkpoint, or `0` if unknown. + fn wallet_synced_height(&self, wallet_id: &WalletId) -> CoreBlockHeight; + + /// Advance one wallet's committed sync checkpoint. Implementations must + /// only advance forward (a value below the current is silently ignored). + fn update_wallet_synced_height(&mut self, wallet_id: &WalletId, height: CoreBlockHeight); + + /// Advance one wallet's last-processed height after a block has been applied + /// to its state. Implementations must only advance forward. + fn update_wallet_last_processed_height( + &mut self, + wallet_id: &WalletId, + height: CoreBlockHeight, + ); /// Return a revision counter that increments whenever the set of monitored /// addresses or watched outpoints changes. The mempool manager uses this to diff --git a/key-wallet-manager/tests/integration_test.rs b/key-wallet-manager/tests/integration_test.rs index 16851df2a..fe0047564 100644 --- a/key-wallet-manager/tests/integration_test.rs +++ b/key-wallet-manager/tests/integration_test.rs @@ -162,13 +162,7 @@ fn test_balance_calculation() { fn test_block_height_tracking() { let mut manager = WalletManager::::new(Network::Testnet); - // Initial state - assert_eq!(manager.last_processed_height(), 0); - assert_eq!(manager.synced_height(), 0); - - // Updating heights before adding wallets is a no-op - manager.update_last_processed_height(1000); - manager.update_synced_height(500); + // Initial state with no wallets assert_eq!(manager.last_processed_height(), 0); assert_eq!(manager.synced_height(), 0); @@ -194,53 +188,58 @@ fn test_block_height_tracking() { assert_eq!(manager.wallet_count(), 2); - // Verify both wallets have last_processed_height and synced_height of 0 initially + // Both wallets initialized with `synced_height = birth_height - 1 = 0`, + // so neither has 
been processed past genesis. for wallet_info in manager.get_all_wallet_infos().values() { assert_eq!(wallet_info.last_processed_height(), 0); assert_eq!(wallet_info.synced_height(), 0); } - // Update last-processed height - should propagate to all wallets - manager.update_last_processed_height(12345); + // Per-wallet last-processed updates only touch the addressed wallet. + manager.update_wallet_last_processed_height(&wallet_id1, 12345); assert_eq!(manager.last_processed_height(), 12345); - - // Verify all wallets got updated while synced_height stays at 0 let wallet_info1 = manager.get_wallet_info(&wallet_id1).unwrap(); let wallet_info2 = manager.get_wallet_info(&wallet_id2).unwrap(); assert_eq!(wallet_info1.last_processed_height(), 12345); - assert_eq!(wallet_info2.last_processed_height(), 12345); - assert_eq!(wallet_info1.synced_height(), 0); - assert_eq!(wallet_info2.synced_height(), 0); - - // Update synced height - should propagate to all wallets without touching last_processed_height - manager.update_synced_height(20000); - assert_eq!(manager.synced_height(), 20000); + assert_eq!(wallet_info2.last_processed_height(), 0); - for wallet_info in manager.get_all_wallet_infos().values() { - assert_eq!(wallet_info.last_processed_height(), 12345); - assert_eq!(wallet_info.synced_height(), 20000); - } - - // Update wallets individually to different last-processed heights - let wallet_info1 = manager.get_wallet_info_mut(&wallet_id1).unwrap(); - wallet_info1.update_last_processed_height(30000); + // Per-wallet synced-height updates only touch the addressed wallet. + manager.update_wallet_synced_height(&wallet_id1, 12000); + let wallet_info1 = manager.get_wallet_info(&wallet_id1).unwrap(); + let wallet_info2 = manager.get_wallet_info(&wallet_id2).unwrap(); + assert_eq!(wallet_info1.synced_height(), 12000); + assert_eq!(wallet_info2.synced_height(), 0); + // Aggregate `synced_height()` is `min` across wallets, so wallet 2 holds it at 0. 
+ assert_eq!(manager.synced_height(), 0); - let wallet_info2 = manager.get_wallet_info_mut(&wallet_id2).unwrap(); - wallet_info2.update_last_processed_height(25000); + // Advance wallet 2 too. Aggregate min jumps to wallet 2's new value. + manager.update_wallet_synced_height(&wallet_id2, 11000); + assert_eq!(manager.synced_height(), 11000); - // Verify each wallet has its own last_processed_height and manager reports the max + // Wallets advance independently. Aggregate `last_processed_height()` is `max`. + manager.update_wallet_last_processed_height(&wallet_id2, 25000); let wallet_info1 = manager.get_wallet_info(&wallet_id1).unwrap(); let wallet_info2 = manager.get_wallet_info(&wallet_id2).unwrap(); - assert_eq!(wallet_info1.last_processed_height(), 30000); + assert_eq!(wallet_info1.last_processed_height(), 12345); assert_eq!(wallet_info2.last_processed_height(), 25000); - assert_eq!(manager.last_processed_height(), 30000); + assert_eq!(manager.last_processed_height(), 25000); - // Manager synced-height update syncs across all wallets - manager.update_synced_height(40000); - let wallet_info1 = manager.get_wallet_info(&wallet_id1).unwrap(); + // Per-wallet updates are monotonic. Values below the current are ignored. + manager.update_wallet_last_processed_height(&wallet_id2, 10); + manager.update_wallet_synced_height(&wallet_id2, 10); let wallet_info2 = manager.get_wallet_info(&wallet_id2).unwrap(); - assert_eq!(wallet_info1.last_processed_height(), 30000); assert_eq!(wallet_info2.last_processed_height(), 25000); - assert_eq!(wallet_info1.synced_height(), 40000); - assert_eq!(wallet_info2.synced_height(), 40000); + assert_eq!(wallet_info2.synced_height(), 11000); + + // `wallets_behind(height)` lists wallets with `synced_height < height`. + let behind_at_12500 = manager.wallets_behind(12500); + assert!(behind_at_12500.contains(&wallet_id1)); + assert!(behind_at_12500.contains(&wallet_id2)); + // A wallet at exactly `height` is not behind. 
wallet_id1 sits at 12000, + // wallet_id2 sits at 11000. + let behind_at_12000 = manager.wallets_behind(12000); + assert!(!behind_at_12000.contains(&wallet_id1)); + assert!(behind_at_12000.contains(&wallet_id2)); + let behind_at_500 = manager.wallets_behind(500); + assert!(behind_at_500.is_empty()); } diff --git a/key-wallet-manager/tests/spv_integration_tests.rs b/key-wallet-manager/tests/spv_integration_tests.rs index 71b3bbfab..d30cb12c0 100644 --- a/key-wallet-manager/tests/spv_integration_tests.rs +++ b/key-wallet-manager/tests/spv_integration_tests.rs @@ -8,8 +8,17 @@ use key_wallet::wallet::initialization::WalletAccountCreationOptions; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; use key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; use key_wallet::Network; -use key_wallet_manager::WalletInterface; -use key_wallet_manager::WalletManager; +use key_wallet_manager::{BlockProcessingResult, WalletId, WalletInterface, WalletManager}; +use std::collections::BTreeSet; + +async fn process_block_all_wallets( + manager: &mut WalletManager, + block: &Block, + height: u32, +) -> BlockProcessingResult { + let wallet_ids: BTreeSet = manager.list_wallets().into_iter().copied().collect(); + manager.process_block_for_wallets(block, height, &wallet_ids).await +} #[tokio::test] async fn test_block_processing() { @@ -29,7 +38,7 @@ async fn test_block_processing() { let tx3 = Transaction::dummy(&external, 0..0, &[300_000]); let block = Block::dummy(100, vec![tx1.clone(), tx2.clone(), tx3.clone()]); - let result = manager.process_block(&block, 100).await; + let result = process_block_all_wallets(&mut manager, &block, 100).await; // Both transactions should be new (first time seen) assert_eq!(result.new_txids.len(), 2); @@ -38,13 +47,14 @@ async fn test_block_processing() { assert!(!result.new_txids.contains(&tx3.txid())); // No existing transactions during initial processing assert!(result.existing_txids.is_empty()); - 
assert_eq!(result.new_addresses.len(), 2); + let new_addresses: Vec<_> = result.all_new_addresses().cloned().collect(); + assert_eq!(new_addresses.len(), 2); let addresses_after = manager.monitored_addresses(); let actual_increase = addresses_after.len() - addresses_before.len(); - assert_eq!(result.new_addresses.len(), actual_increase); + assert_eq!(new_addresses.len(), actual_increase); - for new_addr in &result.new_addresses { + for new_addr in &new_addresses { assert!(addresses_after.contains(new_addr)); } } @@ -61,7 +71,7 @@ async fn test_block_processing_result_empty() { let tx2 = Transaction::dummy(&external, 0..0, &[200_000]); let block = Block::dummy(100, vec![tx1, tx2]); - let result = manager.process_block(&block, 100).await; + let result = process_block_all_wallets(&mut manager, &block, 100).await; assert!(result.new_txids.is_empty()); assert!(result.existing_txids.is_empty()); @@ -101,7 +111,7 @@ async fn test_height_updated_after_block_processing() { for height in [1000, 2000, 3000] { let tx = Transaction::dummy(&Address::dummy(Network::Testnet, 0), 0..0, &[100000]); let block = Block::dummy(height, vec![tx]); - manager.process_block(&block, height).await; + process_block_all_wallets(&mut manager, &block, height).await; assert_wallet_heights(&manager, height); } } @@ -138,7 +148,7 @@ async fn test_immature_balance_matures_during_block_processing() { // Process the coinbase at height 1000 let coinbase_height = 1000; let coinbase_block = Block::dummy(coinbase_height, vec![coinbase_tx.clone()]); - manager.process_block(&coinbase_block, coinbase_height).await; + process_block_all_wallets(&mut manager, &coinbase_block, coinbase_height).await; // Verify the coinbase is detected and stored as immature let wallet_info = manager.get_wallet_info(&wallet_id).expect("Wallet info should exist"); @@ -157,7 +167,7 @@ async fn test_immature_balance_matures_during_block_processing() { let tx = Transaction::dummy(&Address::dummy(Network::Regtest, 0), 0..0, &[1000]); 
for height in (coinbase_height + 1)..maturity_height { let block = Block::dummy(height, vec![tx.clone()]); - manager.process_block(&block, height).await; + process_block_all_wallets(&mut manager, &block, height).await; } // Verify still immature just before maturity @@ -170,7 +180,7 @@ async fn test_immature_balance_matures_during_block_processing() { // Process the maturity block let maturity_block = Block::dummy(maturity_height, vec![tx.clone()]); - manager.process_block(&maturity_block, maturity_height).await; + process_block_all_wallets(&mut manager, &maturity_block, maturity_height).await; // Verify the coinbase has matured let wallet_info = manager.get_wallet_info(&wallet_id).expect("Wallet info should exist"); @@ -201,7 +211,7 @@ async fn test_block_rescan_marks_transactions_as_existing() { let block = Block::dummy(100, vec![tx1.clone()]); // First processing - transaction should be new - let result1 = manager.process_block(&block, 100).await; + let result1 = process_block_all_wallets(&mut manager, &block, 100).await; assert_eq!(result1.new_txids.len(), 1, "First processing should have 1 new transaction"); assert!( @@ -215,7 +225,7 @@ async fn test_block_rescan_marks_transactions_as_existing() { let tx_history_count = wallet_info.transaction_history().len(); // Second processing (simulating rescan) - transaction should be existing - let result2 = manager.process_block(&block, 100).await; + let result2 = process_block_all_wallets(&mut manager, &block, 100).await; assert!(result2.new_txids.is_empty(), "Rescan should have no new transactions"); assert_eq!(result2.existing_txids.len(), 1, "Rescan should have 1 existing transaction"); From a8e381adc8bd6ae0abfb3b605fcdd7ff8607ce57 Mon Sep 17 00:00:00 2001 From: QuantumExplorer Date: Tue, 28 Apr 2026 13:20:14 +0800 Subject: [PATCH 2/2] chore: merge v0.42-dev into feat/per-wallet-filter-scan (#698) * chore(dash-spv): refresh masternode seed files (#695) Co-authored-by: QuantumExplorer 
<11468583+QuantumExplorer@users.noreply.github.com> * feat: make wallet events atomic (#696) * feat: make wallet events atomic Reshape `WalletEvent` so each variant carries the records or context needed to persist a wallet update atomically off a single event, alongside the post-change balance. The variant set is now: - `TransactionReceived { wallet_id, record, balance }`. Fires when the wallet first sees an off-chain transaction. - `TransactionStatusChanged { wallet_id, txid, context, balance }`. Fires when a known off-chain transaction has its state change. Currently fires only for InstantSend locks. - `BlockUpdate { wallet_id, height, inserted, updated, matured, balance }`. Carries records bucketed by what happened to them in the block, plus the post-block balance. - `SyncHeightUpdate { wallet_id, height }`. Marks a filter-batch checkpoint. `TransactionRecord` carries `account_type` directly, identifying the owning account. `WalletInfoInterface` gains a `matured_coinbase_records` method that enumerates coinbase records crossing the maturity threshold during a height advance, populating `BlockUpdate.matured`. The FFI groups the flattened account-discriminator fields into an `FFIAccountType` struct and renames the prior discriminant enum to `FFIAccountKind`. * fix: record balance before bumping `block_processed_wallet_count` Tests wait on `block_processed_wallet_count` and then read `last_confirmed`/`last_unconfirmed`. Bumping the counter before storing the balance snapshot left those reads racey. Reorder so the balance is recorded first. Addresses CodeRabbit review comment on PR #696 https://github.com/dashpay/rust-dashcore/pull/696#discussion_r3148723093 * fix: place `IdentityTopUp.registration_index` in `index_secondary` The `FFIAccountType` doc states `index_secondary` carries `registration_index` for `IdentityTopUp` and `index = 0` for variants without a meaningful primary index, matching the parallel encoding in `FFIAccountKind::from_account_type`. 
The `From<&AccountType>` impl wrote `registration_index` into `index` instead, breaking the documented FFI contract. Addresses CodeRabbit review comment on PR #696 https://github.com/dashpay/rust-dashcore/pull/696#discussion_r3148723127 * fix: route confirmation backfills to `new_records` `is_new` is wallet-wide (set on the first matching account, then breaks), so the per-account `else` branch can run for an account that did not previously hold the record. `confirm_transaction` backfills via `record_transaction` in that case, but the post-call record was always pushed onto `updated_records`, breaking the atomic `inserted`/`updated` contract consumed by `WalletEvent::BlockUpdate`. Capture `existed_before` per account and route to `new_records` when the record was just created. Addresses CodeRabbit review comment on PR #696 https://github.com/dashpay/rust-dashcore/pull/696#discussion_r3148723134 * refactor(key-wallet-manager): extract `finalize_block_advance` helper `process_block` and `update_last_processed_height` duplicated the entire balance-snapshot, prior-heights collection, matured-coinbase window, height advance, and per-wallet `BlockUpdate` emission. Extract the shared tail into a private `WalletManager::finalize_block_advance` helper that takes the inserted/updated maps. `update_last_processed_height` becomes a one-line call with empty maps; `process_block` keeps only its txdata loop before delegating. * refactor: rename wallet events for clearer semantics Rename `WalletEvent` variants and the matching FFI callbacks to past-participle names that say what happened, replacing vague "Update" suffixes: - `TransactionReceived` -> `TransactionDetected`. "Received" implied incoming funds, but the event fires for any first-time off-chain sighting (incoming or outgoing). - `TransactionStatusChanged` -> `TransactionInstantLocked`. The event only ever fires for an InstantSend lock applied to a known mempool tx, so name it for what it actually is. 
Drop the `status: TransactionContext` field and carry the `InstantLock` directly. - `BlockUpdate` -> `BlockProcessed`. Mirrors `process_block` and matches the past-participle pattern. - `SyncHeightUpdate` -> `SyncHeightAdvanced`. Conveys monotonic forward motion. FFI rename mirrors the Rust side: the IS callback now takes `islock_data: *const u8` + `islock_len: usize` instead of an `FFITransactionContext`, removing a discriminant that was always `InstantSend`. The wallet-side `OnBlockProcessedCallback` becomes `OnWalletBlockProcessedCallback` to disambiguate from the existing sync-event type with the same name. * fix: record balance before bumping IS-locked counter in test callback Addresses CodeRabbit review comment on PR #696 https://github.com/dashpay/rust-dashcore/pull/696#pullrequestreview-4185234563 The instant_locked callback bumped `transaction_instant_send_locked_count` before calling `record_balance`. Tests that wait on the counter and then read `last_confirmed`/`last_unconfirmed` could observe the previous balance snapshot. Match the ordering used by the other callbacks: store the balance first, then bump the counter. * fix: backfill missing transaction record in InstantSend path Addresses CodeRabbit review comment on PR #696 https://github.com/dashpay/rust-dashcore/pull/696#pullrequestreview-4185234563 The IS-lock branch in `WalletTransactionChecker::check_core_transaction` only updated accounts that already held a `TransactionRecord` for the txid. When wallet-level `is_new` was `false` (because at least one account had the record) but another matched account did not, the latter was silently skipped: no record was created and `mark_utxos_instant_send` ran against an empty UTXO set on that account. Mirror the confirmation path: when the affected account lacks the record, call `record_transaction` to register the record and its UTXOs, then mark them IS-locked. This ordering ensures the freshly registered UTXOs receive the IS-lock flag too. 
The backfilled record is pushed into `new_records` to match the existing convention from commit 659a6d5. Add `test_instantsend_backfills_missing_record_in_other_account` covering the multi-account scenario. --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: QuantumExplorer <11468583+QuantumExplorer@users.noreply.github.com> Co-authored-by: Kevin Rombach <35775977+xdustinface@users.noreply.github.com> Co-authored-by: Claude Opus 4.7 (1M context) --- dash-network-seeds/seeds/mainnet.txt | 92 +- dash-network-seeds/seeds/testnet.txt | 14 +- dash-spv-ffi/src/bin/ffi_cli.rs | 119 ++- dash-spv-ffi/src/callbacks.rs | 191 +++- dash-spv-ffi/src/lib.rs | 6 + dash-spv-ffi/tests/dashd_sync/callbacks.rs | 182 +++- dash-spv-ffi/tests/dashd_sync/context.rs | 4 +- .../tests/dashd_sync/tests_callback.rs | 258 +++-- .../tests/dashd_sync/tests_transaction.rs | 67 +- dash-spv/tests/dashd_sync/helpers.rs | 18 +- dash-spv/tests/dashd_sync/tests_mempool.rs | 6 +- key-wallet-ffi/FFI_API.md | 28 +- key-wallet-ffi/src/account.rs | 28 +- key-wallet-ffi/src/account_collection.rs | 20 +- .../src/account_derivation_tests.rs | 8 +- key-wallet-ffi/src/account_tests.rs | 38 +- key-wallet-ffi/src/address_pool.rs | 12 +- key-wallet-ffi/src/managed_account.rs | 205 +++- key-wallet-ffi/src/transaction_checking.rs | 2 +- key-wallet-ffi/src/types.rs | 98 +- key-wallet-ffi/src/wallet.rs | 30 +- key-wallet-ffi/src/wallet_tests.rs | 6 +- .../tests/test_managed_account_collection.rs | 18 +- key-wallet-manager/src/accessors.rs | 21 +- key-wallet-manager/src/event_tests.rs | 974 ++++++++---------- key-wallet-manager/src/events.rs | 142 ++- key-wallet-manager/src/lib.rs | 41 +- key-wallet-manager/src/process_block.rs | 250 ++++- key-wallet-manager/src/test_helpers.rs | 71 -- key-wallet/src/managed_account/mod.rs | 1 + .../src/managed_account/transaction_record.rs | 22 +- key-wallet/src/tests/spent_outpoints_tests.rs | 6 +- 
.../transaction_checking/account_checker.rs | 12 +- .../transaction_checking/wallet_checker.rs | 161 ++- .../wallet_info_interface.rs | 38 + 35 files changed, 2010 insertions(+), 1179 deletions(-) diff --git a/dash-network-seeds/seeds/mainnet.txt b/dash-network-seeds/seeds/mainnet.txt index e1adb2e85..c893b6160 100644 --- a/dash-network-seeds/seeds/mainnet.txt +++ b/dash-network-seeds/seeds/mainnet.txt @@ -1,10 +1,10 @@ -# Auto-generated by masternode-seeds-fetcher on 2026-04-23T09:43:50Z for mainnet +# Auto-generated by masternode-seeds-fetcher on 2026-04-27T07:06:07Z for mainnet # Source: Dash P2P network (mnlistdiff) -# Primary peer: 135.181.53.196:9999 -# Tip block hash: 0000000000000032859abaadd7d3a1e63c960b579b6b59f783af122859ab642c -# Tip block height: 2459693 -# 2034 seeds (1674 regular + 360 evo) of 2934 total masternodes, valid-only -# Probe summary: core_reachable=1987/2034 platform_reachable=352/360 ssl_valid=294/360 +# Primary peer: 142.93.143.137:9999 +# Tip block hash: 000000000000001aa9945193ba7c284f3644522f3b3011a082109ee206b1a963 +# Tip block height: 2461833 +# 2036 seeds (1679 regular + 357 evo) of 2936 total masternodes, valid-only +# Probe summary: core_reachable=1981/2036 platform_reachable=353/357 ssl_valid=296/357 # Columns: # Values: core_reach/plat_reach=ok|timeout|refused|error|?, core_sync=sync|-N|+N|?, plat_live=ok|none|?|-, plat_ssl=valid|expired|self-signed|untrusted|no-handshake|?|- # Do not edit manually — refreshed weekly by .github/workflows/update-masternode-seeds.yml @@ -15,8 +15,8 @@ regular 2.57.241.178:9999 - ok sync - - - evo 2.59.156.192:9999 443 ok sync ok ok valid regular 2.59.219.54:9999 - ok sync - - - regular 2.233.120.35:9999 - ok sync - - - -regular 5.2.67.190:9999 - error ? - - - -regular 5.2.73.58:9999 - error ? 
- - - +regular 5.2.67.190:9999 - ok sync - - - +regular 5.2.73.58:9999 - ok sync - - - regular 5.9.237.32:9999 - ok sync - - - regular 5.9.237.37:9999 - ok sync - - - regular 5.35.103.19:9999 - ok sync - - - @@ -32,32 +32,32 @@ regular 5.161.110.79:9999 - ok sync - - - evo 5.189.145.80:9999 443 ok sync ok ok valid evo 5.189.151.7:9999 443 ok sync ok ok valid evo 5.189.164.253:9999 443 ok sync ok ok valid -regular 5.230.228.238:9999 - ok sync - - - +regular 5.230.228.238:9999 - timeout ? - - - regular 5.230.228.239:9999 - timeout ? - - - regular 5.230.229.1:9999 - timeout ? - - - -regular 5.230.229.2:9999 - ok sync - - - -regular 5.230.229.3:9999 - ok sync - - - +regular 5.230.229.2:9999 - timeout ? - - - +regular 5.230.229.3:9999 - timeout ? - - - regular 5.230.229.4:9999 - timeout ? - - - regular 5.230.229.5:9999 - timeout ? - - - regular 5.230.229.6:9999 - timeout ? - - - -regular 5.230.229.7:9999 - ok sync - - - +regular 5.230.229.7:9999 - timeout ? - - - regular 5.230.229.8:9999 - timeout ? - - - regular 5.230.229.9:9999 - timeout ? - - - regular 5.230.229.10:9999 - timeout ? - - - regular 5.230.229.11:9999 - timeout ? - - - -regular 5.230.229.12:9999 - ok sync - - - +regular 5.230.229.12:9999 - timeout ? - - - regular 5.230.229.13:9999 - timeout ? - - - -regular 5.230.229.14:9999 - ok sync - - - +regular 5.230.229.14:9999 - timeout ? - - - regular 5.230.229.15:9999 - timeout ? - - - -regular 5.230.229.16:9999 - ok sync - - - +regular 5.230.229.16:9999 - timeout ? - - - regular 5.230.229.17:9999 - timeout ? - - - -regular 5.230.229.18:9999 - ok sync - - - +regular 5.230.229.18:9999 - timeout ? - - - regular 5.230.229.19:9999 - timeout ? - - - -regular 5.230.229.20:9999 - ok sync - - - -regular 5.230.229.21:9999 - ok sync - - - +regular 5.230.229.20:9999 - timeout ? - - - +regular 5.230.229.21:9999 - timeout ? - - - regular 5.230.229.22:9999 - timeout ? - - - regular 5.230.229.23:9999 - timeout ? 
- - - -regular 5.230.229.24:9999 - ok sync - - - +regular 5.230.229.24:9999 - timeout ? - - - regular 5.230.229.25:9999 - timeout ? - - - regular 5.230.229.27:9999 - timeout ? - - - regular 8.219.0.187:9999 - ok sync - - - @@ -195,6 +195,7 @@ regular 8.222.149.162:9999 - ok sync - - - regular 8.222.149.195:9999 - ok sync - - - regular 8.222.150.74:9999 - ok sync - - - regular 8.222.151.173:9999 - ok sync - - - +regular 15.188.53.12:9999 - error ? - - - evo 15.235.102.215:9999 443 ok sync refused none no-handshake evo 15.235.102.216:9999 443 ok sync refused none no-handshake regular 23.88.22.66:9999 - ok sync - - - @@ -221,6 +222,7 @@ evo 31.220.91.60:9999 443 ok sync ok ok valid regular 34.209.37.222:9999 - ok sync - - - regular 34.246.176.25:9999 - ok sync - - - regular 35.174.217.98:9999 - ok sync - - - +regular 35.180.128.14:9999 - ok sync - - - evo 37.27.67.154:9999 443 ok sync ok ok expired evo 37.27.67.156:9999 443 ok sync ok ok expired evo 37.27.67.159:9999 443 ok sync ok ok expired @@ -391,6 +393,7 @@ evo 49.13.28.255:9999 443 ok sync ok ok valid regular 49.13.140.167:9999 - ok sync - - - evo 49.13.154.121:9999 443 ok sync ok ok expired evo 49.13.193.251:9999 443 ok sync ok ok valid +regular 49.13.197.215:9999 - ok sync - - - regular 49.13.209.157:9999 - ok sync - - - regular 49.13.212.202:9999 - ok sync - - - regular 49.13.212.210:9999 - ok sync - - - @@ -413,7 +416,7 @@ evo 54.69.95.118:9999 443 ok sync ok ok valid evo 57.128.212.163:9999 443 ok sync ok ok expired evo 57.131.28.197:9999 443 ok sync ok ok valid regular 62.60.244.174:9999 - ok sync - - - -evo 62.171.133.125:9999 443 ok sync ok none expired +evo 62.171.133.125:9999 443 ok sync ok ok expired evo 62.171.136.93:9999 443 ok sync ok ok valid evo 62.171.136.245:9999 443 ok sync ok ok valid evo 62.171.138.186:9999 443 ok sync ok ok valid @@ -469,8 +472,8 @@ regular 65.108.145.8:9999 - ok sync - - - regular 65.108.150.87:9999 - ok sync - - - regular 65.108.221.24:9999 - ok sync - - - evo 
65.108.246.145:9999 443 ok sync ok ok valid +evo 65.109.84.201:9999 443 ok sync ok ok expired evo 65.109.84.203:9999 443 ok sync ok ok expired -evo 65.109.84.204:9999 443 ok sync ok ok expired regular 65.109.93.110:9999 - ok sync - - - evo 65.109.108.138:9999 443 ok sync ok ok expired evo 65.109.108.139:9999 443 ok sync ok ok expired @@ -533,8 +536,8 @@ evo 78.141.225.100:9999 443 ok sync ok ok valid regular 78.141.240.214:9999 - ok sync - - - regular 80.208.230.144:9999 - ok sync - - - regular 80.209.233.182:9999 - ok sync - - - -regular 80.209.239.129:9999 - error ? - - - -evo 80.240.19.200:9999 443 error ? timeout none no-handshake +regular 80.209.239.129:9999 - ok sync - - - +evo 80.240.19.200:9999 443 ok sync ok ok valid regular 80.240.132.231:9999 - ok sync - - - regular 80.240.135.83:9999 - ok sync - - - regular 80.249.147.8:9999 - ok sync - - - @@ -714,12 +717,12 @@ regular 89.46.42.216:9999 - ok sync - - - regular 89.46.42.218:9999 - ok sync - - - regular 89.47.167.131:9999 - ok sync - - - regular 89.117.3.242:9999 - ok sync - - - -evo 89.117.57.27:9999 443 ok sync ok ok expired +evo 89.117.57.27:9999 443 error ? timeout none no-handshake evo 89.125.50.14:9999 443 ok sync ok ok valid evo 89.125.50.206:9999 443 ok sync ok ok valid evo 89.125.209.27:9999 443 ok sync ok ok valid evo 89.125.209.69:9999 443 ok sync ok ok valid -evo 89.125.209.106:9999 443 ok sync ok ok valid +evo 89.125.209.106:9999 443 ok sync ok none valid evo 89.125.209.110:9999 443 ok sync ok ok valid evo 89.125.209.120:9999 443 ok sync ok ok valid evo 89.125.209.133:9999 443 ok sync ok ok valid @@ -738,7 +741,6 @@ evo 91.198.108.38:9999 443 ok sync ok ok valid regular 91.198.108.39:9999 - ok sync - - - regular 91.198.108.40:9999 - ok sync - - - evo 91.199.149.177:9999 443 ok sync ok ok expired -evo 93.95.115.187:9999 443 error ? 
ok ok valid evo 93.115.172.36:9999 443 ok sync ok ok valid evo 93.115.172.37:9999 443 ok sync ok ok valid evo 93.115.172.38:9999 443 ok sync ok ok valid @@ -812,6 +814,7 @@ regular 95.217.71.203:9999 - ok sync - - - regular 95.217.71.204:9999 - ok sync - - - regular 95.217.71.207:9999 - ok sync - - - regular 95.217.71.209:9999 - ok sync - - - +regular 95.217.71.210:9999 - ok sync - - - regular 95.217.71.211:9999 - ok sync - - - regular 95.217.125.97:9999 - ok sync - - - regular 95.217.125.98:9999 - ok sync - - - @@ -819,8 +822,8 @@ regular 95.217.125.101:9999 - ok sync - - - regular 95.217.125.103:9999 - ok sync - - - regular 96.30.194.83:9999 - ok sync - - - regular 100.42.182.181:9999 - ok sync - - - -evo 103.214.68.30:9999 443 ok sync timeout none no-handshake -evo 103.214.68.131:9999 443 ok sync timeout none no-handshake +evo 103.214.68.30:9999 443 ok sync ok ok valid +evo 103.214.68.131:9999 443 ok sync ok ok valid regular 104.128.239.50:9999 - ok sync - - - regular 104.128.239.123:9999 - ok sync - - - regular 104.129.51.151:9999 - ok sync - - - @@ -861,7 +864,7 @@ regular 107.170.165.78:9999 - ok sync - - - regular 107.170.171.115:9999 - ok sync - - - regular 107.170.196.35:9999 - ok sync - - - regular 107.170.223.74:9999 - ok sync - - - -regular 107.170.238.241:9999 - error ? - - - +regular 107.170.238.241:9999 - ok sync - - - regular 107.170.242.110:9999 - ok sync - - - regular 107.170.254.160:9999 - ok sync - - - regular 107.172.78.198:9999 - ok sync - - - @@ -869,7 +872,7 @@ regular 107.173.28.208:9999 - ok sync - - - regular 107.174.204.232:9999 - ok sync - - - regular 107.175.206.17:9999 - ok sync - - - regular 107.175.206.125:9999 - ok sync - - - -regular 107.179.202.74:9999 - ok sync - - - +regular 107.179.202.74:9999 - error ? 
- - - regular 107.189.3.74:9999 - ok sync - - - regular 107.191.58.41:9999 - ok sync - - - evo 108.61.165.170:9999 443 ok sync ok ok valid @@ -1092,7 +1095,7 @@ regular 134.209.92.57:9999 - ok sync - - - regular 134.209.96.96:9999 - ok sync - - - regular 134.209.105.161:9999 - ok sync - - - regular 134.209.146.189:9999 - ok sync - - - -regular 134.209.156.141:9999 - ok sync - - - +regular 134.209.156.141:9999 - error ? - - - regular 134.209.158.119:9999 - ok sync - - - regular 134.209.176.109:9999 - ok sync - - - regular 134.209.185.24:9999 - ok sync - - - @@ -1194,7 +1197,7 @@ regular 138.197.131.126:9999 - ok sync - - - regular 138.197.147.28:9999 - ok sync - - - regular 138.197.161.208:9999 - ok sync - - - regular 139.28.97.2:9999 - ok sync - - - -regular 139.59.0.167:9999 - ok sync - - - +regular 139.59.0.167:9999 - error ? - - - regular 139.59.3.197:9999 - ok sync - - - regular 139.59.4.172:9999 - ok sync - - - regular 139.59.22.95:9999 - ok sync - - - @@ -1219,7 +1222,7 @@ evo 139.84.232.129:9999 443 ok sync ok ok valid evo 139.84.236.208:9999 443 ok sync ok ok valid evo 139.99.201.103:9999 443 ok sync ok ok valid regular 139.162.211.76:9999 - ok sync - - - -regular 139.162.215.169:9999 - ok sync - - - +regular 139.162.215.169:9999 - error ? - - - evo 139.180.143.115:9999 443 ok sync ok ok valid regular 139.180.208.106:9999 - ok sync - - - evo 142.44.136.69:9999 443 ok sync ok ok valid @@ -1228,7 +1231,7 @@ regular 142.93.98.38:9999 - ok sync - - - regular 142.93.143.137:9999 - ok sync - - - regular 142.93.154.186:9999 - ok sync - - - regular 142.93.157.112:9999 - ok sync - - - -regular 142.93.215.115:9999 - ok sync - - - +regular 142.93.215.115:9999 - error ? 
- - - regular 142.93.216.91:9999 - ok sync - - - regular 143.110.156.147:9999 - ok sync - - - regular 143.110.183.99:9999 - ok sync - - - @@ -1236,7 +1239,7 @@ regular 143.110.189.48:9999 - ok sync - - - regular 143.110.191.135:9999 - ok sync - - - regular 143.110.248.96:9999 - ok sync - - - regular 143.110.250.48:9999 - ok sync - - - -regular 143.110.250.167:9999 - ok sync - - - +regular 143.110.250.167:9999 - error ? - - - regular 143.198.42.189:9999 - ok sync - - - regular 143.198.74.32:9999 - ok sync - - - regular 143.198.104.135:9999 - ok sync - - - @@ -1274,6 +1277,7 @@ regular 147.182.147.0:9999 - ok sync - - - regular 148.113.202.169:9999 - ok sync - - - regular 148.251.73.224:9999 - ok sync - - - regular 148.251.73.232:9999 - ok sync - - - +regular 148.251.73.234:9999 - ok sync - - - regular 148.251.73.235:9999 - ok sync - - - regular 148.251.73.238:9999 - ok sync - - - regular 149.28.58.97:9999 - ok sync - - - @@ -1404,7 +1408,7 @@ evo 161.97.85.159:9999 443 ok sync ok ok valid evo 161.97.85.182:9999 443 ok sync ok ok valid evo 161.97.88.199:9999 443 ok sync ok ok valid evo 161.97.88.219:9999 443 ok sync ok ok valid -evo 161.97.91.68:9999 443 error ? timeout none no-handshake +evo 161.97.91.68:9999 443 ok sync ok ok valid evo 161.97.91.217:9999 443 ok sync ok ok valid evo 161.97.96.120:9999 443 ok sync ok ok valid evo 161.97.102.156:9999 443 ok sync ok ok valid @@ -1424,15 +1428,15 @@ evo 161.97.173.67:9999 443 ok sync ok ok valid evo 161.97.175.233:9999 443 ok sync ok ok valid evo 161.97.176.38:9999 443 ok sync ok ok valid evo 161.97.179.214:9999 443 ok sync ok ok valid -evo 161.97.180.105:9999 443 error ? timeout none no-handshake +evo 161.97.180.105:9999 443 ok sync ok ok valid evo 161.97.180.182:9999 443 ok sync ok ok valid regular 162.212.35.99:9999 - ok sync - - - evo 162.212.35.100:9999 443 ok sync ok ok valid -regular 162.212.35.101:9999 - error ? - - - +regular 162.212.35.101:9999 - ok sync - - - regular 162.212.35.102:9999 - error ? 
- - - regular 162.212.35.103:9999 - error ? - - - regular 162.212.35.104:9999 - error ? - - - -regular 162.212.35.105:9999 - error ? - - - +regular 162.212.35.105:9999 - ok sync - - - regular 162.212.35.106:9999 - error ? - - - regular 162.212.35.107:9999 - ok sync - - - regular 162.212.35.108:9999 - ok sync - - - @@ -1488,7 +1492,6 @@ regular 167.99.182.250:9999 - ok sync - - - regular 167.99.185.82:9999 - ok sync - - - regular 167.99.199.59:9999 - ok sync - - - regular 167.99.205.145:9999 - ok sync - - - -evo 167.114.153.110:9999 443 ok sync ok ok valid regular 167.114.185.96:9999 - ok sync - - - regular 167.172.45.235:9999 - ok sync - - - regular 167.172.65.155:9999 - ok sync - - - @@ -1560,7 +1563,7 @@ regular 173.199.119.21:9999 - ok sync - - - evo 173.212.196.214:9999 443 ok sync ok ok valid evo 173.212.231.230:9999 443 ok sync ok ok valid evo 173.212.232.90:9999 443 ok sync ok ok valid -evo 173.212.245.118:9999 443 ok sync ok none valid +evo 173.212.245.118:9999 443 ok sync ok ok valid evo 173.212.251.130:9999 443 ok sync ok ok valid regular 173.249.16.95:9999 - ok sync - - - evo 173.249.21.12:9999 443 ok sync ok ok valid @@ -1652,6 +1655,7 @@ regular 178.63.235.196:9999 - ok sync - - - regular 178.63.235.197:9999 - ok sync - - - regular 178.63.235.198:9999 - ok sync - - - regular 178.63.235.199:9999 - ok sync - - - +regular 178.63.235.200:9999 - ok sync - - - regular 178.63.235.201:9999 - ok sync - - - regular 178.63.236.96:9999 - ok sync - - - regular 178.63.236.97:9999 - ok sync - - - @@ -1722,8 +1726,6 @@ regular 185.175.56.212:9999 - ok sync - - - regular 185.185.40.14:9999 - ok sync - - - regular 185.185.40.104:9999 - ok sync - - - regular 185.185.40.115:9999 - ok sync - - - -regular 185.185.40.167:9999 - ok sync - - - -regular 185.185.40.171:9999 - ok sync - - - evo 185.185.80.117:9999 443 ok sync ok ok valid regular 185.193.19.214:9999 - ok sync - - - evo 185.198.234.12:9999 443 ok sync ok ok expired @@ -1768,6 +1770,7 @@ regular 188.40.163.8:9999 - 
ok sync - - - regular 188.40.163.9:9999 - ok sync - - - regular 188.40.163.12:9999 - ok sync - - - regular 188.40.163.15:9999 - ok sync - - - +regular 188.40.163.18:9999 - ok sync - - - regular 188.40.163.21:9999 - ok sync - - - regular 188.40.163.24:9999 - ok sync - - - regular 188.40.163.25:9999 - ok sync - - - @@ -1950,7 +1953,6 @@ regular 194.135.80.33:9999 - ok sync - - - regular 194.135.84.100:9999 - ok sync - - - regular 194.135.94.228:9999 - ok sync - - - evo 194.163.156.190:9999 443 ok sync ok ok valid -evo 194.163.159.171:9999 443 ok sync ok ok valid evo 194.163.166.76:9999 443 ok sync ok ok valid evo 194.163.172.206:9999 443 ok sync ok ok valid regular 194.163.183.132:9999 - ok sync - - - @@ -1971,7 +1973,7 @@ regular 202.182.115.161:9999 - ok sync - - - regular 205.206.173.159:9999 - ok sync - - - regular 205.206.254.21:9999 - ok sync - - - regular 205.206.255.206:9999 - ok sync - - - -regular 206.189.21.120:9999 - error ? - - - +regular 206.189.21.120:9999 - ok sync - - - regular 206.189.28.109:9999 - ok sync - - - regular 206.189.112.246:9999 - ok sync - - - regular 206.189.134.126:9999 - ok sync - - - diff --git a/dash-network-seeds/seeds/testnet.txt b/dash-network-seeds/seeds/testnet.txt index 37c453b77..152d6a463 100644 --- a/dash-network-seeds/seeds/testnet.txt +++ b/dash-network-seeds/seeds/testnet.txt @@ -1,14 +1,14 @@ -# Auto-generated by masternode-seeds-fetcher on 2026-04-23T09:36:05Z for testnet +# Auto-generated by masternode-seeds-fetcher on 2026-04-27T07:08:19Z for testnet # Source: Dash P2P network (mnlistdiff) -# Primary peer: 68.67.122.21:19999 -# Tip block hash: 00000019c99229ca7308c36585d1986897c1e559c11f257ba3344deadb3aebd9 -# Tip block height: 1463660 +# Primary peer: 68.67.122.75:19999 +# Tip block hash: 0000010335a26fb8e304493a3c0a4ac0b9781ce5454248c8d1e86425eaa37bb6 +# Tip block height: 1466079 # 86 seeds (55 regular + 31 evo) of 543 total masternodes, valid-only -# Probe summary: core_reachable=85/86 platform_reachable=31/31 
ssl_valid=28/31 +# Probe summary: core_reachable=86/86 platform_reachable=31/31 ssl_valid=29/31 # Columns: # Values: core_reach/plat_reach=ok|timeout|refused|error|?, core_sync=sync|-N|+N|?, plat_live=ok|none|?|-, plat_ssl=valid|expired|self-signed|untrusted|no-handshake|?|- # Do not edit manually — refreshed weekly by .github/workflows/update-masternode-seeds.yml -evo 68.67.122.1:19999 1443 ok sync ok ok expired +evo 68.67.122.1:19999 1443 ok sync ok ok valid evo 68.67.122.2:19999 1443 ok sync ok ok valid evo 68.67.122.3:19999 1443 ok sync ok ok valid evo 68.67.122.4:19999 1443 ok sync ok ok valid @@ -63,7 +63,7 @@ regular 68.67.122.52:19999 - ok sync - - - regular 68.67.122.53:19999 - ok sync - - - regular 68.67.122.54:19999 - ok sync - - - regular 68.67.122.55:19999 - ok sync - - - -regular 68.67.122.56:19999 - timeout ? - - - +regular 68.67.122.56:19999 - ok sync - - - regular 68.67.122.57:19999 - ok sync - - - regular 68.67.122.58:19999 - ok sync - - - regular 68.67.122.59:19999 - ok sync - - - diff --git a/dash-spv-ffi/src/bin/ffi_cli.rs b/dash-spv-ffi/src/bin/ffi_cli.rs index 1ffd967be..42028d837 100644 --- a/dash-spv-ffi/src/bin/ffi_cli.rs +++ b/dash-spv-ffi/src/bin/ffi_cli.rs @@ -5,8 +5,7 @@ use std::ptr; use clap::{Arg, ArgAction, Command}; use dash_network::ffi::FFINetwork; use dash_spv_ffi::*; -use key_wallet_ffi::managed_account::FFITransactionRecord; -use key_wallet_ffi::types::FFITransactionContext; +use key_wallet_ffi::types::FFIBalance; use key_wallet_ffi::wallet_manager::wallet_manager_add_wallet_from_mnemonic; use key_wallet_ffi::FFIError; @@ -156,63 +155,108 @@ extern "C" fn on_peers_updated(connected_count: u32, best_height: u32, _user_dat // Wallet Event Callbacks // ============================================================================ -extern "C" fn on_transaction_received( +fn short_wallet(wallet_id: *const c_char) -> String { + let s = ffi_string_to_rust(wallet_id); + if s.len() > 8 { + s[..8].to_string() + } else { + s + } +} + +fn 
read_balance(balance: *const FFIBalance) -> FFIBalance { + if balance.is_null() { + tracing::warn!("read_balance: null pointer, returning zero balance"); + return FFIBalance::default(); + } + unsafe { *balance } +} + +extern "C" fn on_transaction_detected( wallet_id: *const c_char, - account_index: u32, record: *const FFITransactionRecord, + balance: *const FFIBalance, _user_data: *mut c_void, ) { - let wallet_str = ffi_string_to_rust(wallet_id); - let wallet_short = if wallet_str.len() > 8 { - &wallet_str[..8] - } else { - &wallet_str - }; + let wallet_short = short_wallet(wallet_id); if record.is_null() { - println!( - "[Wallet] TX received: wallet={}..., account={}, record=null", - wallet_short, account_index - ); + println!("[Wallet] TX detected: wallet={}..., record=null", wallet_short); return; } let r = unsafe { &*record }; + let b = read_balance(balance); let txid_hex = hex::encode(r.txid); println!( - "[Wallet] TX received: wallet={}..., txid={}, account={}, amount={} duffs, tx_size={}", - wallet_short, txid_hex, account_index, r.net_amount, r.tx_len + "[Wallet] TX detected: wallet={}..., txid={}, account_kind={:?}, account_index={}, amount={} duffs, balance[confirmed={}, unconfirmed={}]", + wallet_short, + txid_hex, + r.account_type.kind, + r.account_type.index, + r.net_amount, + b.confirmed, + b.unconfirmed ); } -extern "C" fn on_transaction_status_changed( - _wallet_id: *const c_char, +extern "C" fn on_transaction_instant_locked( + wallet_id: *const c_char, txid: *const [u8; 32], - status: FFITransactionContext, + _islock_data: *const u8, + islock_len: usize, + balance: *const FFIBalance, _user_data: *mut c_void, ) { - let txid_hex = unsafe { hex::encode(*txid) }; - println!("[Wallet] TX status changed: txid={}, status={:?}", txid_hex, status); + let wallet_short = short_wallet(wallet_id); + if txid.is_null() { + println!("[Wallet] TX instant-locked: wallet={}..., txid=null", wallet_short); + return; + } + let txid_bytes = unsafe { &*txid }; + let b = 
read_balance(balance); + let txid_hex = hex::encode(txid_bytes); + println!( + "[Wallet] TX instant-locked: wallet={}..., txid={}, islock_len={}, balance[confirmed={}, unconfirmed={}]", + wallet_short, txid_hex, islock_len, b.confirmed, b.unconfirmed + ); } -extern "C" fn on_balance_updated( +extern "C" fn on_wallet_block_processed( wallet_id: *const c_char, - spendable: u64, - unconfirmed: u64, - immature: u64, - locked: u64, + height: u32, + _inserted: *const FFITransactionRecord, + inserted_count: u32, + _updated: *const FFITransactionRecord, + updated_count: u32, + _matured: *const FFITransactionRecord, + matured_count: u32, + balance: *const FFIBalance, _user_data: *mut c_void, ) { - let wallet_str = ffi_string_to_rust(wallet_id); - let wallet_short = if wallet_str.len() > 8 { - &wallet_str[..8] - } else { - &wallet_str - }; + let wallet_short = short_wallet(wallet_id); + let b = read_balance(balance); println!( - "[Wallet] Balance updated: wallet={}..., spendable={}, unconfirmed={}, immature={}, locked={}", - wallet_short, spendable, unconfirmed, immature, locked + "[Wallet] Block processed: wallet={}..., height={}, inserted={}, updated={}, matured={}, balance[confirmed={}, unconfirmed={}, immature={}, locked={}]", + wallet_short, + height, + inserted_count, + updated_count, + matured_count, + b.confirmed, + b.unconfirmed, + b.immature, + b.locked ); } +extern "C" fn on_sync_height_advanced( + wallet_id: *const c_char, + height: u32, + _user_data: *mut c_void, +) { + let wallet_short = short_wallet(wallet_id); + println!("[Wallet] Sync height advanced: wallet={}..., height={}", wallet_short, height); +} + // ============================================================================ // Progress Callback // ============================================================================ @@ -434,9 +478,10 @@ fn main() { user_data: ptr::null_mut(), }, wallet: FFIWalletEventCallbacks { - on_transaction_received: Some(on_transaction_received), - 
on_transaction_status_changed: Some(on_transaction_status_changed), - on_balance_updated: Some(on_balance_updated), + on_transaction_detected: Some(on_transaction_detected), + on_transaction_instant_locked: Some(on_transaction_instant_locked), + on_block_processed: Some(on_wallet_block_processed), + on_sync_height_advanced: Some(on_sync_height_advanced), user_data: ptr::null_mut(), }, error: FFIClientErrorCallback { diff --git a/dash-spv-ffi/src/callbacks.rs b/dash-spv-ffi/src/callbacks.rs index 31c098316..2f5990c9d 100644 --- a/dash-spv-ffi/src/callbacks.rs +++ b/dash-spv-ffi/src/callbacks.rs @@ -12,11 +12,11 @@ use dash_spv::sync::{SyncEvent, SyncProgress}; use dash_spv::EventHandler; use dashcore::hashes::Hash; use key_wallet_ffi::managed_account::FFITransactionRecord; -use key_wallet_ffi::types::FFITransactionContext; +use key_wallet_ffi::types::FFIBalance; use key_wallet_manager::WalletEvent; use std::ffi::CString; -use std::ops::Deref; use std::os::raw::{c_char, c_void}; +use std::ptr; // ============================================================================ // Sync Event Types (for FFISyncEventCallbacks) @@ -532,63 +532,93 @@ impl FFINetworkEventCallbacks { // FFIWalletEventCallbacks - One callback per WalletEvent variant // ============================================================================ -/// Callback for WalletEvent::TransactionReceived +/// Callback for `WalletEvent::TransactionDetected`. /// -/// The `record` pointer is borrowed and only valid for the duration of the -/// callback. Callers must copy any data they need to retain after the callback -/// returns. The record contains all transaction details including serialized -/// transaction bytes, input/output details, and classification metadata. 
-pub type OnTransactionReceivedCallback = Option< +/// Fires when a wallet-relevant transaction is first seen off-chain — either +/// in the mempool, or directly via an InstantSend lock (in that case the +/// record's `context` is `InstantSend(..)`). +/// +/// All pointer parameters are borrowed and only valid for the duration of the +/// callback. `balance` is the wallet's balance *after* the transaction was +/// recorded. +pub type OnTransactionDetectedCallback = Option< extern "C" fn( wallet_id: *const c_char, - account_index: u32, record: *const FFITransactionRecord, + balance: *const FFIBalance, user_data: *mut c_void, ), >; -/// Callback for WalletEvent::TransactionStatusChanged +/// Callback for `WalletEvent::TransactionInstantLocked`. +/// +/// Fires when an InstantSend lock is applied to a previously-seen off-chain +/// wallet-relevant transaction. Consumers already hold the full record from +/// the prior `TransactionDetected`; only the txid, the consensus-serialized +/// `InstantLock` bytes, and the post-change balance are delivered. /// -/// The `wallet_id` string pointer and `txid` hash pointer are borrowed and only -/// valid for the duration of the callback. -pub type OnTransactionStatusChangedCallback = Option< +/// All pointer parameters are borrowed and only valid for the duration of +/// the callback. `balance` is the wallet's balance *after* the change. +pub type OnTransactionInstantLockedCallback = Option< extern "C" fn( wallet_id: *const c_char, txid: *const [u8; 32], - status: FFITransactionContext, + islock_data: *const u8, + islock_len: usize, + balance: *const FFIBalance, user_data: *mut c_void, ), >; -/// Callback for WalletEvent::BalanceUpdated +/// Callback for `WalletEvent::BlockProcessed`. /// -/// The `wallet_id` string pointer is borrowed and only valid for the duration -/// of the callback. Callers must copy the string if they need to retain it -/// after the callback returns. 
-pub type OnBalanceUpdatedCallback = Option< +/// Fires once per wallet affected by a processed block. The three record +/// arrays bucket what happened in this block: `inserted` is records first +/// stored, `updated` is previously-known records confirmed, `matured` is +/// older coinbase records whose maturity threshold was just crossed. Empty +/// arrays are passed as null with a zero count. `balance` is the wallet's +/// balance *after* the block was processed. +/// +/// All array pointers and their contents are borrowed and only valid for the +/// duration of the callback. +pub type OnWalletBlockProcessedCallback = Option< extern "C" fn( wallet_id: *const c_char, - confirmed: u64, - unconfirmed: u64, - immature: u64, - locked: u64, + height: u32, + inserted: *const FFITransactionRecord, + inserted_count: u32, + updated: *const FFITransactionRecord, + updated_count: u32, + matured: *const FFITransactionRecord, + matured_count: u32, + balance: *const FFIBalance, user_data: *mut c_void, ), >; +/// Callback for `WalletEvent::SyncHeightAdvanced`. +/// +/// Fires once per wallet when the filter pipeline commits a batch — the +/// wallet has been scanned up to `height`. Consumers can persist this as a +/// checkpoint atomically with any records/balance already persisted from +/// prior `BlockProcessed` events inside the batch. +pub type OnSyncHeightAdvancedCallback = + Option; + /// Wallet event callbacks - one callback per WalletEvent variant. /// /// Set only the callbacks you're interested in; unset callbacks will be ignored. /// -/// All pointer parameters passed to callbacks (wallet IDs, txids, addresses) -/// are borrowed and only valid for the duration of the callback invocation. -/// Callers must copy any data they need to retain. +/// All pointer parameters passed to callbacks (wallet IDs, txids, records, +/// balances) are borrowed and only valid for the duration of the callback +/// invocation. Callers must copy any data they need to retain. 
#[repr(C)] #[derive(Clone)] pub struct FFIWalletEventCallbacks { - pub on_transaction_received: OnTransactionReceivedCallback, - pub on_transaction_status_changed: OnTransactionStatusChangedCallback, - pub on_balance_updated: OnBalanceUpdatedCallback, + pub on_transaction_detected: OnTransactionDetectedCallback, + pub on_transaction_instant_locked: OnTransactionInstantLockedCallback, + pub on_block_processed: OnWalletBlockProcessedCallback, + pub on_sync_height_advanced: OnSyncHeightAdvancedCallback, pub user_data: *mut c_void, } @@ -599,9 +629,10 @@ unsafe impl Sync for FFIWalletEventCallbacks {} impl Default for FFIWalletEventCallbacks { fn default() -> Self { Self { - on_transaction_received: None, - on_transaction_status_changed: None, - on_balance_updated: None, + on_transaction_detected: None, + on_transaction_instant_locked: None, + on_block_processed: None, + on_sync_height_advanced: None, user_data: std::ptr::null_mut(), } } @@ -696,62 +727,112 @@ impl FFIWalletEventCallbacks { /// Dispatch a WalletEvent to the appropriate callback. 
pub fn dispatch(&self, event: &WalletEvent) { match event { - WalletEvent::TransactionReceived { + WalletEvent::TransactionDetected { wallet_id, - account_index, record, + balance, } => { - if let Some(cb) = self.on_transaction_received { + if let Some(cb) = self.on_transaction_detected { let wallet_id_hex = hex::encode(wallet_id); let c_wallet_id = CString::new(wallet_id_hex).unwrap_or_default(); - - let ffi_record = FFITransactionRecord::from(record.deref()); + let ffi_record = FFITransactionRecord::from(record.as_ref()); + let ffi_balance = FFIBalance::from(*balance); cb( c_wallet_id.as_ptr(), - *account_index, &ffi_record as *const FFITransactionRecord, + &ffi_balance as *const FFIBalance, self.user_data, ); } } - WalletEvent::TransactionStatusChanged { + WalletEvent::TransactionInstantLocked { wallet_id, txid, - status, + instant_lock, + balance, } => { - if let Some(cb) = self.on_transaction_status_changed { + if let Some(cb) = self.on_transaction_instant_locked { let wallet_id_hex = hex::encode(wallet_id); let c_wallet_id = CString::new(wallet_id_hex).unwrap_or_default(); - let txid_bytes = txid.as_byte_array(); - let ffi_ctx = FFITransactionContext::from(status.clone()); + let txid_bytes = *txid.as_byte_array(); + let islock_bytes = dashcore::consensus::serialize(instant_lock); + let ffi_balance = FFIBalance::from(*balance); cb( c_wallet_id.as_ptr(), - txid_bytes as *const [u8; 32], - ffi_ctx, + &txid_bytes as *const [u8; 32], + islock_bytes.as_ptr(), + islock_bytes.len(), + &ffi_balance as *const FFIBalance, self.user_data, ); } } - WalletEvent::BalanceUpdated { + WalletEvent::BlockProcessed { wallet_id, - confirmed, - unconfirmed, - immature, - locked, + height, + inserted, + updated, + matured, + balance, } => { - if let Some(cb) = self.on_balance_updated { + if let Some(cb) = self.on_block_processed { let wallet_id_hex = hex::encode(wallet_id); let c_wallet_id = CString::new(wallet_id_hex).unwrap_or_default(); + let ffi_inserted: Vec = + 
inserted.iter().map(FFITransactionRecord::from).collect(); + let ffi_updated: Vec = + updated.iter().map(FFITransactionRecord::from).collect(); + let ffi_matured: Vec = + matured.iter().map(FFITransactionRecord::from).collect(); + let ffi_balance = FFIBalance::from(*balance); + + // Pass a null pointer when an array is empty so C/Swift + // consumers that null-check before reading don't see a + // non-null dangling pointer paired with a zero count. + let inserted_ptr = if ffi_inserted.is_empty() { + ptr::null() + } else { + ffi_inserted.as_ptr() + }; + let updated_ptr = if ffi_updated.is_empty() { + ptr::null() + } else { + ffi_updated.as_ptr() + }; + let matured_ptr = if ffi_matured.is_empty() { + ptr::null() + } else { + ffi_matured.as_ptr() + }; + cb( c_wallet_id.as_ptr(), - *confirmed, - *unconfirmed, - *immature, - *locked, + *height, + inserted_ptr, + ffi_inserted.len() as u32, + updated_ptr, + ffi_updated.len() as u32, + matured_ptr, + ffi_matured.len() as u32, + &ffi_balance as *const FFIBalance, self.user_data, ); + + drop(ffi_inserted); + drop(ffi_updated); + drop(ffi_matured); + } + } + WalletEvent::SyncHeightAdvanced { + wallet_id, + height, + } => { + if let Some(cb) = self.on_sync_height_advanced { + let wallet_id_hex = hex::encode(wallet_id); + let c_wallet_id = CString::new(wallet_id_hex).unwrap_or_default(); + cb(c_wallet_id.as_ptr(), *height, self.user_data); } } } diff --git a/dash-spv-ffi/src/lib.rs b/dash-spv-ffi/src/lib.rs index d53a16d56..36d7d389f 100644 --- a/dash-spv-ffi/src/lib.rs +++ b/dash-spv-ffi/src/lib.rs @@ -14,6 +14,12 @@ pub use platform_integration::*; pub use types::*; pub use utils::*; +// Re-export wallet-FFI types used by `FFIWalletEventCallbacks` so consumers +// can refer to them via `dash_spv_ffi::*` without importing `key_wallet_ffi` +// directly. 
+pub use key_wallet_ffi::managed_account::{FFIAccountType, FFITransactionRecord}; +pub use key_wallet_ffi::types::FFIAccountKind; + // FFINetwork is now defined in types.rs for cbindgen compatibility // It must match the definition in key_wallet_ffi diff --git a/dash-spv-ffi/tests/dashd_sync/callbacks.rs b/dash-spv-ffi/tests/dashd_sync/callbacks.rs index 4e4c67ab0..295137537 100644 --- a/dash-spv-ffi/tests/dashd_sync/callbacks.rs +++ b/dash-spv-ffi/tests/dashd_sync/callbacks.rs @@ -2,13 +2,13 @@ use std::ffi::CStr; use std::os::raw::{c_char, c_void}; +use std::slice; use std::sync::atomic::{AtomicU32, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Duration; use dash_spv_ffi::*; -use key_wallet_ffi::managed_account::FFITransactionRecord; -use key_wallet_ffi::types::FFITransactionContext; +use key_wallet_ffi::types::FFIBalance; /// Tracks callback invocations for verification. /// @@ -38,8 +38,12 @@ pub(super) struct CallbackTracker { // Wallet event tracking pub(super) transaction_received_count: AtomicU32, - pub(super) transaction_status_changed_count: AtomicU32, - pub(super) balance_updated_count: AtomicU32, + pub(super) transaction_instant_send_locked_count: AtomicU32, + pub(super) block_processed_wallet_count: AtomicU32, + pub(super) block_processed_wallet_record_count: AtomicU32, + pub(super) synced_height_updated_count: AtomicU32, + /// Highest synced-height value observed from any `SyncedHeightUpdated`. + pub(super) last_synced_height: AtomicU32, // Data from callbacks pub(super) last_header_tip: AtomicU32, @@ -49,13 +53,38 @@ pub(super) struct CallbackTracker { pub(super) connected_peers: Mutex>, pub(super) errors: Mutex>, - // Transaction data from on_transaction_received (txid, net_amount) + // Per-record (txid, net_amount) seen via the off-chain wallet callback. 
pub(super) received_transactions: Mutex>, - - // Balance data from on_balance_updated - pub(super) last_spendable: AtomicU64, + // Per-record (txid, net_amount) seen via the block-processed callback. + pub(super) block_received_transactions: Mutex>, + + // `FFIAccountKind` discriminants captured from wallet callbacks. Lets + // tests assert that account-type delivery is well-formed and matches the + // expected account. + pub(super) received_account_types: Mutex>, + pub(super) block_account_types: Mutex>, + + // `account_index` values captured alongside `FFIAccountKind`, paired + // positionally with the corresponding `*_account_types` entries. + pub(super) received_account_indices: Mutex>, + pub(super) block_account_indices: Mutex>, + + // Per-record bucketing observed on `BlockProcessed` changes, in delivery + // order. Each entry is `true` when the record was delivered via the + // `inserted` array, `false` when delivered via `updated`. Lets tests + // assert that confirmation of a previously-known mempool transaction + // lands in `updated` rather than `inserted`. + pub(super) block_record_inserted: Mutex>, + + // Balance data from the most recent wallet event. + pub(super) last_confirmed: AtomicU64, pub(super) last_unconfirmed: AtomicU64, + // Raw IS lock bytes captured from the most recent + // `on_transaction_instant_send_locked` callback. Lets tests verify the + // payload is non-empty and round-trips through `InstantLock` deserialisation. 
+ pub(super) last_islock_bytes: Mutex>>, + // Lifecycle ordering via global sequence counter pub(super) sequence_counter: AtomicU32, pub(super) sync_start_seq: AtomicU32, @@ -341,15 +370,25 @@ extern "C" fn on_peers_updated(connected_count: u32, best_height: u32, user_data tracing::debug!("on_peers_updated: connected={}, best_height={}", connected_count, best_height); } -extern "C" fn on_transaction_received( +fn record_balance(tracker: &CallbackTracker, balance: *const FFIBalance) { + if balance.is_null() { + return; + } + let b = unsafe { *balance }; + tracker.last_confirmed.store(b.confirmed, Ordering::SeqCst); + tracker.last_unconfirmed.store(b.unconfirmed, Ordering::SeqCst); +} + +extern "C" fn on_transaction_detected( wallet_id: *const c_char, - account_index: u32, record: *const FFITransactionRecord, + balance: *const FFIBalance, user_data: *mut c_void, ) { let Some(tracker) = (unsafe { tracker_from(user_data) }) else { return; }; + let mut account_log = None; if !record.is_null() { let r = unsafe { &*record }; tracker @@ -357,50 +396,130 @@ extern "C" fn on_transaction_received( .lock() .unwrap_or_else(|e| e.into_inner()) .push((r.txid, r.net_amount)); + tracker + .received_account_types + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(r.account_type.kind); + tracker + .received_account_indices + .lock() + .unwrap_or_else(|e| e.into_inner()) + .push(r.account_type.index); + account_log = Some((r.account_type.kind, r.account_type.index)); } + // Store the balance before bumping the counter so a test that waits on the + // counter and then reads `last_unconfirmed` is guaranteed to observe the + // balance for the same callback invocation. 
+ record_balance(tracker, balance); tracker.transaction_received_count.fetch_add(1, Ordering::SeqCst); let wallet_str = unsafe { cstr_or_unknown(wallet_id) }; - tracing::info!("on_transaction_received: wallet={}, account={}", wallet_str, account_index,); + tracing::info!("on_transaction_detected: wallet={}, account={:?}", wallet_str, account_log); } -extern "C" fn on_transaction_status_changed( +extern "C" fn on_transaction_instant_locked( _wallet_id: *const c_char, _txid: *const [u8; 32], - status: FFITransactionContext, + islock_data: *const u8, + islock_len: usize, + balance: *const FFIBalance, user_data: *mut c_void, ) { let Some(tracker) = (unsafe { tracker_from(user_data) }) else { return; }; - tracker.transaction_status_changed_count.fetch_add(1, Ordering::SeqCst); - tracing::debug!("on_transaction_status_changed: status={:?}", status); + if !islock_data.is_null() && islock_len > 0 { + let bytes = unsafe { slice::from_raw_parts(islock_data, islock_len) }.to_vec(); + *tracker.last_islock_bytes.lock().unwrap_or_else(|e| e.into_inner()) = Some(bytes); + } + record_balance(tracker, balance); + tracker.transaction_instant_send_locked_count.fetch_add(1, Ordering::SeqCst); + tracing::debug!("on_transaction_instant_locked"); } -extern "C" fn on_balance_updated( +#[allow(clippy::too_many_arguments)] +extern "C" fn on_wallet_block_processed( wallet_id: *const c_char, - spendable: u64, - unconfirmed: u64, - immature: u64, - locked: u64, + height: u32, + inserted: *const FFITransactionRecord, + inserted_count: u32, + updated: *const FFITransactionRecord, + updated_count: u32, + _matured: *const FFITransactionRecord, + matured_count: u32, + balance: *const FFIBalance, user_data: *mut c_void, ) { let Some(tracker) = (unsafe { tracker_from(user_data) }) else { return; }; - tracker.last_spendable.store(spendable, Ordering::SeqCst); - tracker.last_unconfirmed.store(unconfirmed, Ordering::SeqCst); - tracker.balance_updated_count.fetch_add(1, Ordering::SeqCst); + // Append all 
per-record state before bumping either counter so that a + // test waiting on `block_processed_wallet_count` (the per-callback counter) + // is guaranteed to also observe the matching `block_processed_wallet_record_count` + // and the underlying vectors. Tests should always wait on + // `block_processed_wallet_count` and read the record counter afterwards. + let mut sink = tracker.block_received_transactions.lock().unwrap_or_else(|e| e.into_inner()); + let mut types = tracker.block_account_types.lock().unwrap_or_else(|e| e.into_inner()); + let mut indices = tracker.block_account_indices.lock().unwrap_or_else(|e| e.into_inner()); + let mut bucket = tracker.block_record_inserted.lock().unwrap_or_else(|e| e.into_inner()); + let mut records_added = 0u32; + if !inserted.is_null() && inserted_count > 0 { + let slice = unsafe { slice::from_raw_parts(inserted, inserted_count as usize) }; + for r in slice { + sink.push((r.txid, r.net_amount)); + types.push(r.account_type.kind); + indices.push(r.account_type.index); + bucket.push(true); + records_added += 1; + } + } + if !updated.is_null() && updated_count > 0 { + let slice = unsafe { slice::from_raw_parts(updated, updated_count as usize) }; + for r in slice { + sink.push((r.txid, r.net_amount)); + types.push(r.account_type.kind); + indices.push(r.account_type.index); + bucket.push(false); + records_added += 1; + } + } + drop(sink); + drop(types); + drop(indices); + drop(bucket); + if records_added > 0 { + tracker.block_processed_wallet_record_count.fetch_add(records_added, Ordering::SeqCst); + } + record_balance(tracker, balance); + tracker.block_processed_wallet_count.fetch_add(1, Ordering::SeqCst); let wallet_str = unsafe { cstr_or_unknown(wallet_id) }; tracing::info!( - "on_balance_updated: wallet={}, spendable={}, unconfirmed={}, immature={}, locked={}", + "on_wallet_block_processed: wallet={}, height={}, inserted={}, updated={}, matured={}", wallet_str, - spendable, - unconfirmed, - immature, - locked, + height, + 
inserted_count, + updated_count, + matured_count ); } +extern "C" fn on_sync_height_advanced( + wallet_id: *const c_char, + height: u32, + user_data: *mut c_void, +) { + let Some(tracker) = (unsafe { tracker_from(user_data) }) else { + return; + }; + // Store the height before bumping the counter so a test that waits on the + // counter and then reads `last_synced_height` is guaranteed to observe the + // height for the same callback invocation. + tracker.last_synced_height.store(height, Ordering::SeqCst); + tracker.synced_height_updated_count.fetch_add(1, Ordering::SeqCst); + let wallet_str = unsafe { cstr_or_unknown(wallet_id) }; + tracing::info!("on_sync_height_advanced: wallet={}, height={}", wallet_str, height); +} + /// Create sync callbacks with all event handlers wired to the tracker. /// /// The `user_data` pointer borrows the tracker Arc. The caller must ensure the @@ -444,9 +563,10 @@ pub(super) fn create_network_callbacks(tracker: &Arc) -> FFINet /// Arc outlives all callback invocations. 
pub(super) fn create_wallet_callbacks(tracker: &Arc) -> FFIWalletEventCallbacks { FFIWalletEventCallbacks { - on_transaction_received: Some(on_transaction_received), - on_transaction_status_changed: Some(on_transaction_status_changed), - on_balance_updated: Some(on_balance_updated), + on_transaction_detected: Some(on_transaction_detected), + on_transaction_instant_locked: Some(on_transaction_instant_locked), + on_block_processed: Some(on_wallet_block_processed), + on_sync_height_advanced: Some(on_sync_height_advanced), user_data: Arc::as_ptr(tracker) as *mut c_void, } } diff --git a/dash-spv-ffi/tests/dashd_sync/context.rs b/dash-spv-ffi/tests/dashd_sync/context.rs index 2c015e761..be21cfd65 100644 --- a/dash-spv-ffi/tests/dashd_sync/context.rs +++ b/dash-spv-ffi/tests/dashd_sync/context.rs @@ -35,7 +35,7 @@ use key_wallet_ffi::managed_account::{ use key_wallet_ffi::managed_wallet::{ managed_wallet_get_next_bip44_receive_address, managed_wallet_info_free, }; -use key_wallet_ffi::types::FFIAccountType; +use key_wallet_ffi::types::FFIAccountKind; use key_wallet_ffi::wallet::wallet_free_const; use key_wallet_ffi::wallet_manager::{ wallet_manager_add_wallet_from_mnemonic, wallet_manager_get_managed_wallet_info, @@ -313,7 +313,7 @@ impl FFITestContext { ) -> T { let wm = self.session.wallet_manager as *const FFIWalletManager; let result = - managed_wallet_get_account(wm, wallet_id.as_ptr(), 0, FFIAccountType::StandardBIP44); + managed_wallet_get_account(wm, wallet_id.as_ptr(), 0, FFIAccountKind::StandardBIP44); assert!( result.error_code == 0 && !result.account.is_null(), "Failed to get BIP44 account 0" diff --git a/dash-spv-ffi/tests/dashd_sync/tests_callback.rs b/dash-spv-ffi/tests/dashd_sync/tests_callback.rs index e7396e2fd..4acd4b53c 100644 --- a/dash-spv-ffi/tests/dashd_sync/tests_callback.rs +++ b/dash-spv-ffi/tests/dashd_sync/tests_callback.rs @@ -2,6 +2,7 @@ use std::sync::atomic::Ordering; use std::time::Duration; use dash_spv::test_utils::{DashdTestContext, 
TestChain}; +use dash_spv_ffi::FFIAccountKind; use dashcore::hashes::Hash; use dashcore::Amount; @@ -100,31 +101,62 @@ fn test_all_callbacks_during_sync() { ); drop(connected_peers); - // Wait for wallet callbacks (they travel on a separate channel from sync events) - tracker.wait_for_callback(&tracker.transaction_received_count, 0, "transaction_received"); - tracker.wait_for_callback(&tracker.balance_updated_count, 0, "balance_updated"); + // Wait for wallet callbacks (they travel on a separate channel from sync events). + // Wait on `block_processed_wallet_count` because it is bumped last in the + // callback, after all per-record state has been written. Reading the + // record counter afterwards is therefore guaranteed to see the matching + // increment. + tracker.wait_for_callback(&tracker.block_processed_wallet_count, 0, "block_processed"); // Validate wallet event callbacks (test wallet has transactions) - let tx_received = tracker.transaction_received_count.load(Ordering::SeqCst); - let balance_updated = tracker.balance_updated_count.load(Ordering::SeqCst); - let tx_status_changed = tracker.transaction_status_changed_count.load(Ordering::SeqCst); + let block_records = tracker.block_processed_wallet_record_count.load(Ordering::SeqCst); + let block_changes = tracker.block_processed_wallet_count.load(Ordering::SeqCst); + let received = tracker.transaction_received_count.load(Ordering::SeqCst); + let instant_send_locked = + tracker.transaction_instant_send_locked_count.load(Ordering::SeqCst); tracing::info!( - "Wallet: tx_received={}, tx_status_changed={}, balance_updated={}", - tx_received, - tx_status_changed, - balance_updated + "Wallet: received={}, instant_send_locked={}, block_changes={}, block_records={}", + received, + instant_send_locked, + block_changes, + block_records ); assert!( - tx_received > 0, - "on_transaction_received should fire for wallet with transactions" + block_records > 0, + "on_block_processed should deliver records for a wallet with 
transactions" + ); + assert!( + block_changes > 0, + "on_block_processed should fire for blocks containing wallet records" + ); + assert_eq!( + received, 0, + "on_transaction_detected must not fire during historical block sync" ); assert_eq!( - tx_status_changed, 0, - "on_transaction_status_changed should not fire here, all transactions are confirmed." + instant_send_locked, 0, + "on_transaction_instant_send_locked should not fire during initial sync" + ); + + // Validate SyncedHeightUpdated callback (atomicity boundary for persistence flush). + // Wait explicitly for the callback because it travels on the same wallet + // broadcast channel as `BlockProcessed` but is dispatched separately, + // so observing block-processed records does not guarantee it has fired yet. + tracker.wait_for_callback(&tracker.synced_height_updated_count, 0, "synced_height_updated"); + let synced_height_fired = tracker.synced_height_updated_count.load(Ordering::SeqCst); + let last_synced_height = tracker.last_synced_height.load(Ordering::SeqCst); + assert!( + synced_height_fired > 0, + "on_synced_height_updated should fire at least once during sync" + ); + assert!( + last_synced_height >= dashd.initial_height, + "last_synced_height ({}) should be at least initial_height ({}) after sync", + last_synced_height, + dashd.initial_height ); - assert!(balance_updated > 0, "on_balance_updated should fire for wallet with transactions"); // Validate sync cycle (initial sync is cycle 0) let last_sync_cycle = tracker.last_sync_cycle.load(Ordering::SeqCst); @@ -211,14 +243,53 @@ fn test_all_callbacks_during_sync() { "best height from peers should match initial height" ); - // Validate transaction data from initial sync - let received_txs = tracker.received_transactions.lock().unwrap(); - assert!(!received_txs.is_empty(), "should have received transactions during sync"); + // Validate transaction data from initial sync. 
Historical sync only + // touches the block-processed callback (off-chain callback must + // remain silent during initial sync), so assert against that bucket + // explicitly. + let block_received = tracker.block_received_transactions.lock().unwrap(); + assert!(!block_received.is_empty(), "should have received block records during sync"); assert!( - received_txs.iter().any(|&(_, amount)| amount != 0), - "at least one received transaction amount should be non-zero" + block_received.iter().any(|&(_, amount)| amount != 0), + "at least one block-record net_amount should be non-zero" ); - drop(received_txs); + drop(block_received); + + // Every record observed during initial sync is a fresh insertion + // (no prior mempool sighting), so each must arrive in the `inserted` + // bucket of `BlockProcessed`. + let bucket = tracker.block_record_inserted.lock().unwrap(); + assert!(!bucket.is_empty(), "block records should be captured"); + assert!( + bucket.iter().all(|inserted| *inserted), + "every block record during historical sync should arrive via `inserted`, got: {:?}", + *bucket + ); + drop(bucket); + + // Validate the BIP-44 account discriminant + index reach the FFI + // boundary intact: every change observed during historical sync + // belongs to the default BIP-44 account (index 0) of the test wallet. 
+ let account_types = tracker.block_account_types.lock().unwrap(); + let account_indices = tracker.block_account_indices.lock().unwrap(); + assert!(!account_types.is_empty(), "block account types should be captured"); + assert_eq!( + account_types.len(), + account_indices.len(), + "block account types and indices must be paired 1:1" + ); + assert!( + account_types.iter().all(|t| *t == FFIAccountKind::StandardBIP44), + "every block change should carry FFIAccountKind::StandardBIP44, got: {:?}", + *account_types + ); + assert!( + account_indices.iter().all(|i| *i == 0), + "every BIP-44 change should carry account_index = 0, got: {:?}", + *account_indices + ); + drop(account_indices); + drop(account_types); // Masternodes are disabled in test config, so these should not fire let masternode_updated = tracker.masternode_state_updated_count.load(Ordering::SeqCst); @@ -234,7 +305,7 @@ fn test_all_callbacks_during_sync() { /// Verify wallet and network callbacks fire correctly after initial sync completes. /// /// After initial sync, sends DASH to the wallet and mines a block. Verifies that -/// on_transaction_received and on_balance_updated callbacks fire. Then disconnects +/// on_transaction_detected and on_balance_updated callbacks fire. Then disconnects /// dashd peers and verifies on_peer_disconnected fires, followed by on_peer_connected /// after automatic reconnection. 
#[test] @@ -260,59 +331,80 @@ fn test_callbacks_post_sync_transactions_and_disconnect() { tracing::info!("Initial sync complete"); // Record callback counts before post-sync operations - let tx_received_before = tracker.transaction_received_count.load(Ordering::SeqCst); - let balance_updated_before = tracker.balance_updated_count.load(Ordering::SeqCst); - - // Send DASH to the wallet and mine a block + let received_before = tracker.transaction_received_count.load(Ordering::SeqCst); + let block_changes_before = tracker.block_processed_wallet_count.load(Ordering::SeqCst); + let block_records_before = + tracker.block_processed_wallet_record_count.load(Ordering::SeqCst); + + // Send DASH to the wallet. Wait for the off-chain callback before + // mining so the SPV node observes the transaction in the mempool. + // If we mine immediately, the block path can deliver the transaction + // first and the off-chain callback would never fire. let receive_address = ctx.get_receive_address(&wallet_id); let send_amount = Amount::from_sat(100_000_000); let txid = dashd.node.send_to_address(&receive_address, send_amount); tracing::info!("Sent {} to wallet, txid: {}", send_amount, txid); + tracker.wait_for_callback( + &tracker.transaction_received_count, + received_before, + "transaction_received", + ); + + // The off-chain callback updates `last_unconfirmed` with the + // post-event balance. Snapshot it now, before mining. After + // confirmation the block-processed callback overwrites the same + // field back toward zero, so this is the only window in which the + // unconfirmed-balance update is observable. 
+ let unconfirmed_after_mempool = tracker.last_unconfirmed.load(Ordering::SeqCst); + assert!( + unconfirmed_after_mempool > 0, + "balance.unconfirmed should be positive after mempool receipt, got {}", + unconfirmed_after_mempool + ); + let miner_address = dashd.node.get_new_address_from_wallet("default"); dashd.node.generate_blocks(1, &miner_address); // Wait for incremental sync to complete ctx.wait_for_sync(dashd.initial_height + 1); - // Wait for wallet callbacks (they travel on a separate channel from sync events) + // Wait for the block-processed callback. The per-callback counter is + // bumped last in the callback, so observing it incremented guarantees + // the per-record vectors and counters have already been updated. tracker.wait_for_callback( - &tracker.transaction_received_count, - tx_received_before, - "transaction_received", - ); - tracker.wait_for_callback( - &tracker.balance_updated_count, - balance_updated_before, - "balance_updated", + &tracker.block_processed_wallet_count, + block_changes_before, + "block_processed", ); - // Verify on_transaction_received fired for the new transaction - let tx_received_after = tracker.transaction_received_count.load(Ordering::SeqCst); + // Verify on_transaction_detected fired for the new transaction + let received_after = tracker.transaction_received_count.load(Ordering::SeqCst); assert!( - tx_received_after > tx_received_before, - "on_transaction_received should fire for post-sync transaction: {} -> {}", - tx_received_before, - tx_received_after + received_after > received_before, + "on_transaction_detected should fire for post-sync transaction: {} -> {}", + received_before, + received_after ); tracing::info!( - "Transaction callback verified: {} -> {}", - tx_received_before, - tx_received_after + "Off-chain transaction callback verified: {} -> {}", + received_before, + received_after ); - // Verify the sent txid appears in the callback data with a non-zero - // net_amount. 
The SPV wallet and dashd share the same mnemonic so the - // transaction is an internal transfer (wallet owns both inputs and - // outputs); net_amount therefore equals approximately -fee, not the - // nominal send amount. + // Verify the sent txid appears in the off-chain callback data with a + // non-zero net_amount. Asserting against the off-chain bucket (rather + // than the union of off-chain + block records) ensures the off-chain + // callback specifically delivered the txid — a broken off-chain + // callback that pushed the wrong txid wouldn't be masked by the + // block path. The SPV wallet and dashd share the same mnemonic so + // the transaction is an internal transfer (wallet owns both inputs + // and outputs); net_amount therefore equals approximately -fee, not + // the nominal send amount. let sent_txid_bytes = *txid.as_byte_array(); let received_txs = tracker.received_transactions.lock().unwrap(); let sent_entry = received_txs.iter().find(|&&(id, _)| id == sent_txid_bytes); - assert!( - sent_entry.is_some(), - "sent txid should appear in received transaction callback data" - ); + assert!(sent_entry.is_some(), "sent txid should appear in transaction callback data"); let &(_, net_amount) = sent_entry.unwrap(); // Internal transfer: net_amount = received - sent = (send_amount + change) - input = -fee. // The fee must be negative, non-zero, and small (< 0.001 DASH). @@ -323,20 +415,58 @@ fn test_callbacks_post_sync_transactions_and_disconnect() { ); drop(received_txs); - let balance_updated_after = tracker.balance_updated_count.load(Ordering::SeqCst); - tracing::info!( - "Balance updated callback verified: {} -> {}", - balance_updated_before, - balance_updated_after + // Verify the off-chain callback delivered the BIP-44 account + // discriminant + index 0 (default test account). 
+ let received_types = tracker.received_account_types.lock().unwrap(); + let received_indices = tracker.received_account_indices.lock().unwrap(); + assert!( + received_types.iter().all(|t| *t == FFIAccountKind::StandardBIP44), + "off-chain callback should deliver FFIAccountKind::StandardBIP44, got: {:?}", + *received_types ); - - // Verify balance data from callback reflects a positive spendable balance - let last_spendable = tracker.last_spendable.load(Ordering::SeqCst); assert!( - last_spendable > 0, - "last_spendable from on_balance_updated should be positive after receiving funds" + received_indices.iter().all(|i| *i == 0), + "off-chain BIP-44 callback should deliver account_index = 0, got: {:?}", + *received_indices ); - tracing::info!("Balance data verified: last_spendable={}", last_spendable); + drop(received_indices); + drop(received_types); + + // The post-sync block confirms a transaction that was already known + // from the mempool, so the corresponding `BlockProcessed` change must + // arrive in the `updated` bucket rather than `inserted`. Slice by + // the pre-captured index so only post-sync entries are checked, + // avoiding masking by any `updated` entry that might appear during + // initial sync. 
+ let block_bucket = tracker.block_record_inserted.lock().unwrap(); + assert!( + block_bucket.len() >= block_records_before as usize, + "block_record_inserted length ({}) < block_records_before ({}): counter/vector mismatch", + block_bucket.len(), + block_records_before + ); + let new_bucket = &block_bucket[block_records_before as usize..]; + assert!( + new_bucket.iter().any(|inserted| !inserted), + "post-sync block confirming a known mempool tx should arrive in the \ + `updated` bucket, got: {:?}", + new_bucket + ); + drop(block_bucket); + + let block_records_after = + tracker.block_processed_wallet_record_count.load(Ordering::SeqCst); + tracing::info!( + "Block-processed record callback verified: {} -> {}", + block_records_before, + block_records_after + ); + + // Verify balance data from the most recent wallet event reflects a positive + // confirmed balance. + let last_confirmed = tracker.last_confirmed.load(Ordering::SeqCst); + assert!(last_confirmed > 0, "last_confirmed should be positive after receiving funds"); + tracing::info!("Balance data verified: last_confirmed={}", last_confirmed); // Record connect count before disconnect let connect_before = tracker.peer_connected_count.load(Ordering::SeqCst); diff --git a/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs b/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs index b97c5d4a4..263048205 100644 --- a/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs +++ b/dash-spv-ffi/tests/dashd_sync/tests_transaction.rs @@ -1,6 +1,7 @@ use std::sync::atomic::Ordering; use dash_spv::test_utils::{DashdTestContext, TestChain}; +use dash_spv_ffi::FFIAccountKind; use dashcore::hashes::Hash; use dashcore::Amount; @@ -47,7 +48,10 @@ fn test_ffi_sync_then_generate_blocks() { // Generate a block containing a wallet transaction and wait for sync. 
let cycle_before = ctx.tracker().last_sync_cycle.load(Ordering::SeqCst); - let tx_received_before = ctx.tracker().transaction_received_count.load(Ordering::SeqCst); + let block_changes_before = + ctx.tracker().block_processed_wallet_count.load(Ordering::SeqCst); + let block_records_before = + ctx.tracker().block_processed_wallet_record_count.load(Ordering::SeqCst); let receive_address = ctx.get_receive_address(&wallet_id); let send_amount = Amount::from_sat(100_000_000); let txid = dashd.node.send_to_address(&receive_address, send_amount); @@ -66,23 +70,70 @@ fn test_ffi_sync_then_generate_blocks() { cycle_after_first ); - // Wait for wallet callback (travels on a separate channel from sync events) + // Wait for wallet callback (travels on a separate channel from sync events). + // The per-callback counter is bumped last in the callback, so observing + // it incremented guarantees the per-record vectors are also updated. ctx.tracker().wait_for_callback( - &ctx.tracker().transaction_received_count, - tx_received_before, - "transaction_received", + &ctx.tracker().block_processed_wallet_count, + block_changes_before, + "block_processed", ); - // Verify the transaction was received via wallet callback - let received_txs = ctx.tracker().received_transactions.lock().unwrap(); + // Verify the transaction was received via the block-processed callback + let received_txs = ctx.tracker().block_received_transactions.lock().unwrap(); let txid_bytes = *txid.as_byte_array(); assert!( received_txs.iter().any(|&(txid, _)| txid == txid_bytes), - "Wallet callback should have received txid {}", + "Block-processed callback should have received txid {}", txid ); drop(received_txs); + // Verify per-record bucketing was captured for the post-sync block. 
+ let bucket = ctx.tracker().block_record_inserted.lock().unwrap(); + assert!( + bucket.len() >= block_records_before as usize, + "block_record_inserted length ({}) < block_records_before ({}): counter/vector mismatch", + bucket.len(), + block_records_before + ); + let new_bucket = &bucket[block_records_before as usize..]; + assert!( + !new_bucket.is_empty(), + "block_record_inserted should be populated for the post-sync block" + ); + drop(bucket); + + // Verify the BIP-44 account discriminant + index were delivered for + // the post-sync block records. + let types = ctx.tracker().block_account_types.lock().unwrap(); + let indices = ctx.tracker().block_account_indices.lock().unwrap(); + assert!( + types.len() >= block_records_before as usize, + "block_account_types length ({}) < block_records_before ({}): counter/vector mismatch", + types.len(), + block_records_before + ); + assert_eq!( + types.len(), + indices.len(), + "block account types and indices must be paired 1:1" + ); + let new_types = &types[block_records_before as usize..]; + let new_indices = &indices[block_records_before as usize..]; + assert!( + new_types.iter().all(|t| *t == FFIAccountKind::StandardBIP44), + "post-sync block changes should carry FFIAccountKind::StandardBIP44, got: {:?}", + new_types + ); + assert!( + new_indices.iter().all(|i| *i == 0), + "post-sync BIP-44 changes should carry account_index = 0, got: {:?}", + new_indices + ); + drop(indices); + drop(types); + // Verify via wallet query as well assert!( ctx.has_transaction(&wallet_id, &txid), diff --git a/dash-spv/tests/dashd_sync/helpers.rs b/dash-spv/tests/dashd_sync/helpers.rs index b90a9a187..c318224ce 100644 --- a/dash-spv/tests/dashd_sync/helpers.rs +++ b/dash-spv/tests/dashd_sync/helpers.rs @@ -126,7 +126,8 @@ pub(super) async fn wait_for_network_event( } } -/// Wait for a wallet `TransactionReceived` event with mempool status within the given timeout. +/// Wait for a wallet `TransactionDetected` event within the given timeout. 
+/// Accepts both plain mempool and InstantSend-locked mempool arrivals. /// Returns `Some(txid)` if received, `None` on timeout. pub(super) async fn wait_for_mempool_tx( receiver: &mut broadcast::Receiver, @@ -140,7 +141,14 @@ pub(super) async fn wait_for_mempool_tx( _ = &mut timeout => return None, result = receiver.recv() => { match result { - Ok(WalletEvent::TransactionReceived { ref record, .. }) if record.context == TransactionContext::Mempool => return Some(record.txid), + Ok(WalletEvent::TransactionDetected { ref record, .. }) + if matches!( + record.context, + TransactionContext::Mempool | TransactionContext::InstantSend(_) + ) => + { + return Some(record.txid); + } Ok(_) => continue, Err(_) => return None, } @@ -176,13 +184,13 @@ pub(super) async fn wait_for_mempool_synced( } } -/// Assert that no mempool `TransactionReceived` event arrives within the given duration. +/// Assert that no mempool `TransactionDetected` event arrives within the given duration. pub(super) async fn assert_no_mempool_tx( receiver: &mut broadcast::Receiver, wait: Duration, ) { if let Some(txid) = wait_for_mempool_tx(receiver, wait).await { - panic!("Unexpected mempool TransactionReceived event with txid: {}", txid); + panic!("Unexpected TransactionDetected event with txid: {}", txid); } } @@ -319,7 +327,7 @@ pub(super) async fn wait_for_mempool_txs_both( for _ in 0..count { let txid = wait_for_mempool_tx(receiver, timeout) .await - .expect("Expected mempool TransactionReceived event"); + .expect("Expected TransactionDetected event"); txids.insert(txid); } txids diff --git a/dash-spv/tests/dashd_sync/tests_mempool.rs b/dash-spv/tests/dashd_sync/tests_mempool.rs index 845f31fda..14e8156d7 100644 --- a/dash-spv/tests/dashd_sync/tests_mempool.rs +++ b/dash-spv/tests/dashd_sync/tests_mempool.rs @@ -38,7 +38,7 @@ async fn test_mempool_detects_incoming_tx() { let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) .await - .expect("Expected mempool 
TransactionReceived event"); + .expect("Expected TransactionDetected event"); assert_eq!(mempool_txid, txid, "Mempool event txid should match sent txid"); fa.stop().await; @@ -106,7 +106,7 @@ async fn test_mempool_to_confirmed_lifecycle() { let mempool_txid = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) .await - .expect("Expected mempool TransactionReceived event"); + .expect("Expected TransactionDetected event"); assert_eq!(mempool_txid, txid); // Mine the transaction @@ -552,7 +552,7 @@ async fn test_broadcast_transaction_local_detection() { // The locally dispatched transaction should be picked up by the mempool manager let detected = wait_for_mempool_tx_both(&mut fa, &mut bf, MEMPOOL_TIMEOUT) .await - .expect("Expected mempool TransactionReceived event after broadcast"); + .expect("Expected TransactionDetected event after broadcast"); assert_eq!(detected, txid, "Detected txid should match broadcast txid"); // Step 4: Mine the broadcast tx and verify it transitions to confirmed diff --git a/key-wallet-ffi/FFI_API.md b/key-wallet-ffi/FFI_API.md index 934797af0..9fa14f312 100644 --- a/key-wallet-ffi/FFI_API.md +++ b/key-wallet-ffi/FFI_API.md @@ -887,7 +887,7 @@ Free managed wallet info # Safety - `managed_wallet` must be a valid pointer t #### `managed_wallet_generate_addresses_to_index` ```c -managed_wallet_generate_addresses_to_index(managed_wallet: *mut FFIManagedWalletInfo, wallet: *const FFIWallet, account_type: FFIAccountType, account_index: c_uint, pool_type: FFIAddressPoolType, target_index: c_uint, error: *mut FFIError,) -> bool +managed_wallet_generate_addresses_to_index(managed_wallet: *mut FFIManagedWalletInfo, wallet: *const FFIWallet, account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, target_index: c_uint, error: *mut FFIError,) -> bool ``` **Description:** @@ -903,7 +903,7 @@ Generate addresses up to a specific index in a pool This ensures that addresses #### `managed_wallet_get_account` ```c 
-managed_wallet_get_account(manager: *const FFIWalletManager, wallet_id: *const u8, account_index: c_uint, account_type: FFIAccountType,) -> FFIManagedCoreAccountResult +managed_wallet_get_account(manager: *const FFIWalletManager, wallet_id: *const u8, account_index: c_uint, account_type: FFIAccountKind,) -> FFIManagedCoreAccountResult ``` **Description:** @@ -951,7 +951,7 @@ Get number of accounts in a managed wallet # Safety - `manager` must be a vali #### `managed_wallet_get_address_pool_info` ```c -managed_wallet_get_address_pool_info(managed_wallet: *const FFIManagedWalletInfo, account_type: FFIAccountType, account_index: c_uint, pool_type: FFIAddressPoolType, info_out: *mut FFIAddressPoolInfo, error: *mut FFIError,) -> bool +managed_wallet_get_address_pool_info(managed_wallet: *const FFIManagedWalletInfo, account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, info_out: *mut FFIAddressPoolInfo, error: *mut FFIError,) -> bool ``` **Description:** @@ -1175,7 +1175,7 @@ Mark an address as used in the pool This updates the pool's tracking of which a #### `managed_wallet_set_gap_limit` ```c -managed_wallet_set_gap_limit(managed_wallet: *mut FFIManagedWalletInfo, account_type: FFIAccountType, account_index: c_uint, pool_type: FFIAddressPoolType, gap_limit: c_uint, error: *mut FFIError,) -> bool +managed_wallet_set_gap_limit(managed_wallet: *mut FFIManagedWalletInfo, account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, gap_limit: c_uint, error: *mut FFIError,) -> bool ``` **Description:** @@ -1191,7 +1191,7 @@ Set the gap limit for an address pool The gap limit determines how many unused #### `wallet_add_account` ```c -wallet_add_account(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountType, account_index: c_uint,) -> crate::types::FFIAccountResult +wallet_add_account(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountKind, account_index: c_uint,) -> crate::types::FFIAccountResult ``` 
**Description:** @@ -1207,7 +1207,7 @@ This function dereferences a raw pointer to FFIWallet. The caller must ensure th #### `wallet_add_account_with_string_xpub` ```c -wallet_add_account_with_string_xpub(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountType, account_index: c_uint, xpub_string: *const c_char,) -> crate::types::FFIAccountResult +wallet_add_account_with_string_xpub(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountKind, account_index: c_uint, xpub_string: *const c_char,) -> crate::types::FFIAccountResult ``` **Description:** @@ -1223,7 +1223,7 @@ This function dereferences raw pointers. The caller must ensure that: - The wall #### `wallet_add_account_with_xpub_bytes` ```c -wallet_add_account_with_xpub_bytes(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountType, account_index: c_uint, xpub_bytes: *const u8, xpub_len: usize,) -> crate::types::FFIAccountResult +wallet_add_account_with_xpub_bytes(wallet: *mut FFIWallet, account_type: crate::types::FFIAccountKind, account_index: c_uint, xpub_bytes: *const u8, xpub_len: usize,) -> crate::types::FFIAccountResult ``` **Description:** @@ -1575,7 +1575,7 @@ Free a const wallet handle This is a const-safe wrapper for wallet_free() that #### `wallet_get_account` ```c -wallet_get_account(wallet: *const FFIWallet, account_index: c_uint, account_type: FFIAccountType,) -> FFIAccountResult +wallet_get_account(wallet: *const FFIWallet, account_index: c_uint, account_type: FFIAccountKind,) -> FFIAccountResult ``` **Description:** @@ -2313,14 +2313,14 @@ Free an account handle # Safety - `account` must be a valid pointer to an FFIA #### `account_get_account_type` ```c -account_get_account_type(account: *const FFIAccount, out_index: *mut c_uint,) -> FFIAccountType +account_get_account_type(account: *const FFIAccount, out_index: *mut c_uint,) -> FFIAccountKind ``` **Description:** -Get the account type of an account # Safety - `account` must be a valid pointer to an FFIAccount 
instance - `out_index` must be a valid pointer to a c_uint where the index will be stored - Returns FFIAccountType::StandardBIP44 with index 0 if the account is null +Get the account type of an account # Safety - `account` must be a valid pointer to an FFIAccount instance - `out_index` must be a valid pointer to a c_uint where the index will be stored - Returns FFIAccountKind::StandardBIP44 with index 0 if the account is null **Safety:** -- `account` must be a valid pointer to an FFIAccount instance - `out_index` must be a valid pointer to a c_uint where the index will be stored - Returns FFIAccountType::StandardBIP44 with index 0 if the account is null +- `account` must be a valid pointer to an FFIAccount instance - `out_index` must be a valid pointer to a c_uint where the index will be stored - Returns FFIAccountKind::StandardBIP44 with index 0 if the account is null **Module:** `account` @@ -2407,7 +2407,7 @@ bls_account_free(account: *mut FFIBLSAccount) -> () #### `bls_account_get_account_type` ```c -bls_account_get_account_type(account: *const FFIBLSAccount, out_index: *mut c_uint,) -> FFIAccountType +bls_account_get_account_type(account: *const FFIBLSAccount, out_index: *mut c_uint,) -> FFIAccountKind ``` **Module:** `account` @@ -2493,7 +2493,7 @@ eddsa_account_free(account: *mut FFIEdDSAAccount) -> () #### `eddsa_account_get_account_type` ```c -eddsa_account_get_account_type(account: *const FFIEdDSAAccount, out_index: *mut c_uint,) -> FFIAccountType +eddsa_account_get_account_type(account: *const FFIEdDSAAccount, out_index: *mut c_uint,) -> FFIAccountKind ``` **Module:** `account` @@ -3077,7 +3077,7 @@ Free transactions array returned by managed_core_account_get_transactions # Saf #### `managed_core_account_get_account_type` ```c -managed_core_account_get_account_type(account: *const FFIManagedCoreAccount, index_out: *mut c_uint,) -> FFIAccountType +managed_core_account_get_account_type(account: *const FFIManagedCoreAccount, index_out: *mut c_uint,) -> 
FFIAccountKind ``` **Description:** diff --git a/key-wallet-ffi/src/account.rs b/key-wallet-ffi/src/account.rs index 773eaed05..76b00a1f3 100644 --- a/key-wallet-ffi/src/account.rs +++ b/key-wallet-ffi/src/account.rs @@ -2,7 +2,7 @@ use crate::deref_ptr; use crate::error::{FFIError, FFIErrorCode}; -use crate::types::{FFIAccountResult, FFIAccountType, FFIWallet}; +use crate::types::{FFIAccountKind, FFIAccountResult, FFIWallet}; use dash_network::ffi::FFINetwork; #[cfg(feature = "bls")] use key_wallet::account::BLSAccount; @@ -83,7 +83,7 @@ impl FFIEdDSAAccount { pub unsafe extern "C" fn wallet_get_account( wallet: *const FFIWallet, account_index: c_uint, - account_type: FFIAccountType, + account_type: FFIAccountKind, ) -> FFIAccountResult { if wallet.is_null() { return FFIAccountResult::error(FFIErrorCode::InvalidInput, "Wallet is null".to_string()); @@ -270,22 +270,22 @@ pub unsafe extern "C" fn account_get_parent_wallet_id(account: *const FFIAccount /// /// - `account` must be a valid pointer to an FFIAccount instance /// - `out_index` must be a valid pointer to a c_uint where the index will be stored -/// - Returns FFIAccountType::StandardBIP44 with index 0 if the account is null +/// - Returns FFIAccountKind::StandardBIP44 with index 0 if the account is null #[no_mangle] pub unsafe extern "C" fn account_get_account_type( account: *const FFIAccount, out_index: *mut c_uint, -) -> FFIAccountType { +) -> FFIAccountKind { if account.is_null() || out_index.is_null() { if !out_index.is_null() { *out_index = 0; } - return FFIAccountType::StandardBIP44; + return FFIAccountKind::StandardBIP44; } let account = &*account; let (account_type, index, registration_index) = - FFIAccountType::from_account_type(&account.inner().account_type); + FFIAccountKind::from_account_type(&account.inner().account_type); // For IdentityTopUp, the registration_index is the relevant index *out_index = registration_index.unwrap_or(index); @@ -385,23 +385,23 @@ pub unsafe extern "C" fn 
bls_account_get_parent_wallet_id( /// /// - `account` must be a valid pointer to an FFIBLSAccount instance /// - `out_index` must be a valid pointer to a c_uint where the index will be stored -/// - Returns FFIAccountType::StandardBIP44 with index 0 if the account is null +/// - Returns FFIAccountKind::StandardBIP44 with index 0 if the account is null #[cfg(feature = "bls")] #[no_mangle] pub unsafe extern "C" fn bls_account_get_account_type( account: *const FFIBLSAccount, out_index: *mut c_uint, -) -> FFIAccountType { +) -> FFIAccountKind { if account.is_null() || out_index.is_null() { if !out_index.is_null() { *out_index = 0; } - return FFIAccountType::StandardBIP44; + return FFIAccountKind::StandardBIP44; } let account = &*account; let (account_type, index, registration_index) = - FFIAccountType::from_account_type(&account.inner().account_type); + FFIAccountKind::from_account_type(&account.inner().account_type); // For IdentityTopUp, the registration_index is the relevant index *out_index = registration_index.unwrap_or(index); @@ -502,23 +502,23 @@ pub unsafe extern "C" fn eddsa_account_get_parent_wallet_id( /// /// - `account` must be a valid pointer to an FFIEdDSAAccount instance /// - `out_index` must be a valid pointer to a c_uint where the index will be stored -/// - Returns FFIAccountType::StandardBIP44 with index 0 if the account is null +/// - Returns FFIAccountKind::StandardBIP44 with index 0 if the account is null #[cfg(feature = "eddsa")] #[no_mangle] pub unsafe extern "C" fn eddsa_account_get_account_type( account: *const FFIEdDSAAccount, out_index: *mut c_uint, -) -> FFIAccountType { +) -> FFIAccountKind { if account.is_null() || out_index.is_null() { if !out_index.is_null() { *out_index = 0; } - return FFIAccountType::StandardBIP44; + return FFIAccountKind::StandardBIP44; } let account = &*account; let (account_type, index, registration_index) = - FFIAccountType::from_account_type(&account.inner().account_type); + 
FFIAccountKind::from_account_type(&account.inner().account_type); // For IdentityTopUp, the registration_index is the relevant index *out_index = registration_index.unwrap_or(index); diff --git a/key-wallet-ffi/src/account_collection.rs b/key-wallet-ffi/src/account_collection.rs index 6c8a59942..f45c32ab0 100644 --- a/key-wallet-ffi/src/account_collection.rs +++ b/key-wallet-ffi/src/account_collection.rs @@ -1119,7 +1119,7 @@ mod tests { options.option_type = crate::types::FFIAccountCreationOptionType::AllAccounts; // Add provider operator keys account type - let special_types = [crate::types::FFIAccountType::ProviderOperatorKeys]; + let special_types = [crate::types::FFIAccountKind::ProviderOperatorKeys]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); @@ -1166,7 +1166,7 @@ mod tests { options.option_type = crate::types::FFIAccountCreationOptionType::AllAccounts; // Add provider platform keys account type - let special_types = [crate::types::FFIAccountType::ProviderPlatformKeys]; + let special_types = [crate::types::FFIAccountKind::ProviderPlatformKeys]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); @@ -1215,10 +1215,10 @@ mod tests { // Add various special accounts let special_types = [ - crate::types::FFIAccountType::ProviderVotingKeys, - crate::types::FFIAccountType::ProviderOwnerKeys, - crate::types::FFIAccountType::IdentityRegistration, - crate::types::FFIAccountType::IdentityInvitation, + crate::types::FFIAccountKind::ProviderVotingKeys, + crate::types::FFIAccountKind::ProviderOwnerKeys, + crate::types::FFIAccountKind::IdentityRegistration, + crate::types::FFIAccountKind::IdentityInvitation, ]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); @@ -1353,10 +1353,10 @@ mod tests { // Add various special accounts let special_types = [ - 
crate::types::FFIAccountType::ProviderVotingKeys, - crate::types::FFIAccountType::ProviderOwnerKeys, - crate::types::FFIAccountType::IdentityRegistration, - crate::types::FFIAccountType::IdentityInvitation, + crate::types::FFIAccountKind::ProviderVotingKeys, + crate::types::FFIAccountKind::ProviderOwnerKeys, + crate::types::FFIAccountKind::IdentityRegistration, + crate::types::FFIAccountKind::IdentityInvitation, ]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); diff --git a/key-wallet-ffi/src/account_derivation_tests.rs b/key-wallet-ffi/src/account_derivation_tests.rs index f4c15c2fe..b380916d2 100644 --- a/key-wallet-ffi/src/account_derivation_tests.rs +++ b/key-wallet-ffi/src/account_derivation_tests.rs @@ -7,7 +7,7 @@ mod tests { use crate::derivation::*; use crate::error::{FFIError, FFIErrorCode}; use crate::keys::{extended_private_key_free, private_key_free}; - use crate::types::FFIAccountType; + use crate::types::FFIAccountKind; use crate::wallet; use dash_network::ffi::FFINetwork; @@ -35,7 +35,7 @@ mod tests { // Get account 0 (BIP44) let account = unsafe { - crate::account::wallet_get_account(wallet, 0, FFIAccountType::StandardBIP44).account + crate::account::wallet_get_account(wallet, 0, FFIAccountKind::StandardBIP44).account }; assert!(!account.is_null()); @@ -133,7 +133,7 @@ mod tests { // Get account 0 (BIP44) let account = unsafe { - crate::account::wallet_get_account(wallet, 0, FFIAccountType::StandardBIP44).account + crate::account::wallet_get_account(wallet, 0, FFIAccountKind::StandardBIP44).account }; assert!(!account.is_null()); @@ -186,7 +186,7 @@ mod tests { }; assert!(!wallet.is_null()); let account = unsafe { - crate::account::wallet_get_account(wallet, 0, FFIAccountType::StandardBIP44).account + crate::account::wallet_get_account(wallet, 0, FFIAccountKind::StandardBIP44).account }; assert!(!account.is_null()); diff --git a/key-wallet-ffi/src/account_tests.rs 
b/key-wallet-ffi/src/account_tests.rs index e5ee42392..35be6a188 100644 --- a/key-wallet-ffi/src/account_tests.rs +++ b/key-wallet-ffi/src/account_tests.rs @@ -3,14 +3,14 @@ mod tests { use super::super::*; use crate::error::{FFIError, FFIErrorCode}; - use crate::types::FFIAccountType; + use crate::types::FFIAccountKind; use crate::wallet; use std::ffi::CString; use std::ptr; #[test] fn test_wallet_get_account_null_wallet() { - let result = unsafe { wallet_get_account(ptr::null(), 0, FFIAccountType::StandardBIP44) }; + let result = unsafe { wallet_get_account(ptr::null(), 0, FFIAccountKind::StandardBIP44) }; assert!(result.account.is_null()); assert_ne!(result.error_code, 0); @@ -42,7 +42,7 @@ mod tests { }; // Try to get the default account (should exist) - let result = unsafe { wallet_get_account(wallet, 0, FFIAccountType::StandardBIP44) }; + let result = unsafe { wallet_get_account(wallet, 0, FFIAccountKind::StandardBIP44) }; // Note: Since the account may not exist yet (depends on wallet creation logic), // we just check that the call doesn't return an error for invalid parameters @@ -109,18 +109,18 @@ mod tests { #[test] fn test_account_type_values() { - // Test FFIAccountType enum values - assert_eq!(FFIAccountType::StandardBIP44 as u32, 0); - assert_eq!(FFIAccountType::StandardBIP32 as u32, 1); - assert_eq!(FFIAccountType::CoinJoin as u32, 2); - assert_eq!(FFIAccountType::IdentityRegistration as u32, 3); - assert_eq!(FFIAccountType::IdentityTopUp as u32, 4); - assert_eq!(FFIAccountType::IdentityTopUpNotBoundToIdentity as u32, 5); - assert_eq!(FFIAccountType::IdentityInvitation as u32, 6); - assert_eq!(FFIAccountType::ProviderVotingKeys as u32, 7); - assert_eq!(FFIAccountType::ProviderOwnerKeys as u32, 8); - assert_eq!(FFIAccountType::ProviderOperatorKeys as u32, 9); - assert_eq!(FFIAccountType::ProviderPlatformKeys as u32, 10); + // Test FFIAccountKind enum values + assert_eq!(FFIAccountKind::StandardBIP44 as u32, 0); + 
assert_eq!(FFIAccountKind::StandardBIP32 as u32, 1); + assert_eq!(FFIAccountKind::CoinJoin as u32, 2); + assert_eq!(FFIAccountKind::IdentityRegistration as u32, 3); + assert_eq!(FFIAccountKind::IdentityTopUp as u32, 4); + assert_eq!(FFIAccountKind::IdentityTopUpNotBoundToIdentity as u32, 5); + assert_eq!(FFIAccountKind::IdentityInvitation as u32, 6); + assert_eq!(FFIAccountKind::ProviderVotingKeys as u32, 7); + assert_eq!(FFIAccountKind::ProviderOwnerKeys as u32, 8); + assert_eq!(FFIAccountKind::ProviderOperatorKeys as u32, 9); + assert_eq!(FFIAccountKind::ProviderPlatformKeys as u32, 10); } #[test] @@ -144,7 +144,7 @@ mod tests { assert_eq!(error.code, FFIErrorCode::Success); // Get an account - let result = unsafe { wallet_get_account(wallet, 0, FFIAccountType::StandardBIP44) }; + let result = unsafe { wallet_get_account(wallet, 0, FFIAccountKind::StandardBIP44) }; if !result.account.is_null() { // Test all the getter functions @@ -167,7 +167,7 @@ mod tests { // Test get account type let mut index = 999u32; let account_type = account_get_account_type(result.account, &mut index); - assert_eq!(account_type as u32, FFIAccountType::StandardBIP44 as u32); + assert_eq!(account_type as u32, FFIAccountKind::StandardBIP44 as u32); assert_eq!(index, 0); // Account index should be 0 // Test is watch only - should be false for a wallet created from mnemonic @@ -206,12 +206,12 @@ mod tests { let mut index = 0u32; let account_type = unsafe { account_get_account_type(ptr::null(), &mut index) }; - assert_eq!(account_type as u32, FFIAccountType::StandardBIP44 as u32); + assert_eq!(account_type as u32, FFIAccountKind::StandardBIP44 as u32); assert_eq!(index, 0); // Test with null out_index let account_type = unsafe { account_get_account_type(ptr::null(), ptr::null_mut()) }; - assert_eq!(account_type as u32, FFIAccountType::StandardBIP44 as u32); + assert_eq!(account_type as u32, FFIAccountKind::StandardBIP44 as u32); let is_watch_only = unsafe { 
account_get_is_watch_only(ptr::null()) }; assert!(!is_watch_only); diff --git a/key-wallet-ffi/src/address_pool.rs b/key-wallet-ffi/src/address_pool.rs index 487e1b292..a42123429 100644 --- a/key-wallet-ffi/src/address_pool.rs +++ b/key-wallet-ffi/src/address_pool.rs @@ -8,7 +8,7 @@ use std::os::raw::{c_char, c_uint}; use crate::error::{FFIError, FFIErrorCode}; use crate::managed_wallet::FFIManagedWalletInfo; -use crate::types::{FFIAccountType, FFIWallet}; +use crate::types::{FFIAccountKind, FFIWallet}; use crate::utils::rust_string_to_c; use crate::{check_ptr, deref_ptr, deref_ptr_mut, unwrap_or_return}; use key_wallet::account::ManagedAccountCollection; @@ -285,7 +285,7 @@ pub struct FFIAddressPoolInfo { #[no_mangle] pub unsafe extern "C" fn managed_wallet_get_address_pool_info( managed_wallet: *const FFIManagedWalletInfo, - account_type: FFIAccountType, + account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, info_out: *mut FFIAddressPoolInfo, @@ -372,7 +372,7 @@ pub unsafe extern "C" fn managed_wallet_get_address_pool_info( #[no_mangle] pub unsafe extern "C" fn managed_wallet_set_gap_limit( managed_wallet: *mut FFIManagedWalletInfo, - account_type: FFIAccountType, + account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, gap_limit: c_uint, @@ -447,7 +447,7 @@ pub unsafe extern "C" fn managed_wallet_set_gap_limit( pub unsafe extern "C" fn managed_wallet_generate_addresses_to_index( managed_wallet: *mut FFIManagedWalletInfo, wallet: *const FFIWallet, - account_type: FFIAccountType, + account_type: FFIAccountKind, account_index: c_uint, pool_type: FFIAddressPoolType, target_index: c_uint, @@ -977,7 +977,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); assert!(!result.account.is_null()); @@ -1076,7 +1076,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); 
assert!(!result.account.is_null()); diff --git a/key-wallet-ffi/src/managed_account.rs b/key-wallet-ffi/src/managed_account.rs index 0feda56a5..a4c273d43 100644 --- a/key-wallet-ffi/src/managed_account.rs +++ b/key-wallet-ffi/src/managed_account.rs @@ -14,7 +14,7 @@ use crate::address_pool::{FFIAddressPool, FFIAddressPoolType}; use crate::check_ptr; use crate::error::{FFIError, FFIErrorCode}; use crate::types::{ - FFIAccountType, FFIInputDetail, FFIOutputDetail, FFITransactionContext, + FFIAccountKind, FFIInputDetail, FFIOutputDetail, FFITransactionContext, FFITransactionDirection, FFITransactionType, }; use crate::wallet_manager::FFIWalletManager; @@ -185,7 +185,7 @@ pub unsafe extern "C" fn managed_wallet_get_account( manager: *const FFIWalletManager, wallet_id: *const u8, account_index: c_uint, - account_type: FFIAccountType, + account_type: FFIAccountKind, ) -> FFIManagedCoreAccountResult { if manager.is_null() { return FFIManagedCoreAccountResult::error( @@ -529,9 +529,9 @@ pub unsafe extern "C" fn managed_core_account_get_parent_wallet_id( pub unsafe extern "C" fn managed_core_account_get_account_type( account: *const FFIManagedCoreAccount, index_out: *mut c_uint, -) -> FFIAccountType { +) -> FFIAccountKind { if account.is_null() { - return FFIAccountType::StandardBIP44; // Default type + return FFIAccountKind::StandardBIP44; // Default type } let account = &*account; @@ -551,36 +551,36 @@ pub unsafe extern "C" fn managed_core_account_get_account_type( } => { use key_wallet::account::StandardAccountType; match standard_account_type { - StandardAccountType::BIP44Account => FFIAccountType::StandardBIP44, - StandardAccountType::BIP32Account => FFIAccountType::StandardBIP32, + StandardAccountType::BIP44Account => FFIAccountKind::StandardBIP44, + StandardAccountType::BIP32Account => FFIAccountKind::StandardBIP32, } } AccountType::CoinJoin { .. 
- } => FFIAccountType::CoinJoin, - AccountType::IdentityRegistration => FFIAccountType::IdentityRegistration, + } => FFIAccountKind::CoinJoin, + AccountType::IdentityRegistration => FFIAccountKind::IdentityRegistration, AccountType::IdentityTopUp { .. - } => FFIAccountType::IdentityTopUp, + } => FFIAccountKind::IdentityTopUp, AccountType::IdentityTopUpNotBoundToIdentity => { - FFIAccountType::IdentityTopUpNotBoundToIdentity + FFIAccountKind::IdentityTopUpNotBoundToIdentity } - AccountType::IdentityInvitation => FFIAccountType::IdentityInvitation, - AccountType::AssetLockAddressTopUp => FFIAccountType::AssetLockAddressTopUp, - AccountType::AssetLockShieldedAddressTopUp => FFIAccountType::AssetLockShieldedAddressTopUp, - AccountType::ProviderVotingKeys => FFIAccountType::ProviderVotingKeys, - AccountType::ProviderOwnerKeys => FFIAccountType::ProviderOwnerKeys, - AccountType::ProviderOperatorKeys => FFIAccountType::ProviderOperatorKeys, - AccountType::ProviderPlatformKeys => FFIAccountType::ProviderPlatformKeys, + AccountType::IdentityInvitation => FFIAccountKind::IdentityInvitation, + AccountType::AssetLockAddressTopUp => FFIAccountKind::AssetLockAddressTopUp, + AccountType::AssetLockShieldedAddressTopUp => FFIAccountKind::AssetLockShieldedAddressTopUp, + AccountType::ProviderVotingKeys => FFIAccountKind::ProviderVotingKeys, + AccountType::ProviderOwnerKeys => FFIAccountKind::ProviderOwnerKeys, + AccountType::ProviderOperatorKeys => FFIAccountKind::ProviderOperatorKeys, + AccountType::ProviderPlatformKeys => FFIAccountKind::ProviderPlatformKeys, AccountType::DashpayReceivingFunds { .. - } => FFIAccountType::DashpayReceivingFunds, + } => FFIAccountKind::DashpayReceivingFunds, AccountType::DashpayExternalAccount { .. - } => FFIAccountType::DashpayExternalAccount, + } => FFIAccountKind::DashpayExternalAccount, AccountType::PlatformPayment { .. 
- } => FFIAccountType::PlatformPayment, + } => FFIAccountKind::PlatformPayment, } } @@ -664,6 +664,134 @@ pub unsafe extern "C" fn managed_core_account_get_utxo_count( account.inner().utxos.len() as c_uint } +/// FFI-compatible owning-account descriptor for a [`FFITransactionRecord`]. +/// +/// Mirrors the Rust-side `TransactionRecord::account_type`. `kind` is the +/// discriminant; `index` is the primary index (`0` for variants that have no +/// meaningful primary index — identity-singletons, provider-key, asset-lock); +/// `index_secondary` carries the secondary index (`registration_index` for +/// `IdentityTopUp`, `key_class` for `PlatformPayment`) or `-1` when not +/// applicable. The `identity_user` and `identity_friend` pointers are non-null +/// only for the Dashpay variants and point to 32-byte identity hashes owned by +/// this struct (freed by its `Drop` impl). `key_class` is `-1` unless +/// this is a `PlatformPayment` record, in which case it carries the `key_class` +/// hardened index (also exposed in `index_secondary` for symmetry with the +/// existing FFI tuple contract). +#[repr(C)] +pub struct FFIAccountType { + /// Discriminant identifying the owning account variant. + pub kind: FFIAccountKind, + /// Primary account index for variants that carry one. + pub index: u32, + /// Secondary account index when applicable, `-1` otherwise. + pub index_secondary: i32, + /// Pointer to the 32-byte `user_identity_id` of the Dashpay account that + /// owns this record, null when the account is not a Dashpay variant. The + /// pointee is owned by this struct and freed when it is dropped. + pub identity_user: *const [u8; 32], + /// Pointer to the 32-byte `friend_identity_id` of the Dashpay account + /// that owns this record, null when the account is not a Dashpay variant. + /// The pointee is owned by this struct and freed when it is dropped. 
+ pub identity_friend: *const [u8; 32], + /// `PlatformPayment` `key_class` hardened index, `-1` for any other + /// account variant. Mirrors `index_secondary` for `PlatformPayment`. + pub key_class: i32, +} + +impl From<&AccountType> for FFIAccountType { + fn from(account_type: &AccountType) -> Self { + use key_wallet::account::StandardAccountType; + let (kind, index, index_secondary) = match *account_type { + AccountType::Standard { + index, + standard_account_type: StandardAccountType::BIP44Account, + } => (FFIAccountKind::StandardBIP44, index, -1), + AccountType::Standard { + index, + standard_account_type: StandardAccountType::BIP32Account, + } => (FFIAccountKind::StandardBIP32, index, -1), + AccountType::CoinJoin { + index, + } => (FFIAccountKind::CoinJoin, index, -1), + AccountType::IdentityRegistration => (FFIAccountKind::IdentityRegistration, 0, -1), + AccountType::IdentityTopUp { + registration_index, + } => (FFIAccountKind::IdentityTopUp, 0, registration_index as i32), + AccountType::IdentityTopUpNotBoundToIdentity => { + (FFIAccountKind::IdentityTopUpNotBoundToIdentity, 0, -1) + } + AccountType::IdentityInvitation => (FFIAccountKind::IdentityInvitation, 0, -1), + AccountType::AssetLockAddressTopUp => (FFIAccountKind::AssetLockAddressTopUp, 0, -1), + AccountType::AssetLockShieldedAddressTopUp => { + (FFIAccountKind::AssetLockShieldedAddressTopUp, 0, -1) + } + AccountType::ProviderVotingKeys => (FFIAccountKind::ProviderVotingKeys, 0, -1), + AccountType::ProviderOwnerKeys => (FFIAccountKind::ProviderOwnerKeys, 0, -1), + AccountType::ProviderOperatorKeys => (FFIAccountKind::ProviderOperatorKeys, 0, -1), + AccountType::ProviderPlatformKeys => (FFIAccountKind::ProviderPlatformKeys, 0, -1), + AccountType::DashpayReceivingFunds { + index, + .. + } => (FFIAccountKind::DashpayReceivingFunds, index, -1), + AccountType::DashpayExternalAccount { + index, + .. 
+ } => (FFIAccountKind::DashpayExternalAccount, index, -1), + AccountType::PlatformPayment { + account, + key_class, + } => (FFIAccountKind::PlatformPayment, account, key_class as i32), + }; + + let (identity_user, identity_friend) = match *account_type { + AccountType::DashpayReceivingFunds { + user_identity_id, + friend_identity_id, + .. + } + | AccountType::DashpayExternalAccount { + user_identity_id, + friend_identity_id, + .. + } => ( + Box::into_raw(Box::new(user_identity_id)) as *const [u8; 32], + Box::into_raw(Box::new(friend_identity_id)) as *const [u8; 32], + ), + _ => (std::ptr::null(), std::ptr::null()), + }; + + let key_class = match *account_type { + AccountType::PlatformPayment { + key_class, + .. + } => key_class as i32, + _ => -1, + }; + + FFIAccountType { + kind, + index, + index_secondary, + identity_user, + identity_friend, + key_class, + } + } +} + +impl Drop for FFIAccountType { + fn drop(&mut self) { + if !self.identity_user.is_null() { + let _ = unsafe { Box::from_raw(self.identity_user as *mut [u8; 32]) }; + self.identity_user = std::ptr::null(); + } + if !self.identity_friend.is_null() { + let _ = unsafe { Box::from_raw(self.identity_friend as *mut [u8; 32]) }; + self.identity_friend = std::ptr::null(); + } + } +} + /// FFI-compatible transaction record /// /// Heap-allocated fields are freed automatically when the record is dropped @@ -682,6 +810,8 @@ pub struct FFITransactionRecord { pub direction: FFITransactionDirection, /// Fee if known, 0 if unknown pub fee: u64, + /// Owning-account descriptor (discriminant + indices + identity ids). 
+ pub account_type: FFIAccountType, /// Input details array pub input_details: *mut FFIInputDetail, /// Number of input details @@ -707,6 +837,8 @@ impl From<&TransactionRecord> for FFITransactionRecord { let direction = FFITransactionDirection::from(value.direction); let fee = value.fee.unwrap_or(0); + let account_type = FFIAccountType::from(&value.account_type); + // Serialize transaction bytes let tx_slice = dashcore::consensus::serialize(&value.transaction).into_boxed_slice(); let tx_len = tx_slice.len(); @@ -750,6 +882,7 @@ impl From<&TransactionRecord> for FFITransactionRecord { transaction_type, direction, fee, + account_type, input_details, input_details_count, output_details, @@ -1488,7 +1621,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); assert!(!result.account.is_null()); @@ -1550,7 +1683,7 @@ mod tests { // Try to get a non-existent CoinJoin account let mut result = - managed_wallet_get_account(manager, wallet_ids_out, 0, FFIAccountType::CoinJoin); + managed_wallet_get_account(manager, wallet_ids_out, 0, FFIAccountKind::CoinJoin); assert!(result.account.is_null()); assert_ne!(result.error_code, 0); @@ -1677,7 +1810,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); assert!(!result.account.is_null()); @@ -1693,7 +1826,7 @@ mod tests { // Test get_account_type let mut index_out: c_uint = 999; // Initialize with unexpected value let account_type = managed_core_account_get_account_type(account, &mut index_out); - assert_eq!(account_type, FFIAccountType::StandardBIP44); + assert_eq!(account_type, FFIAccountKind::StandardBIP44); assert_eq!(index_out, 0); // Test get_is_watch_only @@ -1745,7 +1878,7 @@ mod tests { let mut index_out: c_uint = 0; let account_type = managed_core_account_get_account_type(ptr::null(), &mut index_out); - assert_eq!(account_type, FFIAccountType::StandardBIP44); // Default type + assert_eq!(account_type, 
FFIAccountKind::StandardBIP44); // Default type let is_watch_only = managed_core_account_get_is_watch_only(ptr::null()); assert!(!is_watch_only); @@ -1795,7 +1928,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); assert!(!result.account.is_null()); @@ -1853,7 +1986,7 @@ mod tests { manager, wallet_ids_out, 0, - FFIAccountType::StandardBIP44, + FFIAccountKind::StandardBIP44, ); assert!(!result.account.is_null()); @@ -1939,7 +2072,7 @@ mod tests { // Get CoinJoin account let cj_result = - managed_wallet_get_account(manager, wallet_ids_out, 0, FFIAccountType::CoinJoin); + managed_wallet_get_account(manager, wallet_ids_out, 0, FFIAccountKind::CoinJoin); assert!(!cj_result.account.is_null()); let cj_account = cj_result.account; @@ -2014,6 +2147,14 @@ mod tests { transaction_type: FFITransactionType::Standard, direction: FFITransactionDirection::Incoming, fee: 226, + account_type: FFIAccountType { + kind: FFIAccountKind::StandardBIP44, + index: 0, + index_secondary: -1, + identity_user: std::ptr::null(), + identity_friend: std::ptr::null(), + key_class: -1, + }, input_details_count: input_slice.len(), input_details: Box::into_raw(input_slice) as *mut FFIInputDetail, output_details_count: output_slice.len(), @@ -2038,6 +2179,14 @@ mod tests { transaction_type: FFITransactionType::Standard, direction: FFITransactionDirection::Outgoing, fee: 0, + account_type: FFIAccountType { + kind: FFIAccountKind::StandardBIP44, + index: 0, + index_secondary: -1, + identity_user: std::ptr::null(), + identity_friend: std::ptr::null(), + key_class: -1, + }, input_details: std::ptr::null_mut(), input_details_count: 0, output_details: std::ptr::null_mut(), diff --git a/key-wallet-ffi/src/transaction_checking.rs b/key-wallet-ffi/src/transaction_checking.rs index 1cc177977..1875a89f0 100644 --- a/key-wallet-ffi/src/transaction_checking.rs +++ b/key-wallet-ffi/src/transaction_checking.rs @@ -26,7 +26,7 @@ use 
key_wallet::wallet::managed_wallet_info::ManagedWalletInfo; /// Account type match result #[repr(C)] pub struct FFIAccountMatch { - /// Account type ID (matches FFIAccountType enum values) + /// Account type ID (matches FFIAccountKind enum values) pub account_type: c_uint, /// Account index (if applicable) pub account_index: c_uint, diff --git a/key-wallet-ffi/src/types.rs b/key-wallet-ffi/src/types.rs index 58447e4f2..f11a840e3 100644 --- a/key-wallet-ffi/src/types.rs +++ b/key-wallet-ffi/src/types.rs @@ -203,7 +203,7 @@ pub enum FFIStandardAccountType { /// - Provider accounts: Various masternode provider key types (voting, owner, operator, platform) #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq)] -pub enum FFIAccountType { +pub enum FFIAccountKind { /// Standard BIP44 account (m/44'/coin_type'/account'/x/x) StandardBIP44 = 0, /// Standard BIP32 account (m/account'/x/x) @@ -238,42 +238,42 @@ pub enum FFIAccountType { AssetLockShieldedAddressTopUp = 15, } -impl FFIAccountType { +impl FFIAccountKind { /// Convert to AccountType with the provided index (used where applicable). /// For types needing an index (e.g., IdentityTopUp.registration_index), the provided index is used. 
pub fn to_account_type(self, index: u32) -> key_wallet::AccountType { use key_wallet::account::account_type::StandardAccountType; match self { - FFIAccountType::StandardBIP44 => key_wallet::AccountType::Standard { + FFIAccountKind::StandardBIP44 => key_wallet::AccountType::Standard { index, standard_account_type: StandardAccountType::BIP44Account, }, - FFIAccountType::StandardBIP32 => key_wallet::AccountType::Standard { + FFIAccountKind::StandardBIP32 => key_wallet::AccountType::Standard { index, standard_account_type: StandardAccountType::BIP32Account, }, - FFIAccountType::CoinJoin => key_wallet::AccountType::CoinJoin { + FFIAccountKind::CoinJoin => key_wallet::AccountType::CoinJoin { index, }, - FFIAccountType::IdentityRegistration => key_wallet::AccountType::IdentityRegistration, - FFIAccountType::IdentityTopUp => { + FFIAccountKind::IdentityRegistration => key_wallet::AccountType::IdentityRegistration, + FFIAccountKind::IdentityTopUp => { // IdentityTopUp requires a registration_index key_wallet::AccountType::IdentityTopUp { registration_index: index, } } - FFIAccountType::IdentityTopUpNotBoundToIdentity => { + FFIAccountKind::IdentityTopUpNotBoundToIdentity => { key_wallet::AccountType::IdentityTopUpNotBoundToIdentity } - FFIAccountType::IdentityInvitation => key_wallet::AccountType::IdentityInvitation, - FFIAccountType::AssetLockAddressTopUp => key_wallet::AccountType::AssetLockAddressTopUp, - FFIAccountType::AssetLockShieldedAddressTopUp => { + FFIAccountKind::IdentityInvitation => key_wallet::AccountType::IdentityInvitation, + FFIAccountKind::AssetLockAddressTopUp => key_wallet::AccountType::AssetLockAddressTopUp, + FFIAccountKind::AssetLockShieldedAddressTopUp => { key_wallet::AccountType::AssetLockShieldedAddressTopUp } - FFIAccountType::ProviderVotingKeys => key_wallet::AccountType::ProviderVotingKeys, - FFIAccountType::ProviderOwnerKeys => key_wallet::AccountType::ProviderOwnerKeys, - FFIAccountType::ProviderOperatorKeys => 
key_wallet::AccountType::ProviderOperatorKeys, - FFIAccountType::ProviderPlatformKeys => key_wallet::AccountType::ProviderPlatformKeys, + FFIAccountKind::ProviderVotingKeys => key_wallet::AccountType::ProviderVotingKeys, + FFIAccountKind::ProviderOwnerKeys => key_wallet::AccountType::ProviderOwnerKeys, + FFIAccountKind::ProviderOperatorKeys => key_wallet::AccountType::ProviderOperatorKeys, + FFIAccountKind::ProviderPlatformKeys => key_wallet::AccountType::ProviderPlatformKeys, // DashPay variants require additional identity IDs (user_identity_id and friend_identity_id) // that are not part of the current FFI API. These types cannot be constructed via this // conversion path. Attempting to use them is a programming error. @@ -285,25 +285,25 @@ impl FFIAccountType { // - Or extend to_account_type to accept optional identity ID parameters // // Until then, attempting to convert these variants will panic to prevent silent misrouting. - FFIAccountType::DashpayReceivingFunds => { + FFIAccountKind::DashpayReceivingFunds => { panic!( - "FFIAccountType::DashpayReceivingFunds cannot be converted to AccountType \ + "FFIAccountKind::DashpayReceivingFunds cannot be converted to AccountType \ without user_identity_id and friend_identity_id. The FFI API does not yet \ support passing these 32-byte identity IDs. This is a programming error - \ DashPay account creation must use a different API path." ); } - FFIAccountType::DashpayExternalAccount => { + FFIAccountKind::DashpayExternalAccount => { panic!( - "FFIAccountType::DashpayExternalAccount cannot be converted to AccountType \ + "FFIAccountKind::DashpayExternalAccount cannot be converted to AccountType \ without user_identity_id and friend_identity_id. The FFI API does not yet \ support passing these 32-byte identity IDs. This is a programming error - \ DashPay account creation must use a different API path." 
); } - FFIAccountType::PlatformPayment => { + FFIAccountKind::PlatformPayment => { panic!( - "FFIAccountType::PlatformPayment cannot be converted to AccountType \ + "FFIAccountKind::PlatformPayment cannot be converted to AccountType \ without account and key_class indices. The FFI API does not yet \ support passing these values. This is a programming error - \ Platform Payment account creation must use a different API path." @@ -314,7 +314,7 @@ impl FFIAccountType { /// Convert from AccountType to FFI representation /// - /// Returns: (FFIAccountType, primary_index, optional_secondary_index) + /// Returns: (FFIAccountKind, primary_index, optional_secondary_index) /// /// # Panics /// @@ -331,41 +331,41 @@ impl FFIAccountType { index, standard_account_type, } => match standard_account_type { - StandardAccountType::BIP44Account => (FFIAccountType::StandardBIP44, *index, None), - StandardAccountType::BIP32Account => (FFIAccountType::StandardBIP32, *index, None), + StandardAccountType::BIP44Account => (FFIAccountKind::StandardBIP44, *index, None), + StandardAccountType::BIP32Account => (FFIAccountKind::StandardBIP32, *index, None), }, key_wallet::AccountType::CoinJoin { index, - } => (FFIAccountType::CoinJoin, *index, None), + } => (FFIAccountKind::CoinJoin, *index, None), key_wallet::AccountType::IdentityRegistration => { - (FFIAccountType::IdentityRegistration, 0, None) + (FFIAccountKind::IdentityRegistration, 0, None) } key_wallet::AccountType::IdentityTopUp { registration_index, - } => (FFIAccountType::IdentityTopUp, 0, Some(*registration_index)), + } => (FFIAccountKind::IdentityTopUp, 0, Some(*registration_index)), key_wallet::AccountType::IdentityTopUpNotBoundToIdentity => { - (FFIAccountType::IdentityTopUpNotBoundToIdentity, 0, None) + (FFIAccountKind::IdentityTopUpNotBoundToIdentity, 0, None) } key_wallet::AccountType::IdentityInvitation => { - (FFIAccountType::IdentityInvitation, 0, None) + (FFIAccountKind::IdentityInvitation, 0, None) } 
key_wallet::AccountType::AssetLockAddressTopUp => { - (FFIAccountType::AssetLockAddressTopUp, 0, None) + (FFIAccountKind::AssetLockAddressTopUp, 0, None) } key_wallet::AccountType::AssetLockShieldedAddressTopUp => { - (FFIAccountType::AssetLockShieldedAddressTopUp, 0, None) + (FFIAccountKind::AssetLockShieldedAddressTopUp, 0, None) } key_wallet::AccountType::ProviderVotingKeys => { - (FFIAccountType::ProviderVotingKeys, 0, None) + (FFIAccountKind::ProviderVotingKeys, 0, None) } key_wallet::AccountType::ProviderOwnerKeys => { - (FFIAccountType::ProviderOwnerKeys, 0, None) + (FFIAccountKind::ProviderOwnerKeys, 0, None) } key_wallet::AccountType::ProviderOperatorKeys => { - (FFIAccountType::ProviderOperatorKeys, 0, None) + (FFIAccountKind::ProviderOperatorKeys, 0, None) } key_wallet::AccountType::ProviderPlatformKeys => { - (FFIAccountType::ProviderPlatformKeys, 0, None) + (FFIAccountKind::ProviderPlatformKeys, 0, None) } key_wallet::AccountType::DashpayReceivingFunds { index, @@ -375,7 +375,7 @@ impl FFIAccountType { // Cannot convert DashPay accounts to FFI without losing identity ID information panic!( "Cannot convert AccountType::DashpayReceivingFunds (index={}, user_id={:?}, friend_id={:?}) \ - to FFI representation. The current FFI tuple format (FFIAccountType, u32, Option) \ + to FFI representation. The current FFI tuple format (FFIAccountKind, u32, Option) \ cannot represent the two 32-byte identity IDs required by DashPay accounts. \ This would result in silent data loss. A dedicated FFI API for DashPay accounts is needed.", index, @@ -391,7 +391,7 @@ impl FFIAccountType { // Cannot convert DashPay accounts to FFI without losing identity ID information panic!( "Cannot convert AccountType::DashpayExternalAccount (index={}, user_id={:?}, friend_id={:?}) \ - to FFI representation. The current FFI tuple format (FFIAccountType, u32, Option) \ + to FFI representation. 
The current FFI tuple format (FFIAccountKind, u32, Option) \ cannot represent the two 32-byte identity IDs required by DashPay accounts. \ This would result in silent data loss. A dedicated FFI API for DashPay accounts is needed.", index, @@ -402,7 +402,7 @@ impl FFIAccountType { key_wallet::AccountType::PlatformPayment { account, key_class, - } => (FFIAccountType::PlatformPayment, *account, Some(*key_class)), + } => (FFIAccountKind::PlatformPayment, *account, Some(*key_class)), } } } @@ -499,8 +499,8 @@ pub struct FFIWalletAccountCreationOptions { /// For SpecificAccounts: Additional special account types to create /// (e.g., IdentityRegistration, ProviderKeys, etc.) - /// This is an array of FFIAccountType values - pub special_account_types: *const FFIAccountType, + /// This is an array of FFIAccountKind values + pub special_account_types: *const FFIAccountKind, pub special_account_types_count: usize, } @@ -956,21 +956,21 @@ mod tests { #[should_panic(expected = "DashpayReceivingFunds cannot be converted to AccountType")] fn test_dashpay_receiving_funds_to_account_type_panics() { // This should panic because we cannot construct a DashPay account without identity IDs - let _ = FFIAccountType::DashpayReceivingFunds.to_account_type(0); + let _ = FFIAccountKind::DashpayReceivingFunds.to_account_type(0); } #[test] #[should_panic(expected = "DashpayExternalAccount cannot be converted to AccountType")] fn test_dashpay_external_account_to_account_type_panics() { // This should panic because we cannot construct a DashPay account without identity IDs - let _ = FFIAccountType::DashpayExternalAccount.to_account_type(0); + let _ = FFIAccountKind::DashpayExternalAccount.to_account_type(0); } #[test] #[should_panic(expected = "PlatformPayment cannot be converted to AccountType")] fn test_platform_payment_to_account_type_panics() { // This should panic because we cannot construct a Platform Payment account without indices - let _ = 
FFIAccountType::PlatformPayment.to_account_type(0); + let _ = FFIAccountKind::PlatformPayment.to_account_type(0); } #[test] @@ -982,7 +982,7 @@ mod tests { user_identity_id: [1u8; 32], friend_identity_id: [2u8; 32], }; - let _ = FFIAccountType::from_account_type(&account_type); + let _ = FFIAccountKind::from_account_type(&account_type); } #[test] @@ -994,13 +994,13 @@ mod tests { user_identity_id: [1u8; 32], friend_identity_id: [2u8; 32], }; - let _ = FFIAccountType::from_account_type(&account_type); + let _ = FFIAccountKind::from_account_type(&account_type); } #[test] fn test_non_dashpay_conversions_work() { // Verify that non-DashPay types still convert correctly - let standard_bip44 = FFIAccountType::StandardBIP44.to_account_type(5); + let standard_bip44 = FFIAccountKind::StandardBIP44.to_account_type(5); assert!(matches!( standard_bip44, key_wallet::AccountType::Standard { @@ -1009,7 +1009,7 @@ mod tests { } )); - let coinjoin = FFIAccountType::CoinJoin.to_account_type(3); + let coinjoin = FFIAccountKind::CoinJoin.to_account_type(3); assert!(matches!( coinjoin, key_wallet::AccountType::CoinJoin { @@ -1018,8 +1018,8 @@ mod tests { )); // Test reverse conversion - let (ffi_type, index, _) = FFIAccountType::from_account_type(&standard_bip44); - assert_eq!(ffi_type, FFIAccountType::StandardBIP44); + let (ffi_type, index, _) = FFIAccountKind::from_account_type(&standard_bip44); + assert_eq!(ffi_type, FFIAccountKind::StandardBIP44); assert_eq!(index, 5); } diff --git a/key-wallet-ffi/src/wallet.rs b/key-wallet-ffi/src/wallet.rs index 83ad81306..b29802747 100644 --- a/key-wallet-ffi/src/wallet.rs +++ b/key-wallet-ffi/src/wallet.rs @@ -335,10 +335,10 @@ pub unsafe extern "C" fn wallet_free_const(wallet: *const FFIWallet) { #[no_mangle] pub unsafe extern "C" fn wallet_add_account( wallet: *mut FFIWallet, - account_type: crate::types::FFIAccountType, + account_type: crate::types::FFIAccountKind, account_index: c_uint, ) -> crate::types::FFIAccountResult { - use 
crate::types::FFIAccountType; + use crate::types::FFIAccountKind; if wallet.is_null() { return crate::types::FFIAccountResult::error( @@ -349,7 +349,7 @@ pub unsafe extern "C" fn wallet_add_account( // Check for account types that require special handling match account_type { - FFIAccountType::PlatformPayment => { + FFIAccountKind::PlatformPayment => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "PlatformPayment accounts require account and key_class indices. \ @@ -357,7 +357,7 @@ pub unsafe extern "C" fn wallet_add_account( .to_string(), ); } - FFIAccountType::DashpayReceivingFunds => { + FFIAccountKind::DashpayReceivingFunds => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayReceivingFunds accounts require identity IDs. \ @@ -365,7 +365,7 @@ pub unsafe extern "C" fn wallet_add_account( .to_string(), ); } - FFIAccountType::DashpayExternalAccount => { + FFIAccountKind::DashpayExternalAccount => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayExternalAccount accounts require identity IDs. 
\ @@ -553,12 +553,12 @@ pub unsafe extern "C" fn wallet_add_dashpay_external_account_with_xpub_bytes( #[no_mangle] pub unsafe extern "C" fn wallet_add_account_with_xpub_bytes( wallet: *mut FFIWallet, - account_type: crate::types::FFIAccountType, + account_type: crate::types::FFIAccountKind, account_index: c_uint, xpub_bytes: *const u8, xpub_len: usize, ) -> crate::types::FFIAccountResult { - use crate::types::FFIAccountType; + use crate::types::FFIAccountKind; if wallet.is_null() { return crate::types::FFIAccountResult::error( @@ -576,7 +576,7 @@ pub unsafe extern "C" fn wallet_add_account_with_xpub_bytes( // Check for account types that require special handling match account_type { - FFIAccountType::PlatformPayment => { + FFIAccountKind::PlatformPayment => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "PlatformPayment accounts require account and key_class indices. \ @@ -584,7 +584,7 @@ pub unsafe extern "C" fn wallet_add_account_with_xpub_bytes( .to_string(), ); } - FFIAccountType::DashpayReceivingFunds => { + FFIAccountKind::DashpayReceivingFunds => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayReceivingFunds accounts require identity IDs. \ @@ -592,7 +592,7 @@ pub unsafe extern "C" fn wallet_add_account_with_xpub_bytes( .to_string(), ); } - FFIAccountType::DashpayExternalAccount => { + FFIAccountKind::DashpayExternalAccount => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayExternalAccount accounts require identity IDs. 
\ @@ -677,11 +677,11 @@ pub unsafe extern "C" fn wallet_add_account_with_xpub_bytes( #[no_mangle] pub unsafe extern "C" fn wallet_add_account_with_string_xpub( wallet: *mut FFIWallet, - account_type: crate::types::FFIAccountType, + account_type: crate::types::FFIAccountKind, account_index: c_uint, xpub_string: *const c_char, ) -> crate::types::FFIAccountResult { - use crate::types::FFIAccountType; + use crate::types::FFIAccountKind; if wallet.is_null() { return crate::types::FFIAccountResult::error( @@ -699,7 +699,7 @@ pub unsafe extern "C" fn wallet_add_account_with_string_xpub( // Check for account types that require special handling match account_type { - FFIAccountType::PlatformPayment => { + FFIAccountKind::PlatformPayment => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "PlatformPayment accounts require account and key_class indices. \ @@ -707,7 +707,7 @@ pub unsafe extern "C" fn wallet_add_account_with_string_xpub( .to_string(), ); } - FFIAccountType::DashpayReceivingFunds => { + FFIAccountKind::DashpayReceivingFunds => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayReceivingFunds accounts require identity IDs. \ @@ -715,7 +715,7 @@ pub unsafe extern "C" fn wallet_add_account_with_string_xpub( .to_string(), ); } - FFIAccountType::DashpayExternalAccount => { + FFIAccountKind::DashpayExternalAccount => { return crate::types::FFIAccountResult::error( FFIErrorCode::InvalidInput, "DashpayExternalAccount accounts require identity IDs. 
\ diff --git a/key-wallet-ffi/src/wallet_tests.rs b/key-wallet-ffi/src/wallet_tests.rs index d3a67f571..de13d4aa8 100644 --- a/key-wallet-ffi/src/wallet_tests.rs +++ b/key-wallet-ffi/src/wallet_tests.rs @@ -4,7 +4,7 @@ mod wallet_tests { use crate::account::account_free; use crate::error::{FFIError, FFIErrorCode}; - use crate::types::FFIAccountType; + use crate::types::FFIAccountKind; use crate::wallet; use dash_network::ffi::FFINetwork; use std::ffi::CString; @@ -284,7 +284,7 @@ mod wallet_tests { // Test adding account - check if it succeeds or fails gracefully let result = - unsafe { wallet::wallet_add_account(wallet, FFIAccountType::StandardBIP44, 1) }; + unsafe { wallet::wallet_add_account(wallet, FFIAccountKind::StandardBIP44, 1) }; // Some implementations may not support adding accounts, so just verify it doesn't crash // and the error code is set appropriately assert!(!result.account.is_null() || result.error_code != 0); @@ -313,7 +313,7 @@ mod wallet_tests { fn test_wallet_add_account_null() { // Test with null wallet let result = unsafe { - wallet::wallet_add_account(ptr::null_mut(), FFIAccountType::StandardBIP44, 0) + wallet::wallet_add_account(ptr::null_mut(), FFIAccountKind::StandardBIP44, 0) }; assert!(result.account.is_null()); assert_ne!(result.error_code, 0); diff --git a/key-wallet-ffi/tests/test_managed_account_collection.rs b/key-wallet-ffi/tests/test_managed_account_collection.rs index 5517a6060..7baa8d9b9 100644 --- a/key-wallet-ffi/tests/test_managed_account_collection.rs +++ b/key-wallet-ffi/tests/test_managed_account_collection.rs @@ -101,10 +101,10 @@ fn test_managed_account_collection_with_special_accounts() { // Add various special accounts let special_types = [ - key_wallet_ffi::types::FFIAccountType::ProviderVotingKeys, - key_wallet_ffi::types::FFIAccountType::ProviderOwnerKeys, - key_wallet_ffi::types::FFIAccountType::IdentityRegistration, - key_wallet_ffi::types::FFIAccountType::IdentityInvitation, + 
key_wallet_ffi::types::FFIAccountKind::ProviderVotingKeys, + key_wallet_ffi::types::FFIAccountKind::ProviderOwnerKeys, + key_wallet_ffi::types::FFIAccountKind::IdentityRegistration, + key_wallet_ffi::types::FFIAccountKind::IdentityInvitation, ]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); @@ -229,9 +229,9 @@ fn test_managed_account_collection_summary() { // Add various special accounts let special_types = [ - key_wallet_ffi::types::FFIAccountType::ProviderVotingKeys, - key_wallet_ffi::types::FFIAccountType::ProviderOwnerKeys, - key_wallet_ffi::types::FFIAccountType::IdentityRegistration, + key_wallet_ffi::types::FFIAccountKind::ProviderVotingKeys, + key_wallet_ffi::types::FFIAccountKind::ProviderOwnerKeys, + key_wallet_ffi::types::FFIAccountKind::IdentityRegistration, ]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); @@ -310,8 +310,8 @@ fn test_managed_account_collection_summary_data() { // Add various special accounts let special_types = [ - key_wallet_ffi::types::FFIAccountType::IdentityRegistration, - key_wallet_ffi::types::FFIAccountType::IdentityInvitation, + key_wallet_ffi::types::FFIAccountKind::IdentityRegistration, + key_wallet_ffi::types::FFIAccountKind::IdentityInvitation, ]; options.special_account_types = special_types.as_ptr(); options.special_account_types_count = special_types.len(); diff --git a/key-wallet-manager/src/accessors.rs b/key-wallet-manager/src/accessors.rs index 1475fbd35..8095b64b5 100644 --- a/key-wallet-manager/src/accessors.rs +++ b/key-wallet-manager/src/accessors.rs @@ -178,29 +178,10 @@ impl WalletManager { } /// Snapshot the current balance of every managed wallet. 
- pub(crate) fn snapshot_balances(&self) -> Vec<(WalletId, WalletCoreBalance)> { + pub(crate) fn snapshot_balances(&self) -> BTreeMap { self.wallet_infos.iter().map(|(id, info)| (*id, info.balance())).collect() } - /// Emit `BalanceUpdated` events for wallets whose balance differs from the snapshot. - pub(crate) fn emit_balance_changes(&self, old_balances: &[(WalletId, WalletCoreBalance)]) { - for (wallet_id, old_balance) in old_balances { - if let Some(info) = self.wallet_infos.get(wallet_id) { - let new_balance = info.balance(); - if *old_balance != new_balance { - let event = WalletEvent::BalanceUpdated { - wallet_id: *wallet_id, - confirmed: new_balance.confirmed(), - unconfirmed: new_balance.unconfirmed(), - immature: new_balance.immature(), - locked: new_balance.locked(), - }; - let _ = self.event_sender.send(event); - } - } - } - } - /// Get all outpoints from wallet UTXOs across all managed wallets. /// Used for bloom filter construction to detect spends of our UTXOs. pub fn watched_outpoints(&self) -> Vec { diff --git a/key-wallet-manager/src/event_tests.rs b/key-wallet-manager/src/event_tests.rs index 21b206bce..fdbe91a20 100644 --- a/key-wallet-manager/src/event_tests.rs +++ b/key-wallet-manager/src/event_tests.rs @@ -1,689 +1,595 @@ use super::test_helpers::*; use super::*; use crate::wallet_interface::WalletInterface; +use dashcore::block::{Block, Header, Version}; +use dashcore::blockdata::script::Builder; +use dashcore::blockdata::transaction::special_transaction::asset_lock::AssetLockPayload; +use dashcore::blockdata::transaction::special_transaction::TransactionPayload; use dashcore::bls_sig_utils::BLSSignature; use dashcore::ephemerealdata::instant_lock::InstantLock; use dashcore::hash_types::CycleHash; use dashcore::hashes::Hash; -use dashcore::BlockHash; -use key_wallet::transaction_checking::BlockInfo; +use dashcore::opcodes; +use dashcore::{ + BlockHash, CompactTarget, OutPoint, ScriptBuf, TxIn, TxMerkleNode, TxOut, Txid, Witness, +}; +use 
key_wallet::account::StandardAccountType; +use key_wallet::AccountType; use std::collections::BTreeSet; +fn make_block(txdata: Vec, seed: u8, time: u32) -> Block { + Block { + header: Header { + version: Version::default(), + prev_blockhash: BlockHash::from_byte_array([seed; 32]), + merkle_root: TxMerkleNode::all_zeros(), + time, + bits: CompactTarget::from_consensus(0x1d00ffff), + nonce: 0, + }, + txdata, + } +} + +fn make_coinbase_paying_to(addr: &Address, value: u64) -> Transaction { + Transaction { + version: 2, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: Txid::all_zeros(), + vout: 0xffffffff, + }, + script_sig: ScriptBuf::new(), + sequence: 0xffffffff, + witness: Witness::default(), + }], + output: vec![TxOut { + value, + script_pubkey: addr.script_pubkey(), + }], + special_transaction_payload: None, + } +} + // --------------------------------------------------------------------------- -// Lifecycle flow tests +// Mempool path // --------------------------------------------------------------------------- #[tokio::test] -async fn test_mempool_to_confirmed_event_flow() { +async fn test_mempool_tx_emits_single_event_with_balance() { let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); let tx = create_tx_paying_to(&addr, 0xaa); - // First time in mempool — validate all event fields - manager.check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true).await; - let event = assert_single_event(&mut rx); - match event { - WalletEvent::TransactionReceived { - wallet_id: ev_wid, + manager.process_mempool_transaction(&tx, None).await; + + let events = drain_events(&mut rx); + assert_eq!(events.len(), 1, "exactly one event expected, got {:?}", events); + match &events[0] { + WalletEvent::TransactionDetected { + wallet_id: wid, record, - .. 
+ balance, } => { - assert_eq!(record.context, TransactionContext::Mempool); + assert_eq!(*wid, wallet_id); assert_eq!(record.txid, tx.txid()); - assert_eq!(ev_wid, wallet_id); + assert_eq!(record.context, TransactionContext::Mempool); assert_eq!(record.net_amount, TX_AMOUNT as i64); + assert!(matches!( + record.account_type, + AccountType::Standard { + index: 0, + standard_account_type: StandardAccountType::BIP44Account + } + )); + assert_eq!(balance.unconfirmed(), TX_AMOUNT); + assert_eq!(balance.confirmed(), 0); } - other => panic!("expected TransactionReceived, got {:?}", other), - } - - // Same tx now confirmed in a block - let block_ctx = TransactionContext::InBlock(BlockInfo::new( - 100, - BlockHash::from_byte_array([0xaa; 32]), - 1000, - )); - manager.check_transaction_in_all_wallets(&tx, block_ctx, true, true).await; - let event = assert_single_event(&mut rx); - match event { - WalletEvent::TransactionStatusChanged { - wallet_id: ev_wid, - txid: ev_txid, - status, - } => { - assert_eq!(ev_wid, wallet_id); - assert_eq!(ev_txid, tx.txid()); - assert!( - matches!( - status, - TransactionContext::InBlock(info) if info.height() == 100 - ), - "expected InBlock(100), got {:?}", - status - ); - } - other => panic!("expected TransactionStatusChanged, got {:?}", other), + other => panic!("expected TransactionDetected, got {:?}", other), } } #[tokio::test] -async fn test_mempool_to_instantsend_to_confirmed_event_flow() { - assert_lifecycle_flow( - &[ - TransactionContext::Mempool, - TransactionContext::InstantSend(InstantLock::default()), - TransactionContext::InBlock(BlockInfo::new( - 200, - BlockHash::from_byte_array([0xbb; 32]), - 2000, - )), - ], - 0xbb, - ) - .await; -} - -#[tokio::test] -async fn test_first_seen_in_block_event_flow() { - assert_lifecycle_flow( - &[TransactionContext::InBlock(BlockInfo::new( - 1000, - BlockHash::from_byte_array([0xdd; 32]), - 10000, - ))], - 0xdd, - ) - .await; -} - -// 
--------------------------------------------------------------------------- -// Duplicate suppression tests -// --------------------------------------------------------------------------- +async fn test_mempool_tx_with_instant_lock_emits_detected_event_with_locked_balance() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xbb); -#[tokio::test] -async fn test_duplicate_mempool_emits_no_event() { - assert_context_suppressed( - &[TransactionContext::Mempool], - TransactionContext::Mempool, - None, - 0x11, - ) - .await; -} + manager.process_mempool_transaction(&tx, Some(dummy_instant_lock(tx.txid()))).await; -#[tokio::test] -async fn test_duplicate_instantsend_emits_no_event() { - assert_context_suppressed( - &[TransactionContext::Mempool, TransactionContext::InstantSend(InstantLock::default())], - TransactionContext::InstantSend(InstantLock::default()), - None, - 0x22, - ) - .await; + let events = drain_events(&mut rx); + assert_eq!(events.len(), 1, "one event expected for first-seen IS-locked tx, got {:?}", events); + match &events[0] { + WalletEvent::TransactionDetected { + wallet_id: wid, + record, + balance, + } => { + assert_eq!(*wid, wallet_id); + assert!(matches!(record.context, TransactionContext::InstantSend(_))); + assert_eq!(balance.confirmed(), TX_AMOUNT); + assert_eq!(balance.unconfirmed(), 0); + } + other => panic!("expected TransactionDetected with IS context, got {:?}", other), + } } #[tokio::test] -async fn test_duplicate_confirmed_emits_no_event() { - let block_ctx = TransactionContext::InBlock(BlockInfo::new( - 300, - BlockHash::from_byte_array([0x33; 32]), - 3000, - )); - let block_ctx2 = block_ctx.clone(); - assert_context_suppressed(&[block_ctx], block_ctx2, Some(300), 0x33).await; -} - -// --------------------------------------------------------------------------- -// Edge case tests -// 
--------------------------------------------------------------------------- +async fn test_irrelevant_mempool_tx_emits_no_events() { + use dashcore::{PublicKey, ScriptBuf}; -#[tokio::test] -async fn test_first_seen_as_instantsend_then_duplicate() { - assert_context_suppressed( - &[TransactionContext::InstantSend(InstantLock::default())], - TransactionContext::InstantSend(InstantLock::default()), - None, - 0x55, - ) - .await; -} + let (mut manager, _wallet_id, _addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); -#[tokio::test] -async fn test_late_instantsend_after_confirmation_is_ignored() { - assert_context_suppressed( - &[ - TransactionContext::Mempool, - TransactionContext::InBlock(BlockInfo::new( - 800, - BlockHash::from_byte_array([0x77; 32]), - 8000, - )), - ], - TransactionContext::InstantSend(InstantLock::default()), - Some(800), - 0x77, - ) - .await; -} + let random_script = + ScriptBuf::new_p2pkh(&PublicKey::from_slice(&[2; 33]).unwrap().pubkey_hash()); + let tx = Transaction { + version: 2, + lock_time: 0, + input: vec![dashcore::TxIn { + previous_output: dashcore::OutPoint { + txid: dashcore::Txid::from_byte_array([0xe4; 32]), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: u32::MAX, + witness: dashcore::Witness::default(), + }], + output: vec![dashcore::TxOut { + value: TX_AMOUNT, + script_pubkey: random_script, + }], + special_transaction_payload: None, + }; -#[tokio::test] -async fn test_mempool_after_instantsend_is_suppressed() { - assert_context_suppressed( - &[TransactionContext::Mempool, TransactionContext::InstantSend(InstantLock::default())], - TransactionContext::Mempool, - None, - 0xab, - ) - .await; + let result = manager.process_mempool_transaction(&tx, None).await; + assert!(!result.is_relevant); + assert_no_events(&mut rx); } // --------------------------------------------------------------------------- -// BalanceUpdated event tests +// InstantSend path // 
--------------------------------------------------------------------------- #[tokio::test] -async fn test_mempool_tx_emits_balance_updated() { +async fn test_instant_send_lock_on_known_mempool_tx_emits_instant_locked_event() { let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xf1); + let tx = create_tx_paying_to(&addr, 0xe1); + // First see the tx as plain mempool manager.process_mempool_transaction(&tx, None).await; + let pre_lock_balance = manager.get_wallet_info(&wallet_id).unwrap().balance(); + assert_eq!(pre_lock_balance.confirmed(), 0); + assert_eq!(pre_lock_balance.unconfirmed(), TX_AMOUNT); + let mut rx = manager.subscribe_events(); + + let lock = InstantLock { + txid: tx.txid(), + cyclehash: CycleHash::from_byte_array([0xab; 32]), + signature: BLSSignature::from([0xcd; 96]), + ..InstantLock::default() + }; + manager.process_instant_send_lock(lock.clone()); let events = drain_events(&mut rx); - let balance_events: Vec<_> = - events.iter().filter(|e| matches!(e, WalletEvent::BalanceUpdated { .. })).collect(); - assert_eq!(balance_events.len(), 1, "expected exactly 1 BalanceUpdated, got {:?}", events); - assert!( - matches!( - balance_events[0], - WalletEvent::BalanceUpdated { - wallet_id: wid, - unconfirmed, - confirmed, - .. 
- } if *wid == wallet_id && *unconfirmed == TX_AMOUNT && *confirmed == 0 - ), - "expected BalanceUpdated with unconfirmed={TX_AMOUNT}, confirmed=0, got {:?}", - balance_events[0] - ); + assert_eq!(events.len(), 1, "exactly one event expected, got {:?}", events); + match &events[0] { + WalletEvent::TransactionInstantLocked { + wallet_id: wid, + txid, + instant_lock, + balance, + } => { + assert_eq!(*wid, wallet_id); + assert_eq!(*txid, tx.txid()); + assert_eq!(*instant_lock, lock); + assert_eq!(balance.confirmed(), TX_AMOUNT); + assert_eq!(balance.unconfirmed(), 0); + } + other => panic!("expected TransactionInstantLocked, got {:?}", other), + } } #[tokio::test] -async fn test_instantsend_tx_emits_balance_updated_spendable() { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xf2); +async fn test_instant_send_lock_dedup_second_is_silent() { + let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); + let tx = create_tx_paying_to(&addr, 0xe2); - manager.process_mempool_transaction(&tx, Some(dummy_instant_lock(tx.txid()))).await; + manager.process_mempool_transaction(&tx, None).await; + manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); - let events = drain_events(&mut rx); - let balance_events: Vec<_> = - events.iter().filter(|e| matches!(e, WalletEvent::BalanceUpdated { .. })).collect(); - assert_eq!(balance_events.len(), 1, "expected exactly 1 BalanceUpdated, got {:?}", events); - assert!( - matches!( - balance_events[0], - WalletEvent::BalanceUpdated { - wallet_id: wid, - confirmed, - unconfirmed, - .. 
- } if *wid == wallet_id && *confirmed == TX_AMOUNT && *unconfirmed == 0 - ), - "expected BalanceUpdated with confirmed={TX_AMOUNT}, unconfirmed=0, got {:?}", - balance_events[0] - ); + let mut rx = manager.subscribe_events(); + manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); + assert_no_events(&mut rx); } #[tokio::test] -async fn test_mempool_to_instantsend_transitions_balance() { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); +async fn test_instant_send_lock_for_unknown_txid_is_silent() { + let (mut manager, _wallet_id, _addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xf3); + let unknown_txid = Txid::from_byte_array([0xee; 32]); - // Mempool tx: balance should be unconfirmed - manager.process_mempool_transaction(&tx, None).await; - let events = drain_events(&mut rx); - assert!( - events.iter().any(|e| matches!( - e, - WalletEvent::BalanceUpdated { - wallet_id: wid, - unconfirmed, - confirmed, - .. - } if *wid == wallet_id && *unconfirmed == TX_AMOUNT && *confirmed == 0 - )), - "expected unconfirmed balance after mempool, got {:?}", - events - ); - - // IS lock: balance should move from unconfirmed to confirmed - manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); - let events = drain_events(&mut rx); - assert!( - events.iter().any(|e| matches!( - e, - WalletEvent::BalanceUpdated { - wallet_id: wid, - confirmed, - unconfirmed, - .. 
- } if *wid == wallet_id && *confirmed == TX_AMOUNT && *unconfirmed == 0 - )), - "expected confirmed balance after IS lock, got {:?}", - events - ); + manager.process_instant_send_lock(dummy_instant_lock(unknown_txid)); + assert_no_events(&mut rx); } #[tokio::test] -async fn test_process_instant_send_lock_updates_transaction_record_context() { +async fn test_late_instant_send_lock_after_block_confirmation_emits_event() { + // A late IS-lock for a transaction that was already confirmed in a block + // currently downgrades the record context from `InBlock(_)` back to + // `InstantSend(_)` and re-emits `TransactionInstantLocked`. This test + // pins down that observable behavior so any future change (silently + // ignoring the late lock, rejecting it at the record layer) shows up as a + // test failure rather than a silent semantic drift. let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let tx = create_tx_paying_to(&addr, 0xf4); - - // Process as mempool transaction first - manager.process_mempool_transaction(&tx, None).await; + let tx = create_tx_paying_to(&addr, 0xe3); - // Verify record starts with Mempool context - let history = manager.wallet_transaction_history(&wallet_id).unwrap(); - let record = history.iter().find(|r| r.txid == tx.txid()).unwrap(); - assert_eq!(record.context, TransactionContext::Mempool); + // Confirm the transaction in a block first. 
+ let block = make_block(vec![tx.clone()], 0xe3, 4000); + let wallets = BTreeSet::from([wallet_id]); + manager.process_block_for_wallets(&block, 300, &wallets).await; - // Create a rich InstantLock with a non-default cyclehash + let mut rx = manager.subscribe_events(); let lock = InstantLock { txid: tx.txid(), cyclehash: CycleHash::from_byte_array([0xab; 32]), signature: BLSSignature::from([0xcd; 96]), ..InstantLock::default() }; - manager.process_instant_send_lock(lock.clone()); - // Verify the transaction record context was updated to InstantSend - let history = manager.wallet_transaction_history(&wallet_id).unwrap(); - let record = history.iter().find(|r| r.txid == tx.txid()).unwrap(); - assert_eq!( - record.context, - TransactionContext::InstantSend(lock), - "transaction record context should be updated to InstantSend with matching lock" - ); + let events = drain_events(&mut rx); + let lock_event = events + .iter() + .find(|e| matches!(e, WalletEvent::TransactionInstantLocked { .. })) + .unwrap_or_else(|| { + panic!( + "late IS-lock for an already-confirmed tx currently emits \ + TransactionInstantLocked, got: {:?}", + events + ) + }); + match lock_event { + WalletEvent::TransactionInstantLocked { + wallet_id: wid, + txid, + instant_lock, + .. 
+ } => { + assert_eq!(*wid, wallet_id); + assert_eq!(*txid, tx.txid()); + assert_eq!(*instant_lock, lock); + } + _ => unreachable!(), + } } // --------------------------------------------------------------------------- -// Production API tests +// Block path // --------------------------------------------------------------------------- #[tokio::test] -async fn test_process_instant_send_lock_for_unknown_txid() { - let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); +async fn test_block_with_new_tx_emits_inserted_record() { + let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); + let tx = create_tx_paying_to(&addr, 0xcc); + let block = make_block(vec![tx.clone()], 0xcc, 1000); - let unknown_txid = dashcore::Txid::from_byte_array([0xee; 32]); - let balance_before = manager.wallet_infos.get(&wallet_id).unwrap().balance(); - - manager.process_instant_send_lock(dummy_instant_lock(unknown_txid)); + let wallets = BTreeSet::from([wallet_id]); + let result = manager.process_block_for_wallets(&block, 100, &wallets).await; + assert_eq!(result.new_txids.len(), 1); - assert_no_events(&mut rx); - let balance_after = manager.wallet_infos.get(&wallet_id).unwrap().balance(); - assert_eq!(balance_before, balance_after); + let events = drain_events(&mut rx); + assert_eq!(events.len(), 1, "one event per affected wallet expected, got {:?}", events); + match &events[0] { + WalletEvent::BlockProcessed { + wallet_id: wid, + height, + inserted, + updated, + matured, + balance, + } => { + assert_eq!(*wid, wallet_id); + assert_eq!(*height, 100); + assert_eq!(inserted.len(), 1); + assert!(updated.is_empty()); + assert!(matured.is_empty()); + assert!(matches!( + inserted[0].account_type, + AccountType::Standard { + index: 0, + standard_account_type: StandardAccountType::BIP44Account + } + )); + assert_eq!(inserted[0].txid, tx.txid()); + assert!(matches!( + inserted[0].context, + TransactionContext::InBlock(info) if 
info.height() == 100 + )); + assert_eq!(balance.confirmed(), TX_AMOUNT); + } + other => panic!("expected BlockProcessed, got {:?}", other), + } } #[tokio::test] -async fn test_process_instant_send_lock_dedup() { +async fn test_block_confirming_known_mempool_tx_emits_updated_record() { let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let tx = create_tx_paying_to(&addr, 0xe1); + let tx = create_tx_paying_to(&addr, 0xdd); + // Seen in mempool first manager.process_mempool_transaction(&tx, None).await; + let mut rx = manager.subscribe_events(); + let block = make_block(vec![tx.clone()], 0xdd, 2000); + let wallets = BTreeSet::from([wallet_id]); + manager.process_block_for_wallets(&block, 200, &wallets).await; - // First IS lock should emit events - manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); let events = drain_events(&mut rx); - assert!( - events.iter().any(|e| matches!( - e, - WalletEvent::TransactionStatusChanged { - wallet_id: wid, - status: TransactionContext::InstantSend(_), - .. - } if *wid == wallet_id - )), - "expected TransactionStatusChanged(InstantSend) with correct wallet_id, got {:?}", - events - ); - assert!( - events.iter().any( - |e| matches!(e, WalletEvent::BalanceUpdated { wallet_id: wid, .. 
} if *wid == wallet_id) - ), - "expected BalanceUpdated for wallet, got {:?}", - events - ); - - // Second IS lock should be a no-op - manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); - assert_no_events(&mut rx); + assert_eq!(events.len(), 1, "one BlockProcessed expected, got {:?}", events); + match &events[0] { + WalletEvent::BlockProcessed { + wallet_id: wid, + height, + inserted, + updated, + matured, + balance, + } => { + assert_eq!(*wid, wallet_id); + assert_eq!(*height, 200); + assert!(inserted.is_empty()); + assert_eq!(updated.len(), 1); + assert!(matured.is_empty()); + assert_eq!(updated[0].txid, tx.txid()); + // Confirmation moves balance from unconfirmed to confirmed + assert_eq!(balance.confirmed(), TX_AMOUNT); + assert_eq!(balance.unconfirmed(), 0); + } + other => panic!("expected BlockProcessed with updated record, got {:?}", other), + } } #[tokio::test] -async fn test_process_instant_send_lock_after_block_confirmation() { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let tx = create_tx_paying_to(&addr, 0xe2); - - // Process as IS mempool tx, then confirm in block - manager.process_mempool_transaction(&tx, Some(dummy_instant_lock(tx.txid()))).await; - let block_ctx = TransactionContext::InBlock(BlockInfo::new( - 500, - BlockHash::from_byte_array([0xe2; 32]), - 5000, - )); - manager.check_transaction_in_all_wallets(&tx, block_ctx, true, true).await; - - // IS lock after block confirmation is a no-op (already tracked via mempool IS) - let mut rx = manager.subscribe_events(); - manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); - assert_no_events(&mut rx); +async fn test_block_with_index_less_account_tx_carries_account_type() { + // Index-less account variants (`IdentityRegistration`, `IdentityTopUpNotBound`, + // `IdentityInvitation`, `AssetLockAddressTopUp`, `AssetLockShieldedAddressTopUp`, + // `Provider*`) used to be silently dropped on the way out of `wallet_checker.rs` + // because the old 
emission code only kept matches whose `account_index()` was + // `Some(_)`. Verify they now flow through with the right `AccountType`. + let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); - // Confirm height preserved - let history = manager.wallet_transaction_history(&wallet_id).unwrap(); - let records: Vec<_> = history.iter().filter(|r| r.txid == tx.txid()).collect(); - assert_eq!(records.len(), 1); - assert_eq!(records[0].height(), Some(500)); -} + let xpub = manager + .get_wallet(&wallet_id) + .expect("wallet") + .accounts + .identity_registration + .as_ref() + .expect("default wallet should have an IdentityRegistration account") + .account_xpub; + let identity_address = manager + .get_wallet_info_mut(&wallet_id) + .expect("wallet info") + .identity_registration_managed_account_mut() + .expect("managed IdentityRegistration account") + .next_address(Some(&xpub), true) + .expect("identity registration address"); + + // Build a DIP-2 AssetLock transaction whose `credit_outputs` pay to the + // identity registration address. AssetLock funds aren't spendable on the + // Core chain, so balance does not shift, but the account does receive a + // record — which is exactly what we want to observe in `BlockProcessed`. 
+ let tx = Transaction { + version: 3, + lock_time: 0, + input: vec![TxIn { + previous_output: OutPoint { + txid: Txid::from_byte_array([0xee; 32]), + vout: 0, + }, + script_sig: ScriptBuf::new(), + sequence: u32::MAX, + witness: Witness::default(), + }], + output: vec![TxOut { + value: 100_000_000, + script_pubkey: Builder::new() + .push_opcode(opcodes::all::OP_RETURN) + .push_slice([0u8; 20]) + .into_script(), + }], + special_transaction_payload: Some(TransactionPayload::AssetLockPayloadType( + AssetLockPayload { + version: 1, + credit_outputs: vec![TxOut { + value: 100_000_000, + script_pubkey: identity_address.script_pubkey(), + }], + }, + )), + }; -#[tokio::test] -async fn test_mixed_instantsend_paths_no_duplicate_events() { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xf0); - - // Mempool first - manager.check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true).await; - drain_events(&mut rx); + let block = make_block(vec![tx.clone()], 0xee, 9999); + let wallets = BTreeSet::from([wallet_id]); + manager.process_block_for_wallets(&block, 9000, &wallets).await; - // IS lock via process_instant_send_lock (network IS lock message) - manager.process_instant_send_lock(dummy_instant_lock(tx.txid())); let events = drain_events(&mut rx); - assert!( - events.iter().any(|e| matches!( - e, - WalletEvent::TransactionStatusChanged { - wallet_id: wid, - status: TransactionContext::InstantSend(_), - .. - } if *wid == wallet_id - )), - "expected TransactionStatusChanged(InstantSend) with correct wallet_id, got {:?}", - events - ); + let block_event = events + .iter() + .find(|e| matches!(e, WalletEvent::BlockProcessed { .. 
})) + .unwrap_or_else(|| panic!("expected a BlockProcessed event, got {:?}", events)); - // Same IS lock via check_transaction_in_all_wallets (block/tx processing path) - // should be suppressed — no duplicate event - let is_lock = dummy_instant_lock(tx.txid()); - manager - .check_transaction_in_all_wallets(&tx, TransactionContext::InstantSend(is_lock), true, true) - .await; - assert_no_events(&mut rx); + match block_event { + WalletEvent::BlockProcessed { + wallet_id: wid, + inserted, + .. + } => { + assert_eq!(*wid, wallet_id); + let identity_record = inserted + .iter() + .find(|r| matches!(r.account_type, AccountType::IdentityRegistration)) + .unwrap_or_else(|| { + panic!( + "expected an inserted record for AccountType::IdentityRegistration, \ + got: {:?}", + inserted + ) + }); + assert_eq!(identity_record.txid, tx.txid()); + } + _ => unreachable!(), + } } #[tokio::test] -async fn test_mixed_instantsend_paths_reverse_no_duplicate_events() { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); +async fn test_empty_block_for_idle_wallet_emits_nothing() { + let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xf1); - - // Mempool first - manager.check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true).await; - drain_events(&mut rx); - - // IS lock via check_transaction_in_all_wallets first - let is_lock = dummy_instant_lock(tx.txid()); - manager - .check_transaction_in_all_wallets( - &tx, - TransactionContext::InstantSend(is_lock.clone()), - true, - true, - ) - .await; - let events = drain_events(&mut rx); - assert!( - events.iter().any(|e| matches!( - e, - WalletEvent::TransactionStatusChanged { - wallet_id: wid, - status: TransactionContext::InstantSend(_), - .. 
- } if *wid == wallet_id - )), - "expected TransactionStatusChanged(InstantSend) with correct wallet_id, got {:?}", - events - ); + let block = make_block(Vec::new(), 0x55, 3000); - // Same IS lock via process_instant_send_lock — should be suppressed - manager.process_instant_send_lock(is_lock); + let wallets = BTreeSet::from([wallet_id]); + manager.process_block_for_wallets(&block, 50, &wallets).await; assert_no_events(&mut rx); } #[tokio::test] -async fn test_process_block_emits_events() { - use dashcore::blockdata::block::{Block, Header, Version}; - use dashcore::hashes::Hash; - use dashcore::{BlockHash, CompactTarget, TxMerkleNode}; - +async fn test_block_processed_carries_matured_coinbase_record() { + // A coinbase received at height H matures at H + 100. Process the + // coinbase block first, then advance the chain past maturity by + // processing further blocks. The block whose height crosses H + 100 + // must carry the matured coinbase in `BlockProcessed.matured`. let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xe3); - - let block = Block { - header: Header { - version: Version::default(), - prev_blockhash: BlockHash::all_zeros(), - merkle_root: TxMerkleNode::all_zeros(), - time: 12345, - bits: CompactTarget::from_consensus(0x1d00ffff), - nonce: 0, - }, - txdata: vec![tx], - }; - + let coinbase_tx = make_coinbase_paying_to(&addr, 5_000_000_000); + let coinbase_height = 100; + let coinbase_block = make_block(vec![coinbase_tx.clone()], 0xc0, 4000); let wallets = BTreeSet::from([wallet_id]); - let result = manager.process_block_for_wallets(&block, 1000, &wallets).await; - assert_eq!(result.new_txids.len(), 1); + manager.process_block_for_wallets(&coinbase_block, coinbase_height, &wallets).await; + + // Advance to maturity height. With coinbase_height = 100, maturity is at + // height 200. Processing block 200 must surface the matured record. 
+ let mut rx = manager.subscribe_events(); + let mature_block = make_block(Vec::new(), 0xc1, 5000); + manager.process_block_for_wallets(&mature_block, coinbase_height + 100, &wallets).await; let events = drain_events(&mut rx); - let event = events + let block_event = events .iter() - .find(|e| matches!(e, WalletEvent::TransactionReceived { .. })) + .find(|e| matches!(e, WalletEvent::BlockProcessed { matured, .. } if !matured.is_empty())) .unwrap_or_else(|| { - panic!("expected TransactionReceived from process_block, got {:?}", events) + panic!("expected a BlockProcessed carrying matured coinbase, got {:?}", events) }); - match event { - WalletEvent::TransactionReceived { - account_index, - record, + match block_event { + WalletEvent::BlockProcessed { + wallet_id: wid, + height, + inserted, + updated, + matured, .. } => { - assert!( - matches!( - record.context, - TransactionContext::InBlock(info) if info.height() == 1000 - ), - "expected InBlock at height 1000, got {:?}", - record.context - ); - assert_eq!(*account_index, 0); - assert!( - !record.input_details.is_empty() || !record.output_details.is_empty(), - "expected non-empty details" - ); + assert_eq!(*wid, wallet_id); + assert_eq!(*height, coinbase_height + 100); + assert!(inserted.is_empty()); + assert!(updated.is_empty()); + assert_eq!(matured.len(), 1); + assert_eq!(matured[0].txid, coinbase_tx.txid()); } _ => unreachable!(), } - assert!( - events.iter().any( - |e| matches!(e, WalletEvent::BalanceUpdated { wallet_id: wid, .. 
} if *wid == wallet_id) - ), - "expected BalanceUpdated from process_block, got {:?}", - events - ); -} - -#[tokio::test] -async fn test_irrelevant_mempool_tx_emits_no_events() { - use dashcore::{PublicKey, ScriptBuf}; - - let (mut manager, _wallet_id, _addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - - // Create a tx paying to a random script that doesn't match any wallet address - let random_script = - ScriptBuf::new_p2pkh(&PublicKey::from_slice(&[2; 33]).unwrap().pubkey_hash()); - let tx = Transaction { - version: 2, - lock_time: 0, - input: vec![dashcore::TxIn { - previous_output: dashcore::OutPoint { - txid: dashcore::Txid::from_byte_array([0xe4; 32]), - vout: 0, - }, - script_sig: ScriptBuf::new(), - sequence: u32::MAX, - witness: dashcore::Witness::default(), - }], - output: vec![dashcore::TxOut { - value: TX_AMOUNT, - script_pubkey: random_script, - }], - special_transaction_payload: None, - }; - - let result = manager.process_mempool_transaction(&tx, None).await; - - assert!(!result.is_relevant); - assert_eq!(result.net_amount, 0); - assert_no_events(&mut rx); } // --------------------------------------------------------------------------- -// Edge case tests +// SyncHeightAdvanced // --------------------------------------------------------------------------- #[tokio::test] -async fn test_instantsend_to_chainlocked_event_flow() { - assert_lifecycle_flow( - &[ - TransactionContext::InstantSend(InstantLock::default()), - TransactionContext::InChainLockedBlock(BlockInfo::new( - 1600, - BlockHash::from_byte_array([0xc3; 32]), - 16000, - )), - ], - 0xc3, - ) - .await; +async fn test_update_wallet_synced_height_emits_event_per_wallet() { + let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + + manager.update_wallet_synced_height(&wallet_id, 1000); + + let synced_events: Vec<_> = drain_events(&mut rx) + .into_iter() + .filter_map(|e| match e { + 
WalletEvent::SyncHeightAdvanced { + wallet_id, + height, + } => Some((wallet_id, height)), + _ => None, + }) + .collect(); + assert_eq!(synced_events, vec![(wallet_id, 1000)]); } #[tokio::test] -async fn test_mempool_to_block_to_chainlocked_event_flow() { - let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); +async fn test_update_wallet_synced_height_does_not_re_emit_when_unchanged() { + let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xc4); - // Step 1: mempool — emits TransactionReceived - manager.check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true).await; - let event = assert_single_event(&mut rx); + manager.update_wallet_synced_height(&wallet_id, 2000); + drain_events(&mut rx); + + // Re-calling with the same height must not emit another SyncHeightAdvanced + manager.update_wallet_synced_height(&wallet_id, 2000); + let events = drain_events(&mut rx); assert!( - matches!( - &event, - WalletEvent::TransactionReceived { record, .. } - if record.context == TransactionContext::Mempool - ), - "expected TransactionReceived(Mempool), got {:?}", - event + !events.iter().any(|e| matches!(e, WalletEvent::SyncHeightAdvanced { .. })), + "no SyncHeightAdvanced should fire when height did not advance, got {:?}", + events ); - // Step 2: block confirmation — emits TransactionStatusChanged - let block_ctx = TransactionContext::InBlock(BlockInfo::new( - 1700, - BlockHash::from_byte_array([0xc4; 32]), - 17000, - )); - manager.check_transaction_in_all_wallets(&tx, block_ctx, true, true).await; - let event = assert_single_event(&mut rx); + // Going backwards also must not emit + manager.update_wallet_synced_height(&wallet_id, 1500); + let events = drain_events(&mut rx); assert!( - matches!( - event, - WalletEvent::TransactionStatusChanged { - status: TransactionContext::InBlock(_), - .. 
- } - ), - "expected TransactionStatusChanged(InBlock), got {:?}", - event + !events.iter().any(|e| matches!(e, WalletEvent::SyncHeightAdvanced { .. })), + "no SyncHeightAdvanced should fire when height went backwards, got {:?}", + events ); - - // Step 3: chain lock on already-confirmed tx — no event (wallet doesn't - // track chain lock state separately from block confirmation) - let cl_ctx = TransactionContext::InChainLockedBlock(BlockInfo::new( - 1700, - BlockHash::from_byte_array([0xc4; 32]), - 17000, - )); - manager.check_transaction_in_all_wallets(&tx, cl_ctx, true, true).await; - assert_no_events(&mut rx); } +// --------------------------------------------------------------------------- +// Dry run and irrelevant paths +// --------------------------------------------------------------------------- + #[tokio::test] -async fn test_chainlocked_block_event_flow() { +async fn test_check_transaction_does_not_emit_events_directly() { + // Event emission is the caller's responsibility; the low-level check + // function never emits so batch callers can defer emission until after + // their own balance refresh. let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xc1); - - let ctx = TransactionContext::InChainLockedBlock(BlockInfo::new( - 2000, - BlockHash::from_byte_array([0xc1; 32]), - 20000, - )); - manager.check_transaction_in_all_wallets(&tx, ctx, true, true).await; - let event = assert_single_event(&mut rx); - assert!( - matches!( - &event, - WalletEvent::TransactionReceived { record, .. 
} - if matches!(record.context, TransactionContext::InChainLockedBlock(info) if info.height() == 2000) - ), - "expected TransactionReceived(InChainLockedBlock at 2000), got {:?}", - event - ); + let tx = create_tx_paying_to(&addr, 0xd1); + + let result = manager + .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true) + .await; + assert!(!result.affected_wallets.is_empty()); + assert!(!result.per_wallet_new_records.is_empty()); + assert_no_events(&mut rx); } #[tokio::test] async fn test_check_transaction_dry_run_does_not_persist_state() { let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, 0xd1); + let tx = create_tx_paying_to(&addr, 0xd2); - // Dry run: update_state_if_found = false let result = manager .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, false, false) .await; - assert!(!result.affected_wallets.is_empty()); - assert_eq!(result.total_received, TX_AMOUNT); assert_no_events(&mut rx); - // Call again — should still report as relevant (state not persisted) - let result2 = manager - .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, false, false) - .await; - assert!(!result2.affected_wallets.is_empty()); - assert_eq!(result2.total_received, TX_AMOUNT); - assert_no_events(&mut rx); - - // Now persist — should still report as new since dry runs didn't record it - let result3 = manager + // Subsequent persist should still see the tx as new + let result = manager .check_transaction_in_all_wallets(&tx, TransactionContext::Mempool, true, true) .await; - assert!(result3.is_new_transaction); + assert!(result.is_new_transaction); } diff --git a/key-wallet-manager/src/events.rs b/key-wallet-manager/src/events.rs index 5d0e2b282..d04ca3900 100644 --- a/key-wallet-manager/src/events.rs +++ b/key-wallet-manager/src/events.rs @@ -1,90 +1,146 @@ //! Wallet events for notifying consumers of wallet state changes. //! -//! 
These events are emitted by the WalletManager when significant wallet -//! operations occur, allowing consumers to receive push-based notifications. +//! Each variant is self-contained: it carries the transaction record(s) that +//! triggered it and the wallet's new balance after the change. Consumers can +//! persist the transaction(s) and balance atomically off a single event. -use dashcore::{Amount, SignedAmount, Txid}; +use dashcore::ephemerealdata::instant_lock::InstantLock; +use dashcore::prelude::CoreBlockHeight; +use dashcore::Txid; use key_wallet::managed_account::transaction_record::TransactionRecord; -use key_wallet::transaction_checking::TransactionContext; +use key_wallet::WalletCoreBalance; use crate::WalletId; /// Events emitted by the wallet manager. /// -/// Each event represents a meaningful wallet state change that consumers -/// may want to react to. +/// Each event represents a meaningful wallet state change. Events that +/// modify balance carry the wallet's balance *after* the change so +/// consumers can persist the record(s) and balance atomically. #[derive(Debug, Clone)] pub enum WalletEvent { - /// A transaction relevant to the wallet was received for the first time. - TransactionReceived { + /// First time the wallet sees an off-chain wallet-relevant transaction + /// (mempool, or directly via an InstantSend lock — in that case + /// `record.context` is `InstantSend(..)`). + TransactionDetected { /// ID of the affected wallet. wallet_id: WalletId, - /// Account index within the wallet. - account_index: u32, /// The full transaction record with all details. record: Box, + /// Wallet balance after the transaction was recorded. + balance: WalletCoreBalance, }, - /// The confirmation status of a previously seen transaction has changed. - TransactionStatusChanged { + /// An InstantSend lock was applied to a previously-seen off-chain + /// wallet-relevant transaction. + TransactionInstantLocked { /// ID of the affected wallet. 
wallet_id: WalletId, /// Transaction ID. txid: Txid, - /// New transaction context. - status: TransactionContext, + /// The InstantSend lock now applied to the transaction. + instant_lock: InstantLock, + /// Wallet balance after the status change. + balance: WalletCoreBalance, }, - /// The wallet balance has changed. - BalanceUpdated { + /// A block was processed for a wallet. Carries records bucketed by what + /// happened to them in this block, plus the post-block balance. + /// `inserted` is records first stored in this block, `updated` is + /// previously-known records that just confirmed, `matured` is older + /// coinbase records that crossed the maturity threshold as the scanned + /// height advanced. + BlockProcessed { /// ID of the affected wallet. wallet_id: WalletId, - /// New confirmed balance in duffs (mature, in a block or InstantSend-locked). - confirmed: u64, - /// New unconfirmed balance in duffs (mature, mempool-only). Also spendable. - unconfirmed: u64, - /// New immature balance (coinbase UTXOs not yet mature). - immature: u64, - /// New locked balance (UTXOs reserved for specific purposes like CoinJoin) - locked: u64, + /// Height of the block that was processed. + height: CoreBlockHeight, + /// Records first stored for this wallet in this block. + inserted: Vec, + /// Previously-known records confirmed by this block. + updated: Vec, + /// Older coinbase records whose maturity threshold was crossed by + /// this height advance. + matured: Vec, + /// Wallet balance after the block was processed. + balance: WalletCoreBalance, + }, + /// The wallet's scan cursor advanced because the filter pipeline + /// committed a batch covering blocks up to `height`. No records or + /// balance — consumers persist this as a checkpoint atomically with + /// any records/balance from prior `BlockProcessed` events in the batch. + SyncHeightAdvanced { + /// ID of the affected wallet. + wallet_id: WalletId, + /// New scanned height for the wallet. 
+ height: CoreBlockHeight, }, } impl WalletEvent { - /// Get a short description of this event for logging. + /// ID of the wallet this event pertains to. + pub fn wallet_id(&self) -> WalletId { + match self { + WalletEvent::TransactionDetected { + wallet_id, + .. + } + | WalletEvent::TransactionInstantLocked { + wallet_id, + .. + } + | WalletEvent::BlockProcessed { + wallet_id, + .. + } + | WalletEvent::SyncHeightAdvanced { + wallet_id, + .. + } => *wallet_id, + } + } + + /// Short description for logging. pub fn description(&self) -> String { match self { - WalletEvent::TransactionReceived { + WalletEvent::TransactionDetected { record, + balance, .. } => { format!( - "TransactionReceived(txid={}, amount={}, status={})", - record.txid, - SignedAmount::from_sat(record.net_amount), - record.context + "TransactionDetected(txid={}, context={}, balance={})", + record.txid, record.context, balance ) } - WalletEvent::TransactionStatusChanged { + WalletEvent::TransactionInstantLocked { txid, - status, + balance, .. } => { - format!("TransactionStatusChanged(txid={}, status={})", txid, status) + format!("TransactionInstantLocked(txid={}, balance={})", txid, balance) } - WalletEvent::BalanceUpdated { - confirmed, - unconfirmed, - immature, - locked, + WalletEvent::BlockProcessed { + height, + inserted, + updated, + matured, + balance, .. } => { format!( - "BalanceUpdated(confirmed={}, unconfirmed={}, immature={}, locked={})", - Amount::from_sat(*confirmed), - Amount::from_sat(*unconfirmed), - Amount::from_sat(*immature), - Amount::from_sat(*locked) + "BlockProcessed(height={}, inserted={}, updated={}, matured={}, balance={})", + height, + inserted.len(), + updated.len(), + matured.len(), + balance ) } + WalletEvent::SyncHeightAdvanced { + height, + .. 
+ } => { + format!("SyncHeightAdvanced(height={})", height) + } } } } diff --git a/key-wallet-manager/src/lib.rs b/key-wallet-manager/src/lib.rs index c6a6e94b9..4e9ed5056 100644 --- a/key-wallet-manager/src/lib.rs +++ b/key-wallet-manager/src/lib.rs @@ -27,6 +27,7 @@ pub use wallet_interface::{BlockProcessingResult, MempoolTransactionResult, Wall use dashcore::blockdata::transaction::Transaction; use dashcore::prelude::CoreBlockHeight; use key_wallet::account::AccountCollection; +use key_wallet::managed_account::transaction_record::TransactionRecord; use key_wallet::transaction_checking::TransactionContext; use key_wallet::wallet::managed_wallet_info::transaction_building::AccountTypePreference; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; @@ -81,6 +82,11 @@ pub struct CheckTransactionsResult { pub total_sent: u64, /// Addresses involved across all wallets pub involved_addresses: Vec
, + /// Records newly recorded by this check, grouped by wallet. + pub per_wallet_new_records: BTreeMap>, + /// Records whose state was updated by this check (confirmation or + /// InstantSend lock on a previously stored record), grouped by wallet. + pub per_wallet_updated_records: BTreeMap>, } impl CheckTransactionsResult { @@ -450,7 +456,11 @@ impl WalletManager { } /// Check a transaction against all wallets and update their states if relevant. - /// Returns affected wallets and any new addresses generated during gap limit maintenance. + /// + /// Collects — but does not emit — the per-wallet records affected by the + /// check. Callers are responsible for emitting the appropriate + /// `WalletEvent` *after* refreshing wallet balances so events never + /// carry a stale balance. pub async fn check_transaction_in_all_wallets( &mut self, tx: &Transaction, @@ -512,22 +522,19 @@ impl WalletManager { } } - if check_result.is_new_transaction { - for (account_index, record) in check_result.new_records { - let event = WalletEvent::TransactionReceived { - wallet_id: *wallet_id, - account_index, - record: Box::new(record), - }; - let _ = self.event_sender.send(event); - } - } else if check_result.state_modified { - let event = WalletEvent::TransactionStatusChanged { - wallet_id: *wallet_id, - txid: tx.txid(), - status: context.clone(), - }; - let _ = self.event_sender.send(event); + if !check_result.new_records.is_empty() { + result + .per_wallet_new_records + .entry(*wallet_id) + .or_default() + .extend(check_result.new_records); + } + if !check_result.updated_records.is_empty() { + result + .per_wallet_updated_records + .entry(*wallet_id) + .or_default() + .extend(check_result.updated_records); } } diff --git a/key-wallet-manager/src/process_block.rs b/key-wallet-manager/src/process_block.rs index 6f232ea93..9c313e177 100644 --- a/key-wallet-manager/src/process_block.rs +++ b/key-wallet-manager/src/process_block.rs @@ -5,9 +5,10 @@ use core::fmt::Write as _; use 
dashcore::ephemerealdata::instant_lock::InstantLock; use dashcore::prelude::CoreBlockHeight; use dashcore::{Address, Block, Transaction}; +use key_wallet::managed_account::transaction_record::TransactionRecord; use key_wallet::transaction_checking::{BlockInfo, TransactionContext}; use key_wallet::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; -use std::collections::BTreeSet; +use std::collections::{BTreeMap, BTreeSet}; use tokio::sync::broadcast; #[async_trait] @@ -24,6 +25,9 @@ impl WalletInterface for WalletM } let info = BlockInfo::new(height, block.block_hash(), block.header.time); + let mut per_wallet_inserted: BTreeMap> = BTreeMap::new(); + let mut per_wallet_updated: BTreeMap> = BTreeMap::new(); + for tx in &block.txdata { let context = TransactionContext::InBlock(info); let check_result = @@ -40,24 +44,15 @@ impl WalletInterface for WalletM for (wallet_id, addrs) in check_result.new_addresses { result.new_addresses.entry(wallet_id).or_default().extend(addrs); } - } - - // For each processed wallet: advance last-processed height monotonically - // and refresh the cached balance so it reflects any UTXO changes from - // this block. Rescan blocks at heights below the wallet's current - // checkpoint must not drag the height backwards, but they still need a - // balance refresh because UTXOs were added or removed. 
- let snapshot = self.snapshot_balances(); - for wallet_id in wallets { - if let Some(info) = self.wallet_infos.get_mut(wallet_id) { - if height > info.last_processed_height() { - info.update_last_processed_height(height); - } else { - info.update_balance(); - } + for (wallet_id, records) in check_result.per_wallet_new_records { + per_wallet_inserted.entry(wallet_id).or_default().extend(records); + } + for (wallet_id, records) in check_result.per_wallet_updated_records { + per_wallet_updated.entry(wallet_id).or_default().extend(records); } } - self.emit_balance_changes(&snapshot); + + self.finalize_block_advance(height, wallets, per_wallet_inserted, per_wallet_updated); result } @@ -67,15 +62,15 @@ impl WalletInterface for WalletM tx: &Transaction, instant_lock: Option, ) -> MempoolTransactionResult { - let context = match instant_lock { + let context = match instant_lock.as_ref() { Some(lock) => { debug_assert_eq!(lock.txid, tx.txid(), "InstantLock txid must match transaction"); - TransactionContext::InstantSend(lock) + TransactionContext::InstantSend(lock.clone()) } None => TransactionContext::Mempool, }; - let snapshot = self.snapshot_balances(); - let check_result = self.check_transaction_in_all_wallets(tx, context, true, false).await; + let mut check_result = + self.check_transaction_in_all_wallets(tx, context, true, false).await; let is_relevant = !check_result.affected_wallets.is_empty(); let net_amount = if is_relevant { @@ -84,13 +79,53 @@ impl WalletInterface for WalletM 0 }; - // Refresh cached balances only for affected wallets + // Refresh cached balances for affected wallets before emitting so + // every event carries a post-change balance. 
for wallet_id in &check_result.affected_wallets { if let Some(info) = self.wallet_infos.get_mut(wallet_id) { info.update_balance(); } } - self.emit_balance_changes(&snapshot); + + let per_wallet_new_records = std::mem::take(&mut check_result.per_wallet_new_records); + let per_wallet_updated_records = + std::mem::take(&mut check_result.per_wallet_updated_records); + + for (wallet_id, records) in per_wallet_new_records { + let Some(info) = self.wallet_infos.get(&wallet_id) else { + continue; + }; + let balance = info.balance(); + for record in records { + let event = WalletEvent::TransactionDetected { + wallet_id, + record: Box::new(record), + balance, + }; + let _ = self.event_sender.send(event); + } + } + + if let Some(lock) = instant_lock { + for (wallet_id, records) in per_wallet_updated_records { + if records.is_empty() { + continue; + } + let Some(info) = self.wallet_infos.get(&wallet_id) else { + continue; + }; + let balance = info.balance(); + for record in records { + let event = WalletEvent::TransactionInstantLocked { + wallet_id, + txid: record.txid, + instant_lock: lock.clone(), + balance, + }; + let _ = self.event_sender.send(event); + } + } + } let new_addresses: Vec
= check_result.all_new_addresses().cloned().collect(); MempoolTransactionResult { @@ -151,6 +186,10 @@ impl WalletInterface for WalletM if let Some(info) = self.wallet_infos.get_mut(wallet_id) { if height > info.synced_height() { info.update_synced_height(height); + let _ = self.event_sender.send(WalletEvent::SyncHeightAdvanced { + wallet_id: *wallet_id, + height, + }); } } } @@ -160,13 +199,8 @@ impl WalletInterface for WalletM wallet_id: &WalletId, height: CoreBlockHeight, ) { - let snapshot = self.snapshot_balances(); - if let Some(info) = self.wallet_infos.get_mut(wallet_id) { - if height > info.last_processed_height() { - info.update_last_processed_height(height); - } - } - self.emit_balance_changes(&snapshot); + let wallets = BTreeSet::from([*wallet_id]); + self.finalize_block_advance(height, &wallets, BTreeMap::new(), BTreeMap::new()); } fn subscribe_events(&self) -> broadcast::Receiver { @@ -175,11 +209,11 @@ impl WalletInterface for WalletM fn process_instant_send_lock(&mut self, instant_lock: InstantLock) { let txid = instant_lock.txid; - let snapshot = self.snapshot_balances(); let mut affected_wallets = Vec::new(); for (wallet_id, info) in self.wallet_infos.iter_mut() { if info.mark_instant_send_utxos(&txid, &instant_lock) { + info.update_balance(); affected_wallets.push(*wallet_id); } } @@ -188,16 +222,17 @@ impl WalletInterface for WalletM return; } - for wallet_id in &affected_wallets { - let event = WalletEvent::TransactionStatusChanged { - wallet_id: *wallet_id, - txid, - status: TransactionContext::InstantSend(instant_lock.clone()), + for wallet_id in affected_wallets { + let Some(info) = self.wallet_infos.get(&wallet_id) else { + continue; }; - let _ = self.event_sender().send(event); + let _ = self.event_sender().send(WalletEvent::TransactionInstantLocked { + wallet_id, + txid, + instant_lock: instant_lock.clone(), + balance: info.balance(), + }); } - - self.emit_balance_changes(&snapshot); } async fn describe(&self) -> String { @@ -230,6 
+265,90 @@ impl WalletInterface for WalletM } } +impl WalletManager { + /// For each wallet in `wallets`: advance `last_processed_height` to + /// `height` (monotonically — never backwards), refresh the cached balance, + /// collect matured-coinbase records over the window `(prior, height]`, and + /// emit a `BlockProcessed` event whose balance reflects the post-advance + /// state. A wallet whose `last_processed_height` is already at or above + /// `height` keeps its height but still gets a balance refresh, so rescan + /// passes that hit blocks below the wallet's checkpoint surface UTXO + /// changes without dragging the height backwards. + fn finalize_block_advance( + &mut self, + height: CoreBlockHeight, + wallets: &BTreeSet, + mut per_wallet_inserted: BTreeMap>, + mut per_wallet_updated: BTreeMap>, + ) { + if wallets.is_empty() { + return; + } + + let snapshot = self.snapshot_balances(); + let prior_heights: BTreeMap = wallets + .iter() + .filter_map(|id| { + self.wallet_infos.get(id).map(|info| (*id, info.last_processed_height())) + }) + .collect(); + + // Collect matured coinbase records before advancing the height so the + // (old, new] window is well-defined per wallet. Wallets whose height + // is already at or past `height` contribute no matured records on this + // pass (their matured window is empty). + let mut per_wallet_matured: BTreeMap> = BTreeMap::new(); + for wallet_id in wallets { + let Some(info) = self.wallet_infos.get(wallet_id) else { + continue; + }; + let old_height = prior_heights.get(wallet_id).copied().unwrap_or(0); + if height > old_height { + let matured = info.matured_coinbase_records(old_height, height); + if !matured.is_empty() { + per_wallet_matured.insert(*wallet_id, matured); + } + } + } + + // Advance heights and refresh balances. Event emission happens below + // so each wallet's event carries the post-advance balance. 
+ for wallet_id in wallets { + if let Some(info) = self.wallet_infos.get_mut(wallet_id) { + if height > info.last_processed_height() { + info.update_last_processed_height(height); + } else { + info.update_balance(); + } + } + } + + for wallet_id in wallets { + let Some(info) = self.wallet_infos.get(wallet_id) else { + continue; + }; + let new_balance = info.balance(); + let inserted = per_wallet_inserted.remove(wallet_id).unwrap_or_default(); + let updated = per_wallet_updated.remove(wallet_id).unwrap_or_default(); + let matured = per_wallet_matured.remove(wallet_id).unwrap_or_default(); + let balance_changed = snapshot.get(wallet_id).copied() != Some(new_balance); + + if !inserted.is_empty() || !updated.is_empty() || !matured.is_empty() || balance_changed + { + let event = WalletEvent::BlockProcessed { + wallet_id: *wallet_id, + height, + inserted, + updated, + matured, + balance: new_balance, + }; + let _ = self.event_sender.send(event); + } + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -271,27 +390,29 @@ mod tests { } #[tokio::test] - async fn test_process_mempool_transaction_balance_events() { + async fn test_process_mempool_transaction_emits_event() { let (mut manager, _wallet_id, addr) = setup_manager_with_wallet(); let mut rx = manager.subscribe_events(); - // Relevant tx should emit BalanceUpdated + // Relevant tx should emit TransactionDetected carrying the balance let tx = create_tx_paying_to(&addr, 0xaa); manager.process_mempool_transaction(&tx, None).await; let mut found = false; while let Ok(event) = rx.try_recv() { - if let WalletEvent::BalanceUpdated { - unconfirmed, + if let WalletEvent::TransactionDetected { + balance, + record, .. 
} = event { - assert!(unconfirmed > 0, "unconfirmed balance should increase"); + assert_eq!(record.txid, tx.txid(), "event should carry the mempool tx"); + assert!(balance.unconfirmed() > 0, "unconfirmed balance should increase"); found = true; break; } } - assert!(found, "should emit BalanceUpdated for mempool transaction"); + assert!(found, "should emit TransactionDetected for mempool transaction"); // Irrelevant tx should not emit any events let unrelated_tx = Transaction { @@ -319,10 +440,10 @@ mod tests { } #[tokio::test] - async fn test_process_block_emits_balance_updated() { + async fn test_process_block_emits_block_processed() { let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); let tx = create_tx_paying_to(&addr, 0xcc); - let block = make_block(vec![tx]); + let block = make_block(vec![tx.clone()]); let mut rx = manager.subscribe_events(); let wallets = BTreeSet::from([wallet_id]); @@ -330,17 +451,44 @@ mod tests { let mut found = false; while let Ok(event) = rx.try_recv() { - if let WalletEvent::BalanceUpdated { - confirmed, + if let WalletEvent::BlockProcessed { + height, + inserted, + balance, .. 
} = event { - assert!(confirmed > 0, "confirmed balance should increase after block"); + assert_eq!(height, 100); + assert!(balance.confirmed() > 0, "confirmed balance should increase after block"); + assert_eq!(inserted.len(), 1); + assert_eq!(inserted[0].txid, tx.txid()); found = true; break; } } - assert!(found, "should emit BalanceUpdated for block processing"); + assert!(found, "should emit BlockProcessed for block processing"); + } + + #[tokio::test] + async fn test_update_wallet_synced_height_emits_sync_height_advanced() { + let (mut manager, wallet_id, _addr) = setup_manager_with_wallet(); + let mut rx = manager.subscribe_events(); + + manager.update_wallet_synced_height(&wallet_id, 500); + + let mut found = false; + while let Ok(event) = rx.try_recv() { + if let WalletEvent::SyncHeightAdvanced { + wallet_id: evt_wallet_id, + height, + } = event + { + assert_eq!(evt_wallet_id, wallet_id); + assert_eq!(height, 500); + found = true; + } + } + assert!(found, "should emit SyncHeightAdvanced on update_wallet_synced_height"); } #[tokio::test] diff --git a/key-wallet-manager/src/test_helpers.rs b/key-wallet-manager/src/test_helpers.rs index f70cef633..f71223961 100644 --- a/key-wallet-manager/src/test_helpers.rs +++ b/key-wallet-manager/src/test_helpers.rs @@ -59,79 +59,8 @@ pub(crate) fn drain_events(rx: &mut broadcast::Receiver) -> Vec) -> WalletEvent { - let events = drain_events(rx); - assert_eq!(events.len(), 1, "expected 1 event, got {}: {:?}", events.len(), events); - events.into_iter().next().unwrap() -} - /// Drain events and assert none were emitted. pub(crate) fn assert_no_events(rx: &mut broadcast::Receiver) { let events = drain_events(rx); assert!(events.is_empty(), "expected no events, got {}: {:?}", events.len(), events); } - -/// Submit a transaction through a sequence of contexts and verify the event flow. -/// -/// The first context produces a `TransactionReceived` event; each subsequent -/// context produces a `TransactionStatusChanged` event. 
-pub(crate) async fn assert_lifecycle_flow(contexts: &[TransactionContext], input_seed: u8) { - assert!(!contexts.is_empty(), "at least one context required"); - - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, input_seed); - - for (i, ctx) in contexts.iter().enumerate() { - manager.check_transaction_in_all_wallets(&tx, ctx.clone(), true, true).await; - let event = assert_single_event(&mut rx); - - if i == 0 { - assert!( - matches!(&event, WalletEvent::TransactionReceived { wallet_id: wid, record, .. } if *wid == wallet_id && record.context == *ctx), - "context[{}]: expected TransactionReceived with wallet_id and status {:?}, got {:?}", - i, - ctx, - event - ); - } else { - assert!( - matches!(&event, WalletEvent::TransactionStatusChanged { wallet_id: wid, status, .. } if *wid == wallet_id && status == ctx), - "context[{}]: expected TransactionStatusChanged with wallet_id and status {:?}, got {:?}", - i, - ctx, - event - ); - } - } -} - -/// Submit a transaction through `setup_contexts`, drain events, then submit with -/// `suppressed_context` and assert no event is emitted. Optionally verify -/// the stored height. 
-pub(crate) async fn assert_context_suppressed( - setup_contexts: &[TransactionContext], - suppressed_context: TransactionContext, - expected_height: Option, - input_seed: u8, -) { - let (mut manager, wallet_id, addr) = setup_manager_with_wallet(); - let mut rx = manager.subscribe_events(); - let tx = create_tx_paying_to(&addr, input_seed); - - for ctx in setup_contexts { - manager.check_transaction_in_all_wallets(&tx, ctx.clone(), true, true).await; - drain_events(&mut rx); - } - - manager.check_transaction_in_all_wallets(&tx, suppressed_context, true, true).await; - assert_no_events(&mut rx); - - let history = manager.wallet_transaction_history(&wallet_id).unwrap(); - let records: Vec<_> = history.iter().filter(|r| r.txid == tx.txid()).collect(); - assert_eq!(records.len(), 1); - if let Some(height) = expected_height { - assert_eq!(records[0].height(), Some(height)); - } -} diff --git a/key-wallet/src/managed_account/mod.rs b/key-wallet/src/managed_account/mod.rs index 2633cb0b0..65978843c 100644 --- a/key-wallet/src/managed_account/mod.rs +++ b/key-wallet/src/managed_account/mod.rs @@ -526,6 +526,7 @@ impl ManagedCoreAccount { let tx_record = TransactionRecord::new( tx.clone(), + self.account_type.to_account_type(), context.clone(), transaction_type, direction, diff --git a/key-wallet/src/managed_account/transaction_record.rs b/key-wallet/src/managed_account/transaction_record.rs index e4eca2c36..b51aee6f1 100644 --- a/key-wallet/src/managed_account/transaction_record.rs +++ b/key-wallet/src/managed_account/transaction_record.rs @@ -3,6 +3,7 @@ //! This module contains the transaction record structure used to track //! transactions associated with accounts. 
+use crate::account::AccountType; use crate::error::Error; use crate::transaction_checking::transaction_router::TransactionType; use crate::transaction_checking::{BlockInfo, TransactionContext}; @@ -79,6 +80,8 @@ pub struct TransactionRecord { pub transaction: Transaction, /// Transaction ID pub txid: Txid, + /// Account this record belongs to. + pub account_type: AccountType, /// The context in which this transaction was last seen pub context: TransactionContext, /// Classification of the transaction type @@ -98,9 +101,13 @@ pub struct TransactionRecord { } impl TransactionRecord { - /// Create a new transaction record with the given context + /// Create a new transaction record with the given context. + /// + /// `account_type` identifies the owning account. + #[allow(clippy::too_many_arguments)] pub fn new( transaction: Transaction, + account_type: AccountType, context: TransactionContext, transaction_type: TransactionType, direction: TransactionDirection, @@ -111,6 +118,7 @@ impl TransactionRecord { let txid = transaction.txid(); Self { txid, + account_type, transaction, context, transaction_type, @@ -194,9 +202,17 @@ impl TransactionRecord { #[cfg(test)] mod tests { use super::*; + use crate::account::StandardAccountType; use dashcore::hashes::Hash; use dashcore::BlockHash; + fn test_account_type() -> AccountType { + AccountType::Standard { + index: 0, + standard_account_type: StandardAccountType::BIP44Account, + } + } + fn test_block_context(height: u32) -> TransactionContext { TransactionContext::InBlock(BlockInfo::new(height, BlockHash::all_zeros(), 1234567890)) } @@ -208,6 +224,7 @@ mod tests { ) -> TransactionRecord { TransactionRecord::new( tx, + test_account_type(), context, TransactionType::Standard, TransactionDirection::Incoming, @@ -264,6 +281,7 @@ mod tests { let outgoing = TransactionRecord::new( tx.clone(), + test_account_type(), TransactionContext::Mempool, TransactionType::Standard, TransactionDirection::Outgoing, @@ -277,6 +295,7 @@ mod tests 
{ let internal = TransactionRecord::new( tx.clone(), + test_account_type(), TransactionContext::Mempool, TransactionType::Standard, TransactionDirection::Internal, @@ -289,6 +308,7 @@ mod tests { let coinjoin = TransactionRecord::new( tx, + test_account_type(), TransactionContext::Mempool, TransactionType::CoinJoin, TransactionDirection::CoinJoin, diff --git a/key-wallet/src/tests/spent_outpoints_tests.rs b/key-wallet/src/tests/spent_outpoints_tests.rs index fe58242fd..92f92244c 100644 --- a/key-wallet/src/tests/spent_outpoints_tests.rs +++ b/key-wallet/src/tests/spent_outpoints_tests.rs @@ -3,7 +3,7 @@ use dashcore::blockdata::transaction::{OutPoint, Transaction}; use dashcore::{TxIn, Txid}; -use crate::account::TransactionRecord; +use crate::account::{AccountType, StandardAccountType, TransactionRecord}; use crate::managed_account::transaction_record::TransactionDirection; use crate::managed_account::ManagedCoreAccount; use crate::transaction_checking::{TransactionContext, TransactionType}; @@ -39,6 +39,10 @@ fn receive_only_tx() -> Transaction { fn record_from_tx(tx: &Transaction) -> TransactionRecord { TransactionRecord::new( tx.clone(), + AccountType::Standard { + index: 0, + standard_account_type: StandardAccountType::BIP44Account, + }, TransactionContext::Mempool, TransactionType::Standard, TransactionDirection::Incoming, diff --git a/key-wallet/src/transaction_checking/account_checker.rs b/key-wallet/src/transaction_checking/account_checker.rs index a9750f1aa..050a613b5 100644 --- a/key-wallet/src/transaction_checking/account_checker.rs +++ b/key-wallet/src/transaction_checking/account_checker.rs @@ -46,8 +46,15 @@ pub struct TransactionCheckResult { pub total_received_for_credit_conversion: u64, /// New addresses generated during gap limit maintenance pub new_addresses: Vec
, - /// Transaction records created for new transactions, paired with their account index - pub new_records: Vec<(u32, TransactionRecord)>, + /// Transaction records created for new transactions. Each record carries + /// its owning [`AccountType`](crate::account::AccountType) on + /// `record.account_type`, so consumers can recover it without an external + /// pairing. + pub new_records: Vec, + /// Transaction records updated by this check (confirmation or IS-lock + /// applied to a previously stored record). Each record carries its owning + /// `AccountType` on `record.account_type`. + pub updated_records: Vec, } /// Enum representing the type of Core account that matched with embedded data @@ -376,6 +383,7 @@ impl ManagedAccountCollection { total_received_for_credit_conversion: 0, new_addresses: Vec::new(), new_records: Vec::new(), + updated_records: Vec::new(), }; for account_type in account_types { diff --git a/key-wallet/src/transaction_checking/wallet_checker.rs b/key-wallet/src/transaction_checking/wallet_checker.rs index 391df1ae6..5872a46c0 100644 --- a/key-wallet/src/transaction_checking/wallet_checker.rs +++ b/key-wallet/src/transaction_checking/wallet_checker.rs @@ -94,16 +94,33 @@ impl WalletTransactionChecker for ManagedWalletInfo { if already_confirmed { return result; } - // Mark UTXOs as IS-locked and update the transaction context - for account_match in &result.affected_accounts { - if let Some(account) = self + // Mark UTXOs as IS-locked and update the transaction context. + // An account can match (its address pool detects the tx) without + // already holding a record — backfill via `record_transaction` + // before marking UTXOs so the freshly registered UTXOs get the + // IS-lock flag too. 
+ for account_match in result.affected_accounts.clone() { + let Some(account) = self .accounts .get_by_account_type_match_mut(&account_match.account_type_match) - { + else { + continue; + }; + if account.transactions.contains_key(&txid) { account.mark_utxos_instant_send(&txid); if let Some(record) = account.transactions.get_mut(&txid) { record.update_context(context.clone()); + result.updated_records.push(record.clone()); } + } else { + let record = account.record_transaction( + tx, + &account_match, + context.clone(), + tx_type, + ); + account.mark_utxos_instant_send(&txid); + result.new_records.push(record); } } if update_balance { @@ -129,12 +146,20 @@ impl WalletTransactionChecker for ManagedWalletInfo { if is_new { let record = account.record_transaction(tx, &account_match, context.clone(), tx_type); - if let Some(account_index) = account_match.account_type_match.account_index() { - result.new_records.push((account_index, record)); - } - result.state_modified = true; - } else if account.confirm_transaction(tx, &account_match, context.clone(), tx_type) { + result.new_records.push(record); result.state_modified = true; + } else { + let existed_before = account.transactions.contains_key(&tx.txid()); + if account.confirm_transaction(tx, &account_match, context.clone(), tx_type) { + result.state_modified = true; + if let Some(record) = account.transactions.get(&tx.txid()) { + if existed_before { + result.updated_records.push(record.clone()); + } else { + result.new_records.push(record.clone()); + } + } + } } for address_info in account_match.account_type_match.all_involved_addresses() { @@ -197,6 +222,7 @@ impl WalletTransactionChecker for ManagedWalletInfo { #[cfg(test)] mod tests { use super::*; + use crate::account::account_type::StandardAccountType; use crate::managed_account::transaction_record::{OutputRole, TransactionDirection}; use crate::test_utils::TestWalletContext; use crate::transaction_checking::BlockInfo; @@ -204,7 +230,7 @@ mod tests { use 
crate::wallet::initialization::WalletAccountCreationOptions; use crate::wallet::managed_wallet_info::wallet_info_interface::WalletInfoInterface; use crate::wallet::{ManagedWalletInfo, Wallet}; - use crate::Network; + use crate::{AccountType, Network}; use dashcore::blockdata::script::ScriptBuf; use dashcore::blockdata::transaction::Transaction; use dashcore::ephemerealdata::instant_lock::InstantLock; @@ -980,6 +1006,121 @@ mod tests { assert_eq!(ctx.managed_wallet.metadata.total_transactions, 1); } + /// Test that the InstantSend branch backfills a `TransactionRecord` on accounts + /// that match the transaction but have no prior record. This mirrors the + /// confirmation path's backfill: a tx pays outputs to two accounts but only + /// the first holds a record (e.g., a missed mempool delivery on the second + /// account); when the IS lock arrives, the wallet-level `is_new` is `false`, + /// yet the second account must still be backfilled or its UTXOs would be + /// IS-locked without a matching `TransactionRecord`. 
+ #[tokio::test] + async fn test_instantsend_backfills_missing_record_in_other_account() { + let mut wallet = + Wallet::new_random(Network::Testnet, WalletAccountCreationOptions::Default) + .expect("Should create wallet"); + wallet + .add_account( + AccountType::Standard { + index: 1, + standard_account_type: StandardAccountType::BIP44Account, + }, + None, + ) + .expect("Should add second BIP44 account"); + + let mut managed_wallet = + ManagedWalletInfo::from_wallet_with_name(&wallet, "Test".to_string()); + + let xpub0 = wallet + .accounts + .standard_bip44_accounts + .get(&0) + .expect("Should have BIP44 account 0") + .account_xpub; + let address0 = managed_wallet + .bip44_managed_account_at_index_mut(0) + .expect("Should have managed account 0") + .next_receive_address(Some(&xpub0), true) + .expect("Should generate address for account 0"); + + let xpub1 = wallet + .accounts + .standard_bip44_accounts + .get(&1) + .expect("Should have BIP44 account 1") + .account_xpub; + let address1 = managed_wallet + .bip44_managed_account_at_index_mut(1) + .expect("Should have managed account 1") + .next_receive_address(Some(&xpub1), true) + .expect("Should generate address for account 1"); + + // Build a tx with outputs to both accounts. + let mut tx = Transaction::dummy(&address0, 0..1, &[100_000]); + tx.output.push(TxOut { + value: 50_000, + script_pubkey: address1.script_pubkey(), + }); + let txid = tx.txid(); + + // Process as mempool first so both accounts record the tx. + let mut wallet_mut = wallet; + let mempool_result = managed_wallet + .check_core_transaction(&tx, TransactionContext::Mempool, &mut wallet_mut, true, true) + .await; + assert!(mempool_result.is_relevant); + assert!(mempool_result.is_new_transaction); + assert_eq!(mempool_result.affected_accounts.len(), 2); + + // Drop the record + UTXOs from account 1 to simulate a missed delivery + // there. 
Account 0 keeps the record so wallet-level `is_new` will be + // `false` when the IS lock arrives, exercising the backfill branch. + let account1 = managed_wallet + .bip44_managed_account_at_index_mut(1) + .expect("Should have managed account 1"); + account1.transactions.remove(&txid); + account1.utxos.clear(); + assert!(!account1.transactions.contains_key(&txid)); + assert!(account1.utxos.is_empty()); + + let is_result = managed_wallet + .check_core_transaction( + &tx, + TransactionContext::InstantSend(InstantLock::default()), + &mut wallet_mut, + true, + true, + ) + .await; + assert!(is_result.is_relevant); + assert!(!is_result.is_new_transaction, "Account 0 still holds the record"); + assert!(is_result.state_modified); + + // Account 0 was already known: classified as updated. + assert_eq!(is_result.updated_records.len(), 1); + assert_eq!(is_result.updated_records[0].txid, txid); + // Account 1 was backfilled: classified as new. + assert_eq!(is_result.new_records.len(), 1); + assert_eq!(is_result.new_records[0].txid, txid); + + // Both accounts should now hold the record with IS context and IS-locked UTXOs. + for account_index in 0..=1 { + let account = managed_wallet + .bip44_managed_account_at_index(account_index) + .expect("Should have account"); + let record = account + .transactions + .get(&txid) + .expect("Both accounts should hold the record after IS backfill"); + assert!(matches!(record.context, TransactionContext::InstantSend(_))); + assert!( + account.utxos.values().any(|u| u.outpoint.txid == txid && u.is_instantlocked), + "Account {account_index} should have an IS-locked UTXO from this tx" + ); + } + assert!(managed_wallet.instant_send_locks.contains(&txid)); + } + /// Test that `confirm_transaction` backfills a `TransactionRecord` when the account /// doesn't already have it. 
This covers the case where a block confirmation is processed /// on an account that missed the initial mempool recording (e.g., due to gap limit diff --git a/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs b/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs index 15c37a8a7..548d12fea 100644 --- a/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs +++ b/key-wallet/src/wallet/managed_wallet_info/wallet_info_interface.rs @@ -99,6 +99,18 @@ pub trait WalletInfoInterface: Sized + WalletTransactionChecker + ManagedAccount /// Record that the durable wallet sync checkpoint has advanced to `current_height`. fn update_synced_height(&mut self, current_height: u32); + /// Records whose coinbase maturity threshold lies in + /// `(old_height, new_height]`, i.e. coinbase records that just matured + /// during the height advance from `old_height` to `new_height`. + /// + /// Returns clones of the matured records so the caller can include them + /// in atomic events without mutating wallet state. + fn matured_coinbase_records( + &self, + old_height: CoreBlockHeight, + new_height: CoreBlockHeight, + ) -> Vec; + /// Mark UTXOs for a transaction as InstantSend-locked across all accounts /// and update the corresponding transaction record context. /// Returns `true` if any UTXO was newly marked. 
@@ -259,6 +271,32 @@ impl WalletInfoInterface for ManagedWalletInfo { self.metadata.synced_height = current_height; } + fn matured_coinbase_records( + &self, + old_height: CoreBlockHeight, + new_height: CoreBlockHeight, + ) -> Vec { + if new_height <= old_height { + return Vec::new(); + } + let mut matured = Vec::new(); + for account in self.accounts.all_accounts() { + for record in account.transactions.values() { + if !record.transaction.is_coin_base() { + continue; + } + let Some(record_height) = record.height() else { + continue; + }; + let maturity_height = record_height.saturating_add(100); + if maturity_height > old_height && maturity_height <= new_height { + matured.push(record.clone()); + } + } + } + matured + } + fn mark_instant_send_utxos(&mut self, txid: &Txid, lock: &InstantLock) -> bool { if !self.instant_send_locks.insert(*txid) { return false;