diff --git a/Cargo.lock b/Cargo.lock index fd435ac0ce..8632a2afbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -922,6 +922,7 @@ name = "batched-merkle-tree-test" version = "0.1.0" dependencies = [ "light-account-checks", + "light-array-map", "light-batched-merkle-tree", "light-bloom-filter", "light-compressed-account", diff --git a/program-libs/batched-merkle-tree/src/errors.rs b/program-libs/batched-merkle-tree/src/errors.rs index a322777757..e09b5bc21b 100644 --- a/program-libs/batched-merkle-tree/src/errors.rs +++ b/program-libs/batched-merkle-tree/src/errors.rs @@ -51,6 +51,8 @@ pub enum BatchedMerkleTreeError { NonInclusionCheckFailed, #[error("Bloom filter must be zeroed prior to reusing a batch.")] BloomFilterNotZeroed, + #[error("Cannot zero out complete or more than complete root history.")] + CannotZeroCompleteRootHistory, #[error("Account error {0}")] AccountError(#[from] AccountError), } @@ -70,6 +72,7 @@ impl From for u32 { BatchedMerkleTreeError::TreeIsFull => 14310, BatchedMerkleTreeError::NonInclusionCheckFailed => 14311, BatchedMerkleTreeError::BloomFilterNotZeroed => 14312, + BatchedMerkleTreeError::CannotZeroCompleteRootHistory => 14313, BatchedMerkleTreeError::Hasher(e) => e.into(), BatchedMerkleTreeError::ZeroCopy(e) => e.into(), BatchedMerkleTreeError::MerkleTreeMetadata(e) => e.into(), diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index ca91e6ec9b..7517684478 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -747,7 +747,11 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// - now all roots containing values nullified in the final B0 root update are zeroed /// - B0 is safe to clear /// - fn zero_out_roots(&mut self, sequence_number: u64, first_safe_root_index: u32) { + fn zero_out_roots( + &mut self, + sequence_number: u64, + first_safe_root_index: u32, + ) -> Result<(), BatchedMerkleTreeError> { // 1. 
Check whether overlapping roots exist. let overlapping_roots_exits = sequence_number > self.sequence_number; if overlapping_roots_exits { @@ -757,10 +761,13 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // the update of the previous batch therfore allow anyone to prove // inclusion of values nullified in the previous batch. let num_remaining_roots = sequence_number - self.sequence_number; + if num_remaining_roots >= self.root_history.len() as u64 { + return Err(BatchedMerkleTreeError::CannotZeroCompleteRootHistory); + } // 2.2. Zero out roots oldest to first safe root index. // Skip one iteration we don't need to zero out // the first safe root. - for _ in 1..num_remaining_roots { + for _ in 0..num_remaining_roots { self.root_history[oldest_root_index] = [0u8; 32]; oldest_root_index += 1; oldest_root_index %= self.root_history.len(); @@ -771,6 +778,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { "Zeroing out roots failed." ); } + Ok(()) } /// Zero out bloom filter of previous batch if 50% of the @@ -808,23 +816,34 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let current_batch_is_half_full = num_inserted_elements >= batch_size / 2; current_batch_is_half_full && current_batch_is_not_inserted }; - + let sequence_number = self.sequence_number; + let root_history_len = self.metadata.root_history_capacity as u64; let previous_pending_batch = self .queue_batches .batches .get_mut(previous_pending_batch_index) .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; - + let no_insert_since_last_batch_root = (previous_pending_batch + .sequence_number + .saturating_sub(root_history_len)) + == sequence_number; let previous_batch_is_inserted = previous_pending_batch.get_state() == BatchState::Inserted; let previous_batch_is_ready = previous_batch_is_inserted && !previous_pending_batch.bloom_filter_is_zeroed(); // Current batch is at least half full, previous batch is inserted, and not zeroed. 
- if current_batch_is_half_full && previous_batch_is_ready { + if current_batch_is_half_full && previous_batch_is_ready && !no_insert_since_last_batch_root + { // 3.1. Mark bloom filter zeroed. previous_pending_batch.set_bloom_filter_to_zeroed(); let seq = previous_pending_batch.sequence_number; - let root_index = previous_pending_batch.root_index; + // previous_pending_batch.root_index is the index the root + // of the last update of that batch was inserted at. + // This is the last unsafe root index. + // The next index is safe. + let first_safe_root_index = + (previous_pending_batch.root_index + 1) % self.metadata.root_history_capacity; + // 3.2. Zero out bloom filter. { let bloom_filter = self @@ -837,7 +856,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // which allows to prove inclusion of a value // that was inserted into the bloom filter just zeroed out. { - self.zero_out_roots(seq, root_index); + self.zero_out_roots(seq, first_safe_root_index)?; } } @@ -1145,7 +1164,9 @@ mod test { let rng = &mut rand::rngs::StdRng::from_seed([0u8; 32]); let mut latest_root_0 = [0u8; 32]; let mut latest_root_1 = [0u8; 32]; - + let last_batch1_root; + let last_batch0_root_update2; + let first_batch1_root; // 1. No batch is ready // -> nothing should happen. { @@ -1164,7 +1185,7 @@ mod test { latest_root_0 = rnd_root; account.metadata.sequence_number += 1; let root_index = account.get_root_index(); - println!("root_index: {}", root_index); + let sequence_number = account.sequence_number; let state = account.queue_batches.batches[0] @@ -1182,6 +1203,7 @@ mod test { let index = account.queue_batches.batches[0].root_index; assert_eq!(account.root_history[index as usize], latest_root_0); } + let last_batch0_root = latest_root_0; // 2. Batch 0 is inserted but Batch 1 is not half full // -> nothing should happen. 
{ @@ -1214,23 +1236,49 @@ mod test { { insert_rnd_addresses(&mut account_data, 1, rng, current_slot, &pubkey).unwrap(); } + // 4.1 Batch 0 is inserted but Batch 1 is half full but no further roots exist + // if we zero out all roots we delete the tree state. + // -> nothing should happen. + { + let mut account_data = account_data.clone(); + let account_data_ref = account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) + .unwrap(); + account.zero_out_previous_batch_bloom_filter().unwrap(); + assert_eq!(account_data, account_data_ref); + } + { + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) + .unwrap(); + // Insert first root for batch 1. + let rnd_root = rng.gen(); + first_batch1_root = rnd_root; + account.root_history.push(rnd_root); + account.metadata.sequence_number += 1; + let root_index = account.get_root_index(); + + let sequence_number = account.sequence_number; + + let state = account.queue_batches.batches[1] + .mark_as_inserted_in_merkle_tree(sequence_number, root_index, root_history_len) + .unwrap(); + account + .queue_batches + .increment_pending_batch_index_if_inserted(state); + } let mut account_data = account_data.clone(); let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); - println!( - "currently inserted elements: {:?}", - account.queue_batches.batches[1].get_num_inserted_elements() - ); + let previous_roots = account.root_history.to_vec(); account.zero_out_previous_batch_bloom_filter().unwrap(); let current_roots = account.root_history.to_vec(); - println!("previous_roots: {:?}", previous_roots); + assert_ne!(previous_roots, current_roots); let root_index = account.queue_batches.batches[0].root_index; - assert_eq!( - account.root_history[root_index as usize], - previous_roots[root_index as usize] - ); + assert_eq!( account.queue_batches.batches[0].get_state(), BatchState::Inserted @@ -1244,12 
+1292,14 @@ mod test { ); for i in 0..root_history_len as usize { - if i == root_index as usize { - assert_eq!(account.root_history[i], latest_root_0); + if i == root_index as usize + 1 { + assert_eq!(account.root_history[i], first_batch1_root); } else { assert_eq!(account.root_history[i], [0u8; 32]); } } + + assert!(!account.root_history.iter().any(|x| *x == last_batch0_root)); } // Make Batch 1 full and insert { @@ -1261,7 +1311,7 @@ mod test { BatchState::Full ); // simulate batch insertion - for _ in 0..num_zkp_updates { + for _ in 1..num_zkp_updates { let rnd_root = rng.gen(); account.root_history.push(rnd_root); latest_root_1 = rnd_root; @@ -1284,6 +1334,7 @@ mod test { assert_eq!(account.queue_batches.pending_batch_index, 0); let index = account.queue_batches.batches[1].root_index; assert_eq!(account.root_history[index as usize], latest_root_1); + last_batch1_root = latest_root_1; } println!("pre 4"); // 5. Batch 1 is inserted and Batch 0 is empty @@ -1328,6 +1379,7 @@ mod test { insert_rnd_addresses(&mut account_data, 1, rng, current_slot, &pubkey).unwrap(); } // simulate 10 other batch insertions from an output queue + // that overwrite the complete root history { let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) @@ -1338,9 +1390,11 @@ mod test { account.metadata.sequence_number += 1; } } + let mut account_data_ref = account_data.clone(); let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); + // Batch 0 is half full and other roots exist. -> should zero out bloom filter but not zero out any roots. 
account.zero_out_previous_batch_bloom_filter().unwrap(); let mut account_ref = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data_ref, &pubkey) @@ -1350,6 +1404,7 @@ mod test { .for_each(|x| *x = 0); account_ref.queue_batches.batches[1].set_bloom_filter_to_zeroed(); assert_eq!(account.get_metadata(), account_ref.get_metadata()); + assert!(!account.root_history.iter().any(|x| *x == last_batch1_root)); assert_eq!(account, account_ref); } // 8. Batch 1 is already zeroed -> nothing should happen @@ -1400,6 +1455,28 @@ mod test { .queue_batches .increment_pending_batch_index_if_inserted(state); } + last_batch0_root_update2 = *account.root_history.last().unwrap(); + + // Perform batch 1 insertions to create a new root that is not part of batch 1 update. + { + let rnd_root = rng.gen(); + account.root_history.push(rnd_root); + + account.metadata.sequence_number += 1; + let root_index = account.get_root_index(); + let sequence_number = account.sequence_number; + + let state = account.queue_batches.batches[1] + .mark_as_inserted_in_merkle_tree( + sequence_number, + root_index, + root_history_len, + ) + .unwrap(); + account + .queue_batches + .increment_pending_batch_index_if_inserted(state); + } } println!("pre 9"); let mut account_data_ref = account_data.clone(); @@ -1413,7 +1490,9 @@ mod test { account.queue_batches.batches[1].get_state(), BatchState::Full ); + account.zero_out_previous_batch_bloom_filter().unwrap(); + let mut account_ref = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data_ref, &pubkey) .unwrap(); @@ -1424,12 +1503,16 @@ mod test { account_ref.queue_batches.batches[0].set_bloom_filter_to_zeroed(); assert_eq!(account.get_metadata(), account_ref.get_metadata()); for i in 0..root_history_len as usize { - if i == root_index as usize { + if i == root_index as usize + 1 { continue; } else { account_ref.root_history[i] = [0u8; 32]; } } + assert!(!account + .root_history + .iter() + .any(|x| *x == last_batch0_root_update2)); 
assert_eq!(account, account_ref); } @@ -1437,7 +1520,7 @@ mod test { { let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); - for _ in 0..num_zkp_updates { + for _ in 1..num_zkp_updates { let rnd_root = rng.gen(); account.root_history.push(rnd_root); account.metadata.sequence_number += 1; @@ -1470,7 +1553,6 @@ mod test { } println!("pre 9.1"); - // Zero out batch 1 with user tx { // fill batch 0 { diff --git a/program-tests/batched-merkle-tree-test/Cargo.toml b/program-tests/batched-merkle-tree-test/Cargo.toml index bb0b434421..d63e8565ed 100644 --- a/program-tests/batched-merkle-tree-test/Cargo.toml +++ b/program-tests/batched-merkle-tree-test/Cargo.toml @@ -22,6 +22,7 @@ light-bloom-filter = { workspace = true, features = ["solana"] } light-zero-copy = { workspace = true } solana-pubkey = { workspace = true } light-merkle-tree-metadata = { workspace = true } +light-array-map = { workspace = true} [lints.rust.unexpected_cfgs] level = "allow" diff --git a/program-tests/batched-merkle-tree-test/tests/e2e.rs b/program-tests/batched-merkle-tree-test/tests/e2e.rs new file mode 100644 index 0000000000..cf205e8b63 --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e.rs @@ -0,0 +1,2 @@ +#[path = "e2e_tests/mod.rs"] +mod e2e_tests; diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs new file mode 100644 index 0000000000..5e5ebb037a --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs @@ -0,0 +1,277 @@ +#![allow(unused_assignments)] + +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, NUM_BATCHES}, + errors::BatchedMerkleTreeError, + initialize_address_tree::{ + get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, + InitAddressTreeAccountsInstructionData, + }, + 
merkle_tree::BatchedMerkleTreeAccount, +}; +use light_bloom_filter::BloomFilterError; +use light_compressed_account::pubkey::Pubkey; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::MockBatchedAddressForester; +use rand::rngs::StdRng; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[serial] +#[tokio::test] +async fn test_fill_address_tree_completely() { + spawn_prover().await; + let mut current_slot = 1; + let roothistory_capacity = vec![17, 80]; + for root_history_capacity in roothistory_capacity { + let mut mock_indexer = + MockBatchedAddressForester::<{ DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize }>::default(); + + let mut params = InitAddressTreeAccountsInstructionData::test_default(); + // Root history capacity which is greater than the input updates + params.root_history_capacity = root_history_capacity; + + let owner = Pubkey::new_unique(); + + let mt_account_size = get_address_merkle_tree_account_size_from_params(params); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = 1_000_000_000; + + init_batched_address_merkle_tree_account( + owner, + params, + &mut mt_account_data, + merkle_tree_rent, + mt_pubkey, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + + let num_tx = NUM_BATCHES * params.input_queue_batch_size as usize; + let mut first_value = [0u8; 32]; + for tx in 0..num_tx { + println!("Input insert -----------------------------"); + let mut rnd_address = get_rnd_bytes(&mut rng); + rnd_address[0] = 0; + + let mut pre_account_data = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data, &mt_pubkey) + .unwrap(); + let pre_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_hash_chains = 
pre_merkle_tree_account.hash_chain_stores; + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + merkle_tree_account + .insert_address_into_queue(&rnd_address, ¤t_slot) + .unwrap(); + assert_input_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![rnd_address], + vec![rnd_address], + vec![true], + vec![], + ¤t_slot, + ) + .unwrap(); + current_slot += 1; + mock_indexer.queue_leaves.push(rnd_address); + + // Insert the same value twice + { + // copy data so that failing test doesn't affect the state of + // subsequent tests + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = + merkle_tree_account.insert_address_into_queue(&rnd_address, ¤t_slot); + println!("tx {}", tx); + println!("errors {:?}", result); + if tx == params.input_queue_batch_size as usize * 2 - 1 { + // Error when the value is already inserted into the other batch. + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } else if tx == params.input_queue_batch_size as usize - 1 { + // Error when the value is already inserted into the other batch. + // This occurs only when we switch the batch in this test. + assert_eq!( + result.unwrap_err(), + BatchedMerkleTreeError::NonInclusionCheckFailed + ); + } else { + // Error when inserting into the bloom filter directly twice. 
+ assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + + current_slot += 1; + } + // Try to insert first value into any batch + if tx == 0 { + first_value = rnd_address; + } else { + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + let result = merkle_tree_account.insert_address_into_queue( + &first_value.to_vec().try_into().unwrap(), + ¤t_slot, + ); + println!("tx {}", tx); + println!("result {:?}", result); + if tx == params.input_queue_batch_size as usize * 2 - 1 { + // Error when the value is already inserted into the other batch. + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } else if tx >= params.input_queue_batch_size as usize - 1 + // || tx == params.input_queue_batch_size as usize + { + // Error when the value is already inserted into the other batch. + // This occurs only when we switch the batch in this test. + assert_eq!( + result.unwrap_err(), + BatchedMerkleTreeError::NonInclusionCheckFailed + ); + } else { + // Error when inserting into the bloom filter directly twice. 
+ assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + current_slot += 1; + + // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + } + // Assert input queue is full and doesn't accept more inserts + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let rnd_bytes = get_rnd_bytes(&mut rng); + let result = merkle_tree_account.insert_address_into_queue(&rnd_bytes, ¤t_slot); + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } + // Root of the final batch of first input queue batch + let mut first_input_batch_update_root_value = [0u8; 32]; + let num_updates = 10; + let mut batch_roots: Vec<(u32, Vec<[u8; 32]>)> = { + let merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let initial_root = *merkle_tree_account.root_history.last().unwrap(); + vec![(0, vec![initial_root])] + }; + for i in 0..num_updates { + println!("address update ----------------------------- {}", i); + perform_address_update( + &mut mt_account_data, + &mut mock_indexer, + mt_pubkey, + &mut batch_roots, + ) + .await; + if i == 4 { + first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); + } + let merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + // assert other batch is not zeroed + let batch_one = merkle_tree_account.queue_batches.batches.get(1).unwrap(); + assert!(!batch_one.bloom_filter_is_zeroed()); + + // after 5 updates the first batch is completely inserted + // As soon as we switch to inserting the second batch we zero out the first batch since + // the second batch is completely full. 
+ if i >= 5 { + assert!(batch.bloom_filter_is_zeroed()); + + // Assert that all unsafe roots from batch 0 are zeroed + let (_, unsafe_roots) = batch_roots.iter().find(|(idx, _)| *idx == 0).unwrap(); + assert_eq!(unsafe_roots.len(), 6, "batch_roots {:?}", batch_roots); + for unsafe_root in unsafe_roots { + assert!( + !merkle_tree_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch 0 should be zeroed: {:?}", + unsafe_root + ); + } + } else { + assert!(!batch.bloom_filter_is_zeroed()); + } + } + // assert all bloom_filters are inserted + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { + assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + // first batch is zeroed out since the second batch is full + assert!(batch.bloom_filter_is_zeroed()); + } else { + // second batch is not zeroed out since the first batch is empty + assert!(!batch.bloom_filter_is_zeroed()); + } + } + } + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!("root history {:?}", merkle_tree_account.root_history); + let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); + + for root in merkle_tree_account.root_history.iter() { + println!("root {:?}", root); + } + println!( + "root in root index {:?}", + merkle_tree_account.root_history[pre_batch_zero.root_index as usize] + ); + // check that all roots have been overwritten except the root index + // of the update + let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; + let start = merkle_tree_account.root_history.last_index() as u32; + println!("start {:?}", start); + for root in start + 1..pre_batch_zero.root_index + root_history_len { + println!("actual index {:?}", root); + let index = root % 
root_history_len; + + if index == pre_batch_zero.root_index { + let root_index = pre_batch_zero.root_index as usize; + + assert_eq!( + merkle_tree_account.root_history[root_index], + first_input_batch_update_root_value + ); + assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); + break; + } + println!("index {:?}", index); + assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); + } + } + } +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs new file mode 100644 index 0000000000..aa225df49a --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs @@ -0,0 +1,347 @@ +#![allow(unused_assignments)] +use std::cmp::min; + +use crate::e2e_tests::shared::*; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{ + ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, + DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES, + }, + errors::BatchedMerkleTreeError, + initialize_address_tree::{ + get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, + InitAddressTreeAccountsInstructionData, + }, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, + test_utils::get_state_merkle_tree_account_size_from_params, + InitStateTreeAccountsInstructionData, + }, + merkle_tree::{ + assert_batch_adress_event, assert_batch_append_event_event, assert_nullify_event, + test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + }, + merkle_tree_metadata::BatchedMerkleTreeMetadata, + queue::{ + test_utils::{ + get_output_queue_account_size_default, get_output_queue_account_size_from_params, + }, + BatchedQueueAccount, BatchedQueueMetadata, + }, +}; +use light_bloom_filter::{BloomFilter, BloomFilterError}; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, 
instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_hasher::{Hasher, Poseidon}; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{ + MockBatchedAddressForester, MockBatchedForester, MockTxEvent, +}; +use light_zero_copy::vec::ZeroCopyVecU64; +use rand::{rngs::StdRng, Rng}; +use serial_test::serial; + +/// queues with a counter which keeps things below X tps and an if that +/// executes tree updates when possible. +#[serial] +#[tokio::test] +async fn test_e2e() { + spawn_prover().await; + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let num_tx = 2200; + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_default(); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let params = InitStateTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update; + let mut out_ready_for_update; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + let mut current_slot = rng.gen(); + + for tx in 0..num_tx { + println!("tx: {}", tx); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_updates: {}", 
num_output_updates); + // Output queue + { + if rng.gen_bool(0.5) { + println!("Output insert -----------------------------"); + println!("num_output_values: {}", num_output_values); + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut pre_account_bytes = output_queue_account_data.clone(); + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); + let pre_account = *pre_output_account.get_metadata(); + let pre_value_store = pre_output_account.value_vecs; + let pre_hash_chains = pre_output_account.hash_chain_stores; + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + output_account + .insert_into_current_batch(&rnd_bytes, ¤t_slot) + .unwrap(); + assert_output_queue_insert( + pre_account, + pre_value_store, + pre_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + vec![rnd_bytes], + current_slot, + ) + .unwrap(); + current_slot += 1; + num_output_values += 1; + mock_indexer.output_queue_leaves.push(rnd_bytes); + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + out_ready_for_update = output_account + .batch_metadata + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::Full); + } + + // Input queue + { + let mut pre_account_bytes = mt_account_data.clone(); + + if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() { + println!("Input insert -----------------------------"); + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + + let pre_mt_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes, &mt_pubkey) + .unwrap(); + let pre_account = *pre_mt_account.get_metadata(); + let pre_hash_chains = pre_mt_account.hash_chain_stores; + let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); + let tx_hash = 
create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); + let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); + mock_indexer.input_queue_leaves.push((leaf, leaf_index)); + mock_indexer.tx_events.push(MockTxEvent { + inputs: vec![leaf], + outputs: vec![], + tx_hash, + }); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + merkle_tree_account + .insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ) + .unwrap(); + + { + let mut mt_account_data = mt_account_data.clone(); + let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes( + &mut mt_account_data, + &mt_pubkey, + ) + .unwrap(); + assert_nullifier_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![leaf], + vec![leaf_index as u64], + tx_hash, + vec![true], + vec![], + ¤t_slot, + ) + .unwrap(); + current_slot += 1; + } + num_input_values += 1; + } + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + in_ready_for_update = merkle_tree_account + .queue_batches + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::Full); + } + + if in_ready_for_update { + println!("Input update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + in_ready_for_update = false; + perform_input_update(&mut pre_mt_account_data, &mut mock_indexer, true, mt_pubkey) + .await; + mt_account_data = pre_mt_account_data.clone(); + + num_input_updates += 1; + } + + if out_ready_for_update { + println!("Output update -----------------------------"); + println!("Num inserted values: 
{}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves = output_account + .value_vecs + .get(next_full_batch as usize) + .unwrap() + .to_vec(); + println!("leaves {:?}", leaves.len()); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; + let end = start + batch.zkp_batch_size as usize; + for leaf in &leaves[start..end] { + // Storing the leaf in the output queue indexer so that it + // can be inserted into the input queue later. 
+ mock_indexer.active_leaves.push(*leaf); + } + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + let mut pre_output_queue_state = output_queue_account_data.clone(); + println!("Output update -----------------------------"); + + let queue_account = + &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + println!( + "post update: sequence number: {}", + account.get_metadata().sequence_number + ); + println!("output_res {:?}", output_res); + assert!(output_res.is_ok()); + + println!("output update success {}", num_output_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let old_output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + assert_merkle_tree_update( + old_account, + account, + Some(old_output_account), + Some(output_account), + new_root, + ); + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + out_ready_for_update = false; + num_output_updates += 1; + } + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + 
println!("num_output_updates: {}", num_output_updates); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs new file mode 100644 index 0000000000..b37f796bcf --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs @@ -0,0 +1,4 @@ +pub mod address; +pub mod shared; +pub mod simulate_txs; +pub mod state; diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs new file mode 100644 index 0000000000..706f202fe5 --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs @@ -0,0 +1,1067 @@ +#![allow(unused_assignments)] + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::DEFAULT_BATCH_STATE_TREE_HEIGHT, + errors::BatchedMerkleTreeError, + merkle_tree::{ + assert_batch_adress_event, BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs, + }, + merkle_tree_metadata::BatchedMerkleTreeMetadata, + queue::{BatchedQueueAccount, BatchedQueueMetadata}, +}; +use light_bloom_filter::BloomFilter; +use light_compressed_account::{ + instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, +}; +use light_hasher::{Hasher, Poseidon}; +use light_test_utils::mock_batched_forester::{MockBatchedAddressForester, MockBatchedForester}; +use light_zero_copy::vec::ZeroCopyVecU64; +use rand::{rngs::StdRng, Rng}; + +pub async fn perform_address_update( + mt_account_data: &mut [u8], + mock_indexer: &mut MockBatchedAddressForester<40>, + mt_pubkey: Pubkey, + batch_roots: &mut Vec<(u32, Vec<[u8; 32]>)>, +) { + println!("pre address update -----------------------------"); + let mut cloned_mt_account_data = (*mt_account_data).to_vec(); + let 
old_account = BatchedMerkleTreeAccount::address_from_bytes( + cloned_mt_account_data.as_mut_slice(), + &mt_pubkey, + ) + .unwrap(); + let (input_res, new_root, _pre_next_full_batch) = { + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + + let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; + let next_index = account.get_metadata().next_index; + println!("next index {:?}", next_index); + let batch = account + .queue_batches + .batches + .get(next_full_batch as usize) + .unwrap(); + let batch_start_index = + batch.start_index + batch.get_num_inserted_zkps() * batch.zkp_batch_size; + println!("batch start index {}", batch_start_index); + let leaves_hash_chain = account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let current_root = account.root_history.last().unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_address_proof( + account.get_metadata().queue_batches.batch_size as u32, + account.get_metadata().queue_batches.zkp_batch_size as u32, + *leaves_hash_chain, + next_index as usize, + batch_start_index as usize, + *current_root, + ) + .await + .unwrap(); + + mock_indexer.finalize_batch_address_update(10); + assert_eq!(mock_indexer.merkle_tree.root(), new_root); + let instruction_data = InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + account.update_tree_from_address_queue(instruction_data), + new_root, + next_full_batch, + ) + }; + println!("post address update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let event = input_res.unwrap(); + assert_batch_adress_event(event, new_root, &old_account, mt_pubkey); + + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One 
root changed one didn't + + let account = + BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + + let batch_index_for_this_root = _pre_next_full_batch as u32; + if let Some((_idx, roots)) = batch_roots + .iter_mut() + .find(|(idx, _)| *idx == batch_index_for_this_root) + { + roots.push(new_root); + } else { + batch_roots.push((batch_index_for_this_root, vec![new_root])); + } + + assert_address_merkle_tree_update(old_account, account, new_root, batch_roots); +} + +pub fn assert_merkle_tree_update( + mut old_account: BatchedMerkleTreeAccount, + account: BatchedMerkleTreeAccount, + old_queue_account: Option, + queue_account: Option, + root: [u8; 32], + batch_roots: &mut ArrayMap, 2>, +) { + old_account.sequence_number += 1; + old_account.root_history.push(root); + println!("Adding root: {:?}", root); + // Determine batch index and state for this update + // For both input and output updates, use the INPUT queue's batch index + // because that's what controls root zeroing + let (batch_idx, _) = { + let idx = old_account.queue_batches.pending_batch_index; + let state = old_account + .queue_batches + .batches + .get(idx as usize) + .unwrap() + .get_state(); + (idx as u32, state) + }; + if let Some(roots) = batch_roots.get_mut_by_key(&batch_idx) { + roots.push(root) + } else { + batch_roots.insert(batch_idx, vec![root], ()).unwrap(); + } + + let input_queue_previous_batch_state = + old_account.queue_batches.get_previous_batch().get_state(); + let input_queue_current_batch = old_account.queue_batches.get_current_batch(); + let previous_batch_index = old_account.queue_batches.get_previous_batch_index(); + let is_half_full = input_queue_current_batch.get_num_inserted_elements() + >= input_queue_current_batch.batch_size / 2 + && input_queue_current_batch.get_state() != BatchState::Inserted; + let root_history_len = old_account.root_history.capacity() as u64; + let previous_batch = old_account.queue_batches.get_previous_batch(); + let 
no_insert_since_last_batch_root = (previous_batch + .sequence_number + .saturating_sub(root_history_len)) + == old_account.sequence_number; + if is_half_full + && input_queue_previous_batch_state == BatchState::Inserted + && !old_account + .queue_batches + .get_previous_batch() + .bloom_filter_is_zeroed() + && !no_insert_since_last_batch_root + { + println!("Entering zeroing block for batch {}", previous_batch_index); + println!( + "Previous batch state: {:?}", + input_queue_previous_batch_state + ); + println!( + "Previous batch: {:?}", + old_account.queue_batches.get_previous_batch() + ); + old_account + .queue_batches + .get_previous_batch_mut() + .set_bloom_filter_to_zeroed(); + old_account.bloom_filter_stores[previous_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + let previous_full_batch = old_account + .queue_batches + .batches + .get(previous_batch_index) + .unwrap(); + let sequence_number = previous_full_batch.sequence_number; + + // Log the last unsafe root + let last_unsafe_root_index = previous_full_batch.root_index; + let first_safe_root_index = last_unsafe_root_index + 1; + println!("DEBUG: Last unsafe root index: {}", last_unsafe_root_index); + println!("DEBUG: First safe root index: {}", first_safe_root_index); + if let Some(last_unsafe_root) = old_account + .root_history + .get(last_unsafe_root_index as usize) + { + println!( + "DEBUG: Last unsafe root at index {}: {:?}", + last_unsafe_root_index, + &last_unsafe_root[0..4] + ); + } + + let overlapping_roots_exits = sequence_number > old_account.sequence_number; + if overlapping_roots_exits { + let mut oldest_root_index = old_account.root_history.first_index(); + // 2.1. Get, num of remaining roots. + // Remaining roots have not been updated since + // the update of the previous batch hence enable to prove + // inclusion of values nullified in the previous batch. + let num_remaining_roots = sequence_number - old_account.sequence_number; + // 2.2. 
Zero out roots oldest to first safe root index. + for _ in 0..num_remaining_roots { + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + + // Assert that all unsafe roots from this batch are zeroed + let batch_key = previous_batch_index as u32; + if let Some(unsafe_roots) = batch_roots.get_by_key(&batch_key) { + for unsafe_root in unsafe_roots { + assert!( + !old_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch {} should be zeroed: {:?} root history {:?}, unsafe roots {:?}", + previous_batch_index, + unsafe_root, + old_account.root_history, unsafe_roots + ); + } + // Clear unsafe roots after verification - batch index will be reused + if let Some(roots) = batch_roots.get_mut_by_key(&batch_key) { + roots.clear(); + } + } + + // Assert that the correct number of roots remain non-zero + // Calculate expected non-zero roots: those created since the last zeroing + let non_zero_roots: Vec<[u8; 32]> = old_account + .root_history + .iter() + .filter(|root| **root != [0u8; 32]) + .copied() + .collect(); + + // Expected number of non-zero roots = number of updates since last zeroing + // This is the sequence difference that wasn't zeroed + let expected_non_zero = old_account.root_history.len() - num_remaining_roots as usize; + + assert_eq!( + non_zero_roots.len(), + expected_non_zero, + "Expected {} non-zero roots after zeroing, but found {}. 
Root history: {:?}", + expected_non_zero, + non_zero_roots.len(), + old_account.root_history + ); + + // Assert that all remaining non-zero roots are tracked in the current (non-zeroed) batch + let current_batch_idx = old_account.queue_batches.pending_batch_index as u32; + if let Some(current_batch_roots) = batch_roots.get_by_key(¤t_batch_idx) { + // Debug: print the entire root history + println!("DEBUG: Root history after zeroing:"); + for (i, root) in old_account.root_history.iter().enumerate() { + if *root != [0u8; 32] { + println!(" Index {}: {:?}", i, root); + } + } + + // Debug: print all tracked roots for current batch and their indices + println!("DEBUG: Roots tracked for batch {}:", current_batch_idx); + for (i, root) in current_batch_roots.iter().enumerate() { + let root_index = old_account.root_history.iter().position(|r| r == root); + println!(" Root {}: {:?} at index {:?}", i, root, root_index); + } + let next_batch_index = (current_batch_idx + 1) % 2; + println!("DEBUG: Roots tracked for next batch {}:", next_batch_index); + for (i, root) in batch_roots + .get_by_key(&next_batch_index) + .as_ref() + .unwrap() + .iter() + .enumerate() + { + let root_index = old_account.root_history.iter().position(|r| r == root); + println!(" Root {}: {:?} at index {:?}", i, root, root_index); + } + + for non_zero_root in &non_zero_roots { + // Skip the initial root (usually all zeros or a known starting value) + // which might not be tracked in any batch + if old_account.sequence_number > 0 { + assert!( + current_batch_roots.contains(non_zero_root), + "Non-zero root {:?} should be tracked in current batch {} but wasn't found. 
Current batch roots: {:?}", + non_zero_root, + current_batch_idx, + current_batch_roots + ); + } + } + + // Also verify the count matches + println!("DEBUG: current_batch_idx: {}", current_batch_idx); + println!( + "DEBUG: current_batch_roots.len(): {}", + current_batch_roots.len() + ); + println!("DEBUG: non_zero_roots.len(): {}", non_zero_roots.len()); + println!( + "DEBUG: merkle_tree.sequence_number: {}", + old_account.sequence_number + ); + println!("DEBUG: num_remaining_roots: {}", num_remaining_roots); + println!("DEBUG: previous_batch.sequence_number: {}", sequence_number); + assert_eq!( + current_batch_roots.len(), + non_zero_roots.len(), + "Current batch {} should have {} roots tracked, but has {}", + current_batch_idx, + non_zero_roots.len(), + current_batch_roots.len() + ); + } + } + } + // Output queue update + if let Some(mut old_queue_account) = old_queue_account { + let queue_account = queue_account.unwrap(); + let old_full_batch_index = old_queue_account.batch_metadata.pending_batch_index; + let old_full_batch = old_queue_account + .batch_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + old_account.root_history.capacity() as u32, + ) + .unwrap(); + + if old_full_batch.get_state() == BatchState::Inserted { + old_queue_account.batch_metadata.pending_batch_index += 1; + old_queue_account.batch_metadata.pending_batch_index %= 2; + } + assert_eq!( + queue_account.get_metadata(), + old_queue_account.get_metadata() + ); + assert_eq!(queue_account, old_queue_account); + // Only the output queue appends state + let zkp_batch_size = old_account.queue_batches.zkp_batch_size; + old_account.next_index += zkp_batch_size; + } else { + // Input queue update + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let history_capacity = old_account.root_history.capacity(); + let 
previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; + let zkp_batch_size = old_account.queue_batches.zkp_batch_size; + old_account.nullifier_next_index += zkp_batch_size; + + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + println!( + "current batch {:?}", + old_full_batch.get_num_inserted_elements() + ); + + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_batches.pending_batch_index += 1; + old_account.queue_batches.pending_batch_index %= 2; + } + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let zeroed_batch = old_full_batch.get_num_inserted_elements() + >= old_full_batch.batch_size / 2 + && old_full_batch.get_state() != BatchState::Inserted; + println!("zeroed_batch: {:?}", zeroed_batch); + + let state = account.queue_batches.batches[previous_full_batch_index].get_state(); + let root_history_len = old_account.root_history.capacity() as u64; + let old_account_sequence_number = old_account.sequence_number; + let previous_batch_sequence_number = old_account + .queue_batches + .batches + .get(previous_full_batch_index) + .unwrap() + .sequence_number; + let no_insert_since_last_batch_root = (previous_batch_sequence_number + .saturating_sub(root_history_len)) + == old_account_sequence_number; + println!( + "zeroing out values: {}", + zeroed_batch && state == BatchState::Inserted + ); + if zeroed_batch && state == BatchState::Inserted && !no_insert_since_last_batch_root { + println!( + "DEBUG: Entering OUTPUT queue zeroing block for batch {}", + previous_full_batch_index + ); + let previous_batch = old_account + .queue_batches + 
.batches + .get_mut(previous_full_batch_index) + .unwrap(); + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch_sequence_number; + let overlapping_roots_exits = sequence_number > old_account_sequence_number; + if overlapping_roots_exits { + old_account.bloom_filter_stores[previous_full_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account_sequence_number; + println!("num_remaining_roots: {}", num_remaining_roots); + println!("sequence_number: {}", account.sequence_number); + for _ in 0..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + + // Assert that all unsafe roots from this batch are zeroed + let batch_key = previous_full_batch_index as u32; + if let Some(unsafe_roots) = batch_roots.get_by_key(&batch_key) { + for unsafe_root in unsafe_roots { + assert!( + !old_account.root_history.iter().any(|x| *x == *unsafe_root), + "Unsafe root from batch {} should be zeroed: {:?}", + previous_full_batch_index, + unsafe_root + ); + } + // Clear unsafe roots after verification - batch index will be reused + if let Some(roots) = batch_roots.get_mut_by_key(&batch_key) { + roots.clear(); + } + } + + // Assert that the correct number of roots remain non-zero + let non_zero_roots: Vec<[u8; 32]> = old_account + .root_history + .iter() + .filter(|root| **root != [0u8; 32]) + .copied() + .collect(); + + // Expected number of non-zero roots = number of updates since last zeroing + let expected_non_zero = + old_account.root_history.len() - num_remaining_roots as usize; + println!("num_remaining_roots {}", num_remaining_roots); + assert_eq!( + non_zero_roots.len(), + expected_non_zero, + "Expected {} non-zero roots after output queue 
zeroing, but found {}. Root history: {:?}", + expected_non_zero, + non_zero_roots.len(), + old_account.root_history + ); + + // Assert that all remaining non-zero roots are tracked in the current (non-zeroed) batch + let current_batch_idx = old_account.queue_batches.pending_batch_index as u32; + if let Some(current_batch_roots) = batch_roots.get_by_key(¤t_batch_idx) { + for non_zero_root in &non_zero_roots { + // Skip the initial root which might not be tracked in any batch + if old_account.sequence_number > 0 { + assert!( + current_batch_roots.contains(non_zero_root), + "Non-zero root {:?} should be tracked in current batch {} but wasn't found. Current batch roots: {:?}", + non_zero_root, + current_batch_idx, + current_batch_roots + ); + } + } + + // Also verify the count matches + assert_eq!( + current_batch_roots.len(), + non_zero_roots.len(), + "Current batch {} should have {} roots tracked, but has {}", + current_batch_idx, + non_zero_roots.len(), + current_batch_roots.len() + ); + } + } + } + } + + assert_eq!(account.get_metadata(), old_account.get_metadata()); + assert_eq!(account, old_account); + assert_eq!(*account.root_history.last().unwrap(), root); +} + +pub fn assert_address_merkle_tree_update( + mut old_account: BatchedMerkleTreeAccount, + account: BatchedMerkleTreeAccount, + root: [u8; 32], + batch_roots: &[(u32, Vec<[u8; 32]>)], +) { + { + // Input queue update + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let history_capacity = old_account.root_history.capacity(); + let pre_roots = old_account.root_history.to_vec(); + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_batches.pending_batch_index += 1; + 
old_account.queue_batches.pending_batch_index %= 2; + } + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + + let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; + + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let current_seq = account.sequence_number; + let root_history_len = account.root_history_capacity as u64; + let state_seq = account.queue_batches.batches[previous_full_batch_index].sequence_number; + let no_insert_since_last_batch_root = + state_seq.saturating_sub(root_history_len) == current_seq; + println!( + "previous_batch_is_inserted{}", + old_full_batch.get_state() != BatchState::Inserted + ); + println!( + "no_insert_since_last_batch_root {}", + no_insert_since_last_batch_root + ); + let zeroed_batch = old_full_batch.get_num_inserted_elements() + >= old_full_batch.batch_size / 2 + && old_full_batch.get_state() != BatchState::Inserted + && !no_insert_since_last_batch_root; + println!("zeroed_batch: {:?}", zeroed_batch); + let state = account.queue_batches.batches[previous_full_batch_index].get_state(); + let previous_batch = old_account + .queue_batches + .batches + .get_mut(previous_full_batch_index) + .unwrap(); + if zeroed_batch && state == BatchState::Inserted { + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch.sequence_number; + let overlapping_roots_exits = sequence_number > old_account.sequence_number; + if overlapping_roots_exits { + old_account.bloom_filter_stores[previous_full_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account.sequence_number; + for _ in 0..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + 
old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + println!( + "pre roots {:?}", + pre_roots + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect::>() + ); + + println!( + "post roots (actual account) {:?}", + account + .root_history + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect::>() + ); + // No roots of the zeroed batch exist in the root history + if let Some((_idx, zeroed_batch_roots)) = batch_roots + .iter() + .find(|(idx, _)| *idx == previous_full_batch_index as u32) + { + for root in zeroed_batch_roots { + println!("checking root {:?}", root); + assert!( + !account.root_history.iter().any(|r| r == root), + "Zeroed batch root {:?} still exists in root_history", + root + ); + } + } + // All non-zero roots in the root history belong to the current batch + let current_batch_index = old_full_batch_index as u32; + if let Some((_idx, current_batch_roots)) = batch_roots + .iter() + .find(|(idx, _)| *idx == current_batch_index) + { + for root in account.root_history.iter() { + if *root != [0u8; 32] { + assert!( + current_batch_roots.contains(root), + "Non-zero root {:?} in root_history does not belong to current batch {}", + root, + current_batch_index + ); + } + } + } + } + } + } + + old_account.sequence_number += 1; + old_account.next_index += old_account.queue_batches.zkp_batch_size; + old_account.root_history.push(root); + println!( + "post roots (old_account simulation) {:?}", + old_account + .root_history + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect::>() + ); + assert_eq!(account.get_metadata(), old_account.get_metadata()); + assert_eq!(*account.root_history.last().unwrap(), root); + assert_eq!(account, old_account); +} + +pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { + let mut rnd_bytes = rng.gen::<[u8; 32]>(); + rnd_bytes[0] = 0; + rnd_bytes +} + +pub async fn perform_input_update( + mt_account_data: &mut [u8], 
+ mock_indexer: &mut MockBatchedForester<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>, + enable_assert: bool, + mt_pubkey: Pubkey, + batch_roots: &mut ArrayMap, 2>, +) { + let mut cloned_mt_account_data = (*mt_account_data).to_vec(); + let old_account = BatchedMerkleTreeAccount::state_from_bytes( + cloned_mt_account_data.as_mut_slice(), + &mt_pubkey, + ) + .unwrap(); + let (input_res, root) = { + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + + let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; + let batch = account + .queue_batches + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves_hash_chain = account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_update_proof( + account.get_metadata().queue_batches.zkp_batch_size as u32, + *leaves_hash_chain, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + account.update_tree_from_input_queue(instruction_data), + new_root, + ) + }; + println!("Input update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One root changed one didn't + + let account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + if enable_assert { + assert_merkle_tree_update(old_account, account, None, None, root, batch_roots); + } +} +// Get random leaf that is not in the input queue. 
+pub fn get_random_leaf(rng: &mut StdRng, active_leaves: &mut Vec<[u8; 32]>) -> (usize, [u8; 32]) { + if active_leaves.is_empty() { + return (0, [0u8; 32]); + } + let index = rng.gen_range(0..active_leaves.len()); + // get random leaf from vector and remove it + (index, active_leaves.remove(index)) +} +#[allow(clippy::too_many_arguments)] +pub fn assert_nullifier_queue_insert( + pre_account: BatchedMerkleTreeMetadata, + pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], + pre_roots: Vec<[u8; 32]>, + pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], + merkle_tree_account: BatchedMerkleTreeAccount, + bloom_filter_insert_values: Vec<[u8; 32]>, + leaf_indices: Vec, + tx_hash: [u8; 32], + input_is_in_tree: Vec, + array_indices: Vec, + current_slot: &u64, +) -> Result<(), BatchedMerkleTreeError> { + let mut leaf_hash_chain_insert_values = vec![]; + for (insert_value, leaf_index) in bloom_filter_insert_values.iter().zip(leaf_indices.iter()) { + let nullifier = + Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash]) + .unwrap(); + leaf_hash_chain_insert_values.push(nullifier); + } + assert_input_queue_insert( + pre_account, + pre_value_vecs, + pre_roots, + pre_hash_chains, + merkle_tree_account, + bloom_filter_insert_values, + leaf_hash_chain_insert_values, + input_is_in_tree, + array_indices, + current_slot, + ) +} +/// Insert into input queue: +/// 1. New value exists in the current batch bloom_filter +/// 2. 
New value does not exist in the other batch bloom_filters +#[allow(clippy::too_many_arguments)] +pub fn assert_input_queue_insert( + mut pre_account: BatchedMerkleTreeMetadata, + pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], + pre_roots: Vec<[u8; 32]>, + mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], + mut merkle_tree_account: BatchedMerkleTreeAccount, + bloom_filter_insert_values: Vec<[u8; 32]>, + leaf_hash_chain_insert_values: Vec<[u8; 32]>, + input_is_in_tree: Vec, + array_indices: Vec, + current_slot: &u64, +) -> Result<(), BatchedMerkleTreeError> { + let mut should_be_zeroed = false; + for (i, insert_value) in bloom_filter_insert_values.iter().enumerate() { + if !input_is_in_tree[i] { + let value_vec_index = array_indices[i]; + assert!( + pre_value_vecs.iter_mut().any(|value_vec| { + if value_vec.len() > value_vec_index { + { + if value_vec[value_vec_index] == *insert_value { + value_vec[value_vec_index] = [0u8; 32]; + true + } else { + false + } + } + } else { + false + } + }), + "Value not in value vec." 
+ ); + } + + let post_roots: Vec<[u8; 32]> = merkle_tree_account.root_history.iter().cloned().collect(); + // if root buffer changed it must be only overwritten by [0u8;32] + if post_roots != pre_roots { + let only_zero_overwrites = post_roots + .iter() + .zip(pre_roots.iter()) + .all(|(post, pre)| *post == *pre || *post == [0u8; 32]); + println!("pre_roots: {:?}", pre_roots); + println!("post_roots: {:?}", post_roots); + if !only_zero_overwrites { + panic!("Root buffer changed.") + } + } + + let inserted_batch_index = + pre_account.queue_batches.currently_processing_batch_index as usize; + let expected_batch = pre_account + .queue_batches + .batches + .get_mut(inserted_batch_index) + .unwrap(); + + pre_account.queue_batches.next_index += 1; + + println!( + "assert input queue batch update: expected_batch: {:?}", + expected_batch + ); + println!( + "assert input queue batch update: expected_batch.get_num_inserted_elements(): {}", + expected_batch.get_num_inserted_elements() + ); + println!( + "assert input queue batch update: expected_batch.batch_size / 2: {}", + expected_batch.batch_size / 2 + ); + + if !should_be_zeroed && expected_batch.get_state() == BatchState::Inserted { + should_be_zeroed = + expected_batch.get_num_inserted_elements() == expected_batch.batch_size / 2; + } + println!( + "assert input queue batch update: should_be_zeroed: {}", + should_be_zeroed + ); + if expected_batch.get_state() == BatchState::Inserted { + println!("assert input queue batch update: clearing batch"); + pre_hash_chains[inserted_batch_index].clear(); + expected_batch.advance_state_to_fill(None).unwrap(); + expected_batch.set_start_slot(current_slot); + println!("setting start slot to {}", current_slot); + } else if expected_batch.get_state() == BatchState::Fill + && !expected_batch.start_slot_is_set() + { + // Batch is filled for the first time + expected_batch.set_start_slot(current_slot); + } + println!( + "assert input queue batch update: inserted_batch_index: {}", + 
inserted_batch_index + ); + // New value exists in the current batch bloom filter + let mut bloom_filter = BloomFilter::new( + merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize, + merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity, + merkle_tree_account.bloom_filter_stores[inserted_batch_index], + ) + .unwrap(); + println!( + "assert input queue batch update: insert_value: {:?}", + insert_value + ); + assert!(bloom_filter.contains(insert_value)); + let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); + expected_batch.add_to_hash_chain(&leaf_hash_chain_insert_values[i], pre_hash_chain)?; + + let num_iters = + merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize; + let bloom_filter_capacity = + merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity; + // New value does not exist in the other batch bloom_filters + for (i, store) in merkle_tree_account + .bloom_filter_stores + .iter_mut() + .enumerate() + { + // Skip current batch it is already checked above + if i != inserted_batch_index { + let mut bloom_filter = + BloomFilter::new(num_iters, bloom_filter_capacity, store).unwrap(); + assert!(!bloom_filter.contains(insert_value)); + } + } + // if the currently processing batch changed it should + // increment by one and the old batch should be ready to + // update + if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { + assert_eq!( + merkle_tree_account.queue_batches.batches + [pre_account.queue_batches.currently_processing_batch_index as usize] + .get_state(), + BatchState::Full + ); + pre_account.queue_batches.currently_processing_batch_index += 1; + pre_account.queue_batches.currently_processing_batch_index %= + pre_account.queue_batches.num_batches; + assert_eq!( + merkle_tree_account.queue_batches.batches[inserted_batch_index], + *expected_batch + ); + assert_eq!( + 
merkle_tree_account.hash_chain_stores[inserted_batch_index] + .last() + .unwrap(), + pre_hash_chain.last().unwrap(), + "Hashchain store inconsistent." + ); + } + } + + assert_eq!( + *merkle_tree_account.get_metadata(), + pre_account, + "BatchedMerkleTreeMetadata changed." + ); + let inserted_batch_index = pre_account.queue_batches.currently_processing_batch_index as usize; + let mut expected_batch = pre_account.queue_batches.batches[inserted_batch_index]; + if should_be_zeroed { + expected_batch.set_bloom_filter_to_zeroed(); + } + assert_eq!( + merkle_tree_account.queue_batches.batches[inserted_batch_index], + expected_batch + ); + let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; + assert_eq!( + merkle_tree_account.queue_batches.batches[other_batch], + pre_account.queue_batches.batches[other_batch] + ); + assert_eq!( + merkle_tree_account.hash_chain_stores, pre_hash_chains, + "Hashchain store inconsistent." + ); + Ok(()) +} + +/// Expected behavior for insert into output queue: +/// - add value to value array +/// - batch.num_inserted += 1 +/// - if batch is full after insertion advance state to ReadyToUpdateTree +pub fn assert_output_queue_insert( + mut pre_account: BatchedQueueMetadata, + // mut pre_batches: Vec, + mut pre_value_store: [ZeroCopyVecU64<[u8; 32]>; 2], + mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], + mut output_account: BatchedQueueAccount, + insert_values: Vec<[u8; 32]>, + current_slot: u64, +) -> Result<(), BatchedMerkleTreeError> { + for batch in output_account.batch_metadata.batches.iter_mut() { + println!("output_account.batch: {:?}", batch); + } + for batch in pre_account.batch_metadata.batches.iter() { + println!("pre_batch: {:?}", batch); + } + for insert_value in insert_values.iter() { + // if the currently processing batch changed it should + // increment by one and the old batch should be ready to + // update + + let inserted_batch_index = + pre_account.batch_metadata.currently_processing_batch_index as usize; + let 
expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; + let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); + let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); + if expected_batch.get_state() == BatchState::Inserted { + expected_batch + .advance_state_to_fill(Some(pre_account.batch_metadata.next_index)) + .unwrap(); + pre_value_store.clear(); + pre_hash_chain.clear(); + } + pre_account.batch_metadata.next_index += 1; + expected_batch.store_and_hash_value( + insert_value, + pre_value_store, + pre_hash_chain, + ¤t_slot, + )?; + + let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; + assert!(output_account.value_vecs[inserted_batch_index] + .as_mut_slice() + .to_vec() + .contains(insert_value)); + assert!(!output_account.value_vecs[other_batch] + .as_mut_slice() + .to_vec() + .contains(insert_value)); + if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { + assert_eq!( + output_account.batch_metadata.batches + [pre_account.batch_metadata.currently_processing_batch_index as usize] + .get_state(), + BatchState::Full + ); + pre_account.batch_metadata.currently_processing_batch_index += 1; + pre_account.batch_metadata.currently_processing_batch_index %= + pre_account.batch_metadata.num_batches; + assert_eq!( + output_account.batch_metadata.batches[inserted_batch_index], + *expected_batch + ); + } + } + assert_eq!( + *output_account.get_metadata(), + pre_account, + "BatchedQueueAccount changed." 
+ ); + assert_eq!(pre_hash_chains, output_account.hash_chain_stores); + for (i, (value_store, pre)) in output_account + .value_vecs + .iter() + .zip(pre_value_store.iter()) + .enumerate() + { + for (j, (value, pre_value)) in value_store.iter().zip(pre.iter()).enumerate() { + assert_eq!( + *value, *pre_value, + "{} {} \n value store {:?}\n pre {:?}", + i, j, value_store, pre + ); + } + } + assert_eq!(pre_value_store, output_account.value_vecs); + Ok(()) +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs new file mode 100644 index 0000000000..273099f2f7 --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs @@ -0,0 +1,555 @@ +#![allow(unused_assignments)] +use std::cmp::min; + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + constants::{ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_STATE_TREE_HEIGHT}, + errors::BatchedMerkleTreeError, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, InitStateTreeAccountsInstructionData, + }, + merkle_tree::{ + assert_batch_append_event_event, assert_nullify_event, + test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + }, + queue::{test_utils::get_output_queue_account_size_default, BatchedQueueAccount}, +}; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_hasher::Poseidon; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{MockBatchedForester, MockTxEvent}; +use rand::{rngs::StdRng, Rng}; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[derive(Debug, PartialEq, Clone)] +pub struct MockTransactionInputs { + inputs: Vec<[u8; 32]>, + outputs: 
Vec<[u8; 32]>, +} +pub fn simulate_transaction( + instruction_data: MockTransactionInputs, + merkle_tree_account_data: &mut [u8], + output_queue_account_data: &mut [u8], + reference_merkle_tree: &MerkleTree, + current_slot: &mut u64, + mt_pubkey: &Pubkey, +) -> Result { + let mut output_account = + BatchedQueueAccount::output_from_bytes(output_queue_account_data).unwrap(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(merkle_tree_account_data, mt_pubkey).unwrap(); + let flattened_inputs = instruction_data + .inputs + .iter() + .cloned() + .chain(instruction_data.outputs.iter().cloned()) + .collect::>(); + let tx_hash = create_hash_chain_from_slice(flattened_inputs.as_slice())?; + + for input in instruction_data.inputs.iter() { + // zkp inclusion in Merkle tree + let inclusion = reference_merkle_tree.get_leaf_index(input); + let leaf_index = if let Some(leaf_index) = inclusion { + leaf_index as u64 + } else { + println!("simulate_transaction: inclusion is none"); + let mut included = false; + let mut leaf_index = 0; + let start_indices = output_account + .batch_metadata + .batches + .iter() + .map(|batch| batch.start_index) + .collect::>(); + + for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { + for (value_index, value) in value_vec.iter_mut().enumerate() { + if *value == *input { + let batch_start_index = start_indices[batch_index]; + included = true; + println!("overwriting value: {:?}", value); + *value = [0u8; 32]; + leaf_index = value_index as u64 + batch_start_index; + } + } + } + if !included { + panic!("Value not included in any output queue or trees."); + } + leaf_index + }; + + println!( + "sim tx input: \n {:?} \nleaf index : {:?}, \ntx hash {:?}", + input, leaf_index, tx_hash, + ); + merkle_tree_account.insert_nullifier_into_queue( + input, + leaf_index, + &tx_hash, + current_slot, + )?; + } + + for output in instruction_data.outputs.iter() { + let leaf_index = 
output_account.batch_metadata.next_index; + println!( + "sim tx output: \n {:?} \nleaf index : {:?}", + output, leaf_index + ); + output_account.insert_into_current_batch(output, current_slot)?; + } + Ok(MockTxEvent { + inputs: instruction_data.inputs.clone(), + outputs: instruction_data.outputs.clone(), + tx_hash, + }) +} + +#[serial] +#[tokio::test] +async fn test_simulate_transactions() { + spawn_prover().await; + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let num_tx = 2200; + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_default(); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = ACCOUNT_COMPRESSION_PROGRAM_ID.into(); + + let params = InitStateTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update = false; + let mut out_ready_for_update = false; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + let mut current_slot = rng.gen(); + + // Track roots created during each batch insertion (batch_index -> roots) + let mut batch_roots: ArrayMap, 2> = ArrayMap::new(); + + // Track the initial root for batch 0 + // For StateV2 trees, this is the zero bytes root for the tree height + { + let initial_root = + 
light_hasher::Poseidon::zero_bytes()[DEFAULT_BATCH_STATE_TREE_HEIGHT as usize]; + use light_hasher::Hasher; + batch_roots.insert(0, vec![initial_root], ()).unwrap(); + println!("Initial root {:?} tracked for batch 0", initial_root); + } + + for tx in 0..num_tx { + println!("tx: {}", tx); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_updates: {}", num_output_updates); + { + println!("Simulate tx {} -----------------------------", tx); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let number_of_outputs = rng.gen_range(0..7); + let mut outputs = vec![]; + for _ in 0..number_of_outputs { + outputs.push(get_rnd_bytes(&mut rng)); + } + let number_of_inputs = if rng.gen_bool(0.5) { + if !mock_indexer.active_leaves.is_empty() { + let x = min(mock_indexer.active_leaves.len(), 5); + rng.gen_range(0..x) + } else { + 0 + } + } else { + 0 + }; + + let mut inputs = vec![]; + let mut input_is_in_tree = vec![]; + let mut leaf_indices = vec![]; + let mut array_indices = vec![]; + let mut retries = min(10, mock_indexer.active_leaves.len()); + while inputs.len() < number_of_inputs && retries > 0 { + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + let inserted = mock_indexer.merkle_tree.get_leaf_index(&leaf); + if let Some(leaf_index) = inserted { + inputs.push(leaf); + leaf_indices.push(leaf_index as u64); + input_is_in_tree.push(true); + array_indices.push(0); + } else if rng.gen_bool(0.1) { + inputs.push(leaf); + let output_queue = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data) + .unwrap(); + let mut leaf_array_index = 0; + let mut batch_index = 0; + for (i, vec) in output_queue.value_vecs.iter().enumerate() { + let pos = vec.iter().position(|value| *value == leaf); + if let Some(pos) = pos { + leaf_array_index = 
pos; + batch_index = i; + break; + } + if i == output_queue.value_vecs.len() - 1 { + panic!("Leaf not found in output queue."); + } + } + let batch = output_queue + .batch_metadata + .batches + .get(batch_index) + .unwrap(); + array_indices.push(leaf_array_index); + let leaf_index: u64 = batch.start_index + leaf_array_index as u64; + leaf_indices.push(leaf_index); + input_is_in_tree.push(false); + } + retries -= 1; + } + let number_of_inputs = inputs.len(); + println!("number_of_inputs: {}", number_of_inputs); + + let instruction_data = MockTransactionInputs { + inputs: inputs.clone(), + outputs: outputs.clone(), + }; + + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!( + "input queue: {:?}", + merkle_tree_account.queue_batches.batches[0].get_num_inserted_zkp_batch() + ); + + let mut pre_mt_data = mt_account_data.clone(); + let mut pre_account_bytes = output_queue_account_data.clone(); + + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); + let pre_output_metadata = *pre_output_account.get_metadata(); + let mut pre_output_value_stores = pre_output_account.value_vecs; + let pre_output_hash_chains = pre_output_account.hash_chain_stores; + + let mut pre_mt_account_bytes = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes, &mt_pubkey) + .unwrap(); + let pre_mt_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_mt_hash_chains = pre_merkle_tree_account.hash_chain_stores; + + if !outputs.is_empty() || !inputs.is_empty() { + println!("Simulating tx with inputs: {:?}", instruction_data); + let event = simulate_transaction( + instruction_data, + &mut pre_mt_data, + &mut output_queue_account_data, + &mock_indexer.merkle_tree, + &mut current_slot, + &mt_pubkey, + ) + 
.unwrap(); + mock_indexer.tx_events.push(event.clone()); + + if !inputs.is_empty() { + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) + .unwrap(); + println!("inputs: {:?}", inputs); + assert_nullifier_queue_insert( + pre_mt_account, + &mut pre_output_value_stores, // mut to remove values proven by index + pre_roots, + pre_mt_hash_chains, + merkle_tree_account, + inputs.clone(), + leaf_indices.clone(), + event.tx_hash, + input_is_in_tree, + array_indices, + ¤t_slot, + ) + .unwrap(); + } + + if !outputs.is_empty() { + assert_output_queue_insert( + pre_output_metadata, + pre_output_value_stores, + pre_output_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + outputs.clone(), + current_slot, + ) + .unwrap(); + } + + for i in 0..number_of_inputs { + mock_indexer + .input_queue_leaves + .push((inputs[i], leaf_indices[i] as usize)); + } + for output in outputs.iter() { + mock_indexer.active_leaves.push(*output); + mock_indexer.output_queue_leaves.push(*output); + } + + num_output_values += number_of_outputs; + num_input_values += number_of_inputs; + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) + .unwrap(); + in_ready_for_update = merkle_tree_account + .queue_batches + .batches + .iter() + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + out_ready_for_update = output_account + .batch_metadata + .batches + .iter() + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); + + mt_account_data = pre_mt_data.clone(); + } else { + println!("Skipping simulate tx for no inputs or outputs"); + } + current_slot += 1; + } + + if in_ready_for_update && rng.gen_bool(1.0) { + println!("Input update -----------------------------"); + println!("Num inserted 
values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let (input_res, new_root) = { + let mut account = BatchedMerkleTreeAccount::state_from_bytes( + &mut pre_mt_account_data, + &mt_pubkey, + ) + .unwrap(); + println!("batches {:?}", account.queue_batches.batches); + + let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; + let batch = account + .queue_batches + .batches + .get(next_full_batch as usize) + .unwrap(); + println!( + "account + .hash_chain_stores {:?}", + account.hash_chain_stores + ); + println!("hash_chain store len {:?}", account.hash_chain_stores.len()); + println!( + "batch.get_num_inserted_zkps() as usize {:?}", + batch.get_num_inserted_zkps() as usize + ); + let leaves_hash_chain = account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + + let (proof, new_root) = mock_indexer + .get_batched_update_proof( + account.get_metadata().queue_batches.zkp_batch_size as u32, + *leaves_hash_chain, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + account.update_tree_from_input_queue(instruction_data), + new_root, + ) + }; + println!("Input update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let nullify_event = input_res.unwrap(); + in_ready_for_update = false; + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One root changed one didn't + + let account = + 
BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + assert_nullify_event(nullify_event, new_root, &old_account, mt_pubkey); + assert_merkle_tree_update(old_account, account, None, None, new_root, &mut batch_roots); + mt_account_data = pre_mt_account_data.clone(); + + num_input_updates += 1; + } + + if out_ready_for_update && rng.gen_bool(1.0) { + println!("Output update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + let mut pre_output_queue_state = output_queue_account_data.clone(); + println!("Output update -----------------------------"); + + let queue_account = + &mut 
BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + println!("output_res: {:?}", output_res); + assert!(output_res.is_ok()); + let batch_append_event = output_res.unwrap(); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let old_output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + assert_batch_append_event_event( + batch_append_event, + new_root, + &old_output_account, + &old_account, + mt_pubkey, + ); + assert_merkle_tree_update( + old_account, + account, + Some(old_output_account), + Some(output_account), + new_root, + &mut batch_roots, + ); + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + out_ready_for_update = false; + num_output_updates += 1; + } + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + println!("num_output_updates: {}", num_output_updates); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs new file mode 100644 index 0000000000..234f1501ee --- /dev/null +++ 
b/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs @@ -0,0 +1,472 @@ +#![allow(unused_assignments)] + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES}, + errors::BatchedMerkleTreeError, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, + test_utils::get_state_merkle_tree_account_size_from_params, + InitStateTreeAccountsInstructionData, + }, + merkle_tree::{BatchedMerkleTreeAccount, InstructionDataBatchAppendInputs}, + queue::{test_utils::get_output_queue_account_size_from_params, BatchedQueueAccount}, +}; +use light_bloom_filter::BloomFilter; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{MockBatchedForester, MockTxEvent}; +use rand::rngs::StdRng; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[serial] +#[tokio::test] +async fn test_fill_state_queues_completely() { + spawn_prover().await; + let mut current_slot = 1; + let roothistory_capacity = vec![17, 80]; + for root_history_capacity in roothistory_capacity { + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let mut params = InitStateTreeAccountsInstructionData::test_default(); + params.output_queue_batch_size = params.input_queue_batch_size * 10; + // Root history capacity which is greater than the input updates + params.root_history_capacity = root_history_capacity; + + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_from_params(params); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_state_merkle_tree_account_size_from_params(params); + let mut mt_account_data = 
vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + + // Track roots created during each batch insertion (batch_index -> roots) + let mut batch_roots: ArrayMap, 2> = ArrayMap::new(); + + let num_tx = NUM_BATCHES as u64 * params.output_queue_batch_size; + + // Fill up complete output queue + for _ in 0..num_tx { + // Output queue + + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut pre_output_queue_account_data = output_queue_account_data.clone(); + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); + let pre_account = *pre_output_account.get_metadata(); + let pre_value_store = pre_output_account.value_vecs; + let pre_hash_chains = pre_output_account.hash_chain_stores; + + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + output_account + .insert_into_current_batch(&rnd_bytes, ¤t_slot) + .unwrap(); + assert_output_queue_insert( + pre_account, + pre_value_store, + pre_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + vec![rnd_bytes], + current_slot, + ) + .unwrap(); + current_slot += 1; + mock_indexer.output_queue_leaves.push(rnd_bytes); + } + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let result = output_account.insert_into_current_batch(&rnd_bytes, ¤t_slot); + assert_eq!(result.unwrap_err(), 
BatchedMerkleTreeError::BatchNotReady); + + output_account + .batch_metadata + .batches + .iter() + .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); + + // Batch insert output queue into merkle tree. + for _ in 0..output_account + .get_metadata() + .batch_metadata + .get_num_zkp_batches() + { + println!("Output update -----------------------------"); + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let mut pre_output_queue_state = output_queue_account_data.clone(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves = mock_indexer.output_queue_leaves.clone(); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; + let end = start + batch.zkp_batch_size as usize; + for leaf in &leaves[start..end] { + // Storing the leaf in the output queue indexer so that it + // can be inserted into the input queue later. 
+ mock_indexer.active_leaves.push(*leaf); + } + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + println!("Output update -----------------------------"); + let queue_account = + &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + assert!(output_res.is_ok()); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + + // Track root for this batch + let batch_idx = next_full_batch as u32; + if let Some(roots) = batch_roots.get_mut_by_key(&batch_idx) { + roots.push(new_root); + } else { + batch_roots.insert(batch_idx, vec![new_root], ()).unwrap(); + } + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + } + + // Fill up complete input queue. + let num_tx = NUM_BATCHES as u64 * params.input_queue_batch_size; + let mut first_value = [0u8; 32]; + for tx in 0..num_tx { + println!("Input insert ----------------------------- {}", tx); + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); + + let mut pre_mt_account_data = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let pre_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; + let tx_hash = create_hash_chain_from_slice(&[leaf]).unwrap(); + // Index input queue insert event + mock_indexer.input_queue_leaves.push((leaf, leaf_index)); + mock_indexer.tx_events.push(MockTxEvent { + inputs: vec![leaf], + outputs: vec![], + tx_hash, + }); 
+ println!("leaf {:?}", leaf); + println!("leaf_index {:?}", leaf_index); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + merkle_tree_account + .insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ) + .unwrap(); + println!("current slot {:?}", current_slot); + assert_nullifier_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![leaf], + vec![leaf_index as u64], + tx_hash, + vec![true], + vec![], + ¤t_slot, + ) + .unwrap(); + current_slot += 1; + println!("leaf {:?}", leaf); + // Insert the same value twice + { + // copy data so that failing test doesn't affect the state of + // subsequent tests + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = merkle_tree_account.insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ); + result.unwrap_err(); + // assert_eq!( + // result.unwrap_err(), + // BatchedMerkleTreeError::BatchInsertFailed.into() + // ); + } + // Try to insert first value into any batch + if tx == 0 { + first_value = leaf; + } else { + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = merkle_tree_account.insert_nullifier_into_queue( + &first_value.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ); + // assert_eq!( + // result.unwrap_err(), + // BatchedMerkleTreeError::BatchInsertFailed.into() + // ); + result.unwrap_err(); + // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + } + // Assert input queue is full and doesn't accept more inserts + { + let merkle_tree_account = + &mut 
BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let rnd_bytes = get_rnd_bytes(&mut rng); + let tx_hash = get_rnd_bytes(&mut rng); + let result = merkle_tree_account.insert_nullifier_into_queue( + &rnd_bytes, + 0, + &tx_hash, + ¤t_slot, + ); + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } + // Root of the final batch of first input queue batch + let mut first_input_batch_update_root_value = [0u8; 32]; + let num_updates = + params.input_queue_batch_size / params.input_queue_zkp_batch_size * NUM_BATCHES as u64; + for i in 0..num_updates { + println!("input update ----------------------------- {}", i); + + perform_input_update( + &mut mt_account_data, + &mut mock_indexer, + false, + mt_pubkey, + &mut batch_roots, + ) + .await; + + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + // after 5 updates the first batch is completely inserted + // As soon as we switch to inserting the second batch we zero out the first batch since + // the second batch is completely full. 
+ if i >= 5 { + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + assert!(batch.bloom_filter_is_zeroed()); + + // Assert that none of the unsafe roots from batch 0 exist in root history + if let Some(unsafe_roots) = batch_roots.get_by_key(&0) { + for unsafe_root in unsafe_roots { + assert!( + !merkle_tree_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch 0 should be zeroed: {:?}", + unsafe_root + ); + } + } + } else { + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + assert!(!batch.bloom_filter_is_zeroed()); + } + let batch_one = &merkle_tree_account.queue_batches.batches[1]; + assert!(!batch_one.bloom_filter_is_zeroed()); + + println!( + "performed input queue batched update {} created root {:?}", + i, + mock_indexer.merkle_tree.root() + ); + if i == 4 { + first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); + } + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!( + "root {:?}", + merkle_tree_account.root_history.last().unwrap() + ); + println!( + "root last index {:?}", + merkle_tree_account.root_history.last_index() + ); + } + // assert all bloom_filters are inserted + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { + assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + assert!(batch.bloom_filter_is_zeroed()); + } else { + assert!(!batch.bloom_filter_is_zeroed()); + } + } + } + // do one insert and expect that roots until merkle_tree_account.batches[0].root_index are zero + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); + + let value = 
&get_rnd_bytes(&mut rng); + let tx_hash = &get_rnd_bytes(&mut rng); + merkle_tree_account + .insert_nullifier_into_queue(value, 0, tx_hash, ¤t_slot) + .unwrap(); + { + let post_batch = *merkle_tree_account.queue_batches.batches.first().unwrap(); + assert_eq!(post_batch.get_state(), BatchState::Fill); + assert_eq!(post_batch.get_num_inserted_zkp_batch(), 1); + let bloom_filter_store = + merkle_tree_account.bloom_filter_stores.get_mut(0).unwrap(); + let mut bloom_filter = BloomFilter::new( + params.bloom_filter_num_iters as usize, + params.bloom_filter_capacity, + bloom_filter_store, + ) + .unwrap(); + assert!(bloom_filter.contains(value)); + } + + for root in merkle_tree_account.root_history.iter() { + println!("root {:?}", root); + } + println!( + "root in root index {:?}", + merkle_tree_account.root_history[pre_batch_zero.root_index as usize] + ); + for batch_idx in 0..NUM_BATCHES as u32 { + println!("batch idx {:?}", batch_idx); + if let Some(roots) = batch_roots.get_by_key(&batch_idx) { + for root in roots.iter() { + println!("tracked root {:?}", root); + } + } else { + println!("No roots found for batch {}", batch_idx); + } + } + // check that all roots have been overwritten except the root index + // of the update + let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; + let start = merkle_tree_account.root_history.last_index() as u32; + println!("start {:?}", start); + for root in start + 1..pre_batch_zero.root_index + root_history_len { + println!("actual index {:?}", root); + let index = root % root_history_len; + + if index == pre_batch_zero.root_index { + let root_index = pre_batch_zero.root_index as usize; + + assert_eq!( + merkle_tree_account.root_history[root_index], + first_input_batch_update_root_value + ); + assert_eq!(merkle_tree_account.root_history[root_index], [0u8; 32]); + // First non zeroed root. 
+ assert_ne!(merkle_tree_account.root_history[root_index + 1], [0u8; 32]); + break; + } + println!("index {:?}", index); + assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); + } + } + } +} diff --git a/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs b/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs deleted file mode 100644 index f06ad2e867..0000000000 --- a/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs +++ /dev/null @@ -1,2206 +0,0 @@ -#![allow(unused_assignments)] -use std::cmp::min; - -use light_batched_merkle_tree::{ - batch::BatchState, - constants::{ - ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, - DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES, - }, - errors::BatchedMerkleTreeError, - initialize_address_tree::{ - get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, - InitAddressTreeAccountsInstructionData, - }, - initialize_state_tree::{ - init_batched_state_merkle_tree_accounts, - test_utils::get_state_merkle_tree_account_size_from_params, - InitStateTreeAccountsInstructionData, - }, - merkle_tree::{ - assert_batch_adress_event, assert_batch_append_event_event, assert_nullify_event, - test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, - InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, - }, - merkle_tree_metadata::BatchedMerkleTreeMetadata, - queue::{ - test_utils::{ - get_output_queue_account_size_default, get_output_queue_account_size_from_params, - }, - BatchedQueueAccount, BatchedQueueMetadata, - }, -}; -use light_bloom_filter::{BloomFilter, BloomFilterError}; -use light_compressed_account::{ - hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, - pubkey::Pubkey, -}; -use light_hasher::{Hasher, Poseidon}; -use light_merkle_tree_reference::MerkleTree; -use light_prover_client::prover::spawn_prover; -use light_test_utils::mock_batched_forester::{ - 
MockBatchedAddressForester, MockBatchedForester, MockTxEvent, -}; -use light_zero_copy::vec::ZeroCopyVecU64; -use rand::{rngs::StdRng, Rng}; -use serial_test::serial; - -#[allow(clippy::too_many_arguments)] -pub fn assert_nullifier_queue_insert( - pre_account: BatchedMerkleTreeMetadata, - pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], - pre_roots: Vec<[u8; 32]>, - pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - merkle_tree_account: BatchedMerkleTreeAccount, - bloom_filter_insert_values: Vec<[u8; 32]>, - leaf_indices: Vec, - tx_hash: [u8; 32], - input_is_in_tree: Vec, - array_indices: Vec, - current_slot: &u64, -) -> Result<(), BatchedMerkleTreeError> { - let mut leaf_hash_chain_insert_values = vec![]; - for (insert_value, leaf_index) in bloom_filter_insert_values.iter().zip(leaf_indices.iter()) { - let nullifier = - Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash]) - .unwrap(); - leaf_hash_chain_insert_values.push(nullifier); - } - assert_input_queue_insert( - pre_account, - pre_value_vecs, - pre_roots, - pre_hash_chains, - merkle_tree_account, - bloom_filter_insert_values, - leaf_hash_chain_insert_values, - input_is_in_tree, - array_indices, - current_slot, - ) -} -/// Insert into input queue: -/// 1. New value exists in the current batch bloom_filter -/// 2. 
New value does not exist in the other batch bloom_filters -#[allow(clippy::too_many_arguments)] -pub fn assert_input_queue_insert( - mut pre_account: BatchedMerkleTreeMetadata, - pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], - pre_roots: Vec<[u8; 32]>, - mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - mut merkle_tree_account: BatchedMerkleTreeAccount, - bloom_filter_insert_values: Vec<[u8; 32]>, - leaf_hash_chain_insert_values: Vec<[u8; 32]>, - input_is_in_tree: Vec, - array_indices: Vec, - current_slot: &u64, -) -> Result<(), BatchedMerkleTreeError> { - let mut should_be_zeroed = false; - for (i, insert_value) in bloom_filter_insert_values.iter().enumerate() { - if !input_is_in_tree[i] { - let value_vec_index = array_indices[i]; - assert!( - pre_value_vecs.iter_mut().any(|value_vec| { - if value_vec.len() > value_vec_index { - { - if value_vec[value_vec_index] == *insert_value { - value_vec[value_vec_index] = [0u8; 32]; - true - } else { - false - } - } - } else { - false - } - }), - "Value not in value vec." 
- ); - } - - let post_roots: Vec<[u8; 32]> = merkle_tree_account.root_history.iter().cloned().collect(); - // if root buffer changed it must be only overwritten by [0u8;32] - if post_roots != pre_roots { - let only_zero_overwrites = post_roots - .iter() - .zip(pre_roots.iter()) - .all(|(post, pre)| *post == *pre || *post == [0u8; 32]); - println!("pre_roots: {:?}", pre_roots); - println!("post_roots: {:?}", post_roots); - if !only_zero_overwrites { - panic!("Root buffer changed.") - } - } - - let inserted_batch_index = - pre_account.queue_batches.currently_processing_batch_index as usize; - let expected_batch = pre_account - .queue_batches - .batches - .get_mut(inserted_batch_index) - .unwrap(); - - pre_account.queue_batches.next_index += 1; - - println!( - "assert input queue batch update: expected_batch: {:?}", - expected_batch - ); - println!( - "assert input queue batch update: expected_batch.get_num_inserted_elements(): {}", - expected_batch.get_num_inserted_elements() - ); - println!( - "assert input queue batch update: expected_batch.batch_size / 2: {}", - expected_batch.batch_size / 2 - ); - - if !should_be_zeroed && expected_batch.get_state() == BatchState::Inserted { - should_be_zeroed = - expected_batch.get_num_inserted_elements() == expected_batch.batch_size / 2; - } - println!( - "assert input queue batch update: should_be_zeroed: {}", - should_be_zeroed - ); - if expected_batch.get_state() == BatchState::Inserted { - println!("assert input queue batch update: clearing batch"); - pre_hash_chains[inserted_batch_index].clear(); - expected_batch.advance_state_to_fill(None).unwrap(); - expected_batch.set_start_slot(current_slot); - println!("setting start slot to {}", current_slot); - } else if expected_batch.get_state() == BatchState::Fill - && !expected_batch.start_slot_is_set() - { - // Batch is filled for the first time - expected_batch.set_start_slot(current_slot); - } - println!( - "assert input queue batch update: inserted_batch_index: {}", - 
inserted_batch_index - ); - // New value exists in the current batch bloom filter - let mut bloom_filter = BloomFilter::new( - merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize, - merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[inserted_batch_index], - ) - .unwrap(); - println!( - "assert input queue batch update: insert_value: {:?}", - insert_value - ); - assert!(bloom_filter.contains(insert_value)); - let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); - expected_batch.add_to_hash_chain(&leaf_hash_chain_insert_values[i], pre_hash_chain)?; - - let num_iters = - merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize; - let bloom_filter_capacity = - merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity; - // New value does not exist in the other batch bloom_filters - for (i, store) in merkle_tree_account - .bloom_filter_stores - .iter_mut() - .enumerate() - { - // Skip current batch it is already checked above - if i != inserted_batch_index { - let mut bloom_filter = - BloomFilter::new(num_iters, bloom_filter_capacity, store).unwrap(); - assert!(!bloom_filter.contains(insert_value)); - } - } - // if the currently processing batch changed it should - // increment by one and the old batch should be ready to - // update - if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { - assert_eq!( - merkle_tree_account.queue_batches.batches - [pre_account.queue_batches.currently_processing_batch_index as usize] - .get_state(), - BatchState::Full - ); - pre_account.queue_batches.currently_processing_batch_index += 1; - pre_account.queue_batches.currently_processing_batch_index %= - pre_account.queue_batches.num_batches; - assert_eq!( - merkle_tree_account.queue_batches.batches[inserted_batch_index], - *expected_batch - ); - assert_eq!( - 
merkle_tree_account.hash_chain_stores[inserted_batch_index] - .last() - .unwrap(), - pre_hash_chain.last().unwrap(), - "Hashchain store inconsistent." - ); - } - } - - assert_eq!( - *merkle_tree_account.get_metadata(), - pre_account, - "BatchedMerkleTreeMetadata changed." - ); - let inserted_batch_index = pre_account.queue_batches.currently_processing_batch_index as usize; - let mut expected_batch = pre_account.queue_batches.batches[inserted_batch_index]; - if should_be_zeroed { - expected_batch.set_bloom_filter_to_zeroed(); - } - assert_eq!( - merkle_tree_account.queue_batches.batches[inserted_batch_index], - expected_batch - ); - let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; - assert_eq!( - merkle_tree_account.queue_batches.batches[other_batch], - pre_account.queue_batches.batches[other_batch] - ); - assert_eq!( - merkle_tree_account.hash_chain_stores, pre_hash_chains, - "Hashchain store inconsistent." - ); - Ok(()) -} - -/// Expected behavior for insert into output queue: -/// - add value to value array -/// - batch.num_inserted += 1 -/// - if batch is full after insertion advance state to ReadyToUpdateTree -pub fn assert_output_queue_insert( - mut pre_account: BatchedQueueMetadata, - // mut pre_batches: Vec, - mut pre_value_store: [ZeroCopyVecU64<[u8; 32]>; 2], - mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - mut output_account: BatchedQueueAccount, - insert_values: Vec<[u8; 32]>, - current_slot: u64, -) -> Result<(), BatchedMerkleTreeError> { - for batch in output_account.batch_metadata.batches.iter_mut() { - println!("output_account.batch: {:?}", batch); - } - for batch in pre_account.batch_metadata.batches.iter() { - println!("pre_batch: {:?}", batch); - } - for insert_value in insert_values.iter() { - // if the currently processing batch changed it should - // increment by one and the old batch should be ready to - // update - - let inserted_batch_index = - pre_account.batch_metadata.currently_processing_batch_index as usize; - let 
expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; - let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); - let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); - if expected_batch.get_state() == BatchState::Inserted { - expected_batch - .advance_state_to_fill(Some(pre_account.batch_metadata.next_index)) - .unwrap(); - pre_value_store.clear(); - pre_hash_chain.clear(); - } - pre_account.batch_metadata.next_index += 1; - expected_batch.store_and_hash_value( - insert_value, - pre_value_store, - pre_hash_chain, - ¤t_slot, - )?; - - let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; - assert!(output_account.value_vecs[inserted_batch_index] - .as_mut_slice() - .to_vec() - .contains(insert_value)); - assert!(!output_account.value_vecs[other_batch] - .as_mut_slice() - .to_vec() - .contains(insert_value)); - if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { - assert_eq!( - output_account.batch_metadata.batches - [pre_account.batch_metadata.currently_processing_batch_index as usize] - .get_state(), - BatchState::Full - ); - pre_account.batch_metadata.currently_processing_batch_index += 1; - pre_account.batch_metadata.currently_processing_batch_index %= - pre_account.batch_metadata.num_batches; - assert_eq!( - output_account.batch_metadata.batches[inserted_batch_index], - *expected_batch - ); - } - } - assert_eq!( - *output_account.get_metadata(), - pre_account, - "BatchedQueueAccount changed." 
- ); - assert_eq!(pre_hash_chains, output_account.hash_chain_stores); - for (i, (value_store, pre)) in output_account - .value_vecs - .iter() - .zip(pre_value_store.iter()) - .enumerate() - { - for (j, (value, pre_value)) in value_store.iter().zip(pre.iter()).enumerate() { - assert_eq!( - *value, *pre_value, - "{} {} \n value store {:?}\n pre {:?}", - i, j, value_store, pre - ); - } - } - assert_eq!(pre_value_store, output_account.value_vecs); - Ok(()) -} - -#[derive(Debug, PartialEq, Clone)] -pub struct MockTransactionInputs { - inputs: Vec<[u8; 32]>, - outputs: Vec<[u8; 32]>, -} - -pub fn simulate_transaction( - instruction_data: MockTransactionInputs, - merkle_tree_account_data: &mut [u8], - output_queue_account_data: &mut [u8], - reference_merkle_tree: &MerkleTree, - current_slot: &mut u64, - mt_pubkey: &Pubkey, -) -> Result { - let mut output_account = - BatchedQueueAccount::output_from_bytes(output_queue_account_data).unwrap(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(merkle_tree_account_data, mt_pubkey).unwrap(); - let flattened_inputs = instruction_data - .inputs - .iter() - .cloned() - .chain(instruction_data.outputs.iter().cloned()) - .collect::>(); - let tx_hash = create_hash_chain_from_slice(flattened_inputs.as_slice())?; - - for input in instruction_data.inputs.iter() { - // zkp inclusion in Merkle tree - let inclusion = reference_merkle_tree.get_leaf_index(input); - let leaf_index = if let Some(leaf_index) = inclusion { - leaf_index as u64 - } else { - println!("simulate_transaction: inclusion is none"); - let mut included = false; - let mut leaf_index = 0; - let start_indices = output_account - .batch_metadata - .batches - .iter() - .map(|batch| batch.start_index) - .collect::>(); - - for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { - for (value_index, value) in value_vec.iter_mut().enumerate() { - if *value == *input { - let batch_start_index = start_indices[batch_index]; - 
included = true; - println!("overwriting value: {:?}", value); - *value = [0u8; 32]; - leaf_index = value_index as u64 + batch_start_index; - } - } - } - if !included { - panic!("Value not included in any output queue or trees."); - } - leaf_index - }; - - println!( - "sim tx input: \n {:?} \nleaf index : {:?}, \ntx hash {:?}", - input, leaf_index, tx_hash, - ); - merkle_tree_account.insert_nullifier_into_queue( - input, - leaf_index, - &tx_hash, - current_slot, - )?; - } - - for output in instruction_data.outputs.iter() { - let leaf_index = output_account.batch_metadata.next_index; - println!( - "sim tx output: \n {:?} \nleaf index : {:?}", - output, leaf_index - ); - output_account.insert_into_current_batch(output, current_slot)?; - } - Ok(MockTxEvent { - inputs: instruction_data.inputs.clone(), - outputs: instruction_data.outputs.clone(), - tx_hash, - }) -} - -#[serial] -#[tokio::test] -async fn test_simulate_transactions() { - spawn_prover().await; - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let num_tx = 2200; - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_default(); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_merkle_tree_account_size_default(); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = ACCOUNT_COMPRESSION_PROGRAM_ID.into(); - - let params = InitStateTreeAccountsInstructionData::test_default(); - - let merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - let mut 
in_ready_for_update = false; - let mut out_ready_for_update = false; - let mut num_output_updates = 0; - let mut num_input_updates = 0; - let mut num_input_values = 0; - let mut num_output_values = 0; - let mut current_slot = rng.gen(); - - for tx in 0..num_tx { - println!("tx: {}", tx); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_updates: {}", num_output_updates); - { - println!("Simulate tx {} -----------------------------", tx); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let number_of_outputs = rng.gen_range(0..7); - let mut outputs = vec![]; - for _ in 0..number_of_outputs { - outputs.push(get_rnd_bytes(&mut rng)); - } - let number_of_inputs = if rng.gen_bool(0.5) { - if !mock_indexer.active_leaves.is_empty() { - let x = min(mock_indexer.active_leaves.len(), 5); - rng.gen_range(0..x) - } else { - 0 - } - } else { - 0 - }; - - let mut inputs = vec![]; - let mut input_is_in_tree = vec![]; - let mut leaf_indices = vec![]; - let mut array_indices = vec![]; - let mut retries = min(10, mock_indexer.active_leaves.len()); - while inputs.len() < number_of_inputs && retries > 0 { - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - let inserted = mock_indexer.merkle_tree.get_leaf_index(&leaf); - if let Some(leaf_index) = inserted { - inputs.push(leaf); - leaf_indices.push(leaf_index as u64); - input_is_in_tree.push(true); - array_indices.push(0); - } else if rng.gen_bool(0.1) { - inputs.push(leaf); - let output_queue = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data) - .unwrap(); - let mut leaf_array_index = 0; - let mut batch_index = 0; - for (i, vec) in output_queue.value_vecs.iter().enumerate() { - let pos = vec.iter().position(|value| *value == leaf); - if let Some(pos) = pos { - leaf_array_index = 
pos; - batch_index = i; - break; - } - if i == output_queue.value_vecs.len() - 1 { - panic!("Leaf not found in output queue."); - } - } - let batch = output_queue - .batch_metadata - .batches - .get(batch_index) - .unwrap(); - array_indices.push(leaf_array_index); - let leaf_index: u64 = batch.start_index + leaf_array_index as u64; - leaf_indices.push(leaf_index); - input_is_in_tree.push(false); - } - retries -= 1; - } - let number_of_inputs = inputs.len(); - println!("number_of_inputs: {}", number_of_inputs); - - let instruction_data = MockTransactionInputs { - inputs: inputs.clone(), - outputs: outputs.clone(), - }; - - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!( - "input queue: {:?}", - merkle_tree_account.queue_batches.batches[0].get_num_inserted_zkp_batch() - ); - - let mut pre_mt_data = mt_account_data.clone(); - let mut pre_account_bytes = output_queue_account_data.clone(); - - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); - let pre_output_metadata = *pre_output_account.get_metadata(); - let mut pre_output_value_stores = pre_output_account.value_vecs; - let pre_output_hash_chains = pre_output_account.hash_chain_stores; - - let mut pre_mt_account_bytes = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes, &mt_pubkey) - .unwrap(); - let pre_mt_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_mt_hash_chains = pre_merkle_tree_account.hash_chain_stores; - - if !outputs.is_empty() || !inputs.is_empty() { - println!("Simulating tx with inputs: {:?}", instruction_data); - let event = simulate_transaction( - instruction_data, - &mut pre_mt_data, - &mut output_queue_account_data, - &mock_indexer.merkle_tree, - &mut current_slot, - &mt_pubkey, - ) - 
.unwrap(); - mock_indexer.tx_events.push(event.clone()); - - if !inputs.is_empty() { - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) - .unwrap(); - println!("inputs: {:?}", inputs); - assert_nullifier_queue_insert( - pre_mt_account, - &mut pre_output_value_stores, // mut to remove values proven by index - pre_roots, - pre_mt_hash_chains, - merkle_tree_account, - inputs.clone(), - leaf_indices.clone(), - event.tx_hash, - input_is_in_tree, - array_indices, - ¤t_slot, - ) - .unwrap(); - } - - if !outputs.is_empty() { - assert_output_queue_insert( - pre_output_metadata, - pre_output_value_stores, - pre_output_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - outputs.clone(), - current_slot, - ) - .unwrap(); - } - - for i in 0..number_of_inputs { - mock_indexer - .input_queue_leaves - .push((inputs[i], leaf_indices[i] as usize)); - } - for output in outputs.iter() { - mock_indexer.active_leaves.push(*output); - mock_indexer.output_queue_leaves.push(*output); - } - - num_output_values += number_of_outputs; - num_input_values += number_of_inputs; - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) - .unwrap(); - in_ready_for_update = merkle_tree_account - .queue_batches - .batches - .iter() - .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - out_ready_for_update = output_account - .batch_metadata - .batches - .iter() - .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); - - mt_account_data = pre_mt_data.clone(); - } else { - println!("Skipping simulate tx for no inputs or outputs"); - } - current_slot += 1; - } - - if in_ready_for_update && rng.gen_bool(1.0) { - println!("Input update -----------------------------"); - println!("Num inserted 
values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let (input_res, new_root) = { - let mut account = BatchedMerkleTreeAccount::state_from_bytes( - &mut pre_mt_account_data, - &mt_pubkey, - ) - .unwrap(); - println!("batches {:?}", account.queue_batches.batches); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - println!( - "account - .hash_chain_stores {:?}", - account.hash_chain_stores - ); - println!("hash_chain store len {:?}", account.hash_chain_stores.len()); - println!( - "batch.get_num_inserted_zkps() as usize {:?}", - batch.get_num_inserted_zkps() as usize - ); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - - let (proof, new_root) = mock_indexer - .get_batched_update_proof( - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - ) - .await - .unwrap(); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_input_queue(instruction_data), - new_root, - ) - }; - println!("Input update -----------------------------"); - println!("res {:?}", input_res); - assert!(input_res.is_ok()); - let nullify_event = input_res.unwrap(); - in_ready_for_update = false; - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = - 
BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - assert_nullify_event(nullify_event, new_root, &old_account, mt_pubkey); - assert_merkle_tree_update(old_account, account, None, None, new_root); - mt_account_data = pre_mt_account_data.clone(); - - num_input_updates += 1; - } - - if out_ready_for_update && rng.gen_bool(1.0) { - println!("Output update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - let mut pre_output_queue_state = output_queue_account_data.clone(); - println!("Output update -----------------------------"); - - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut 
pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - println!("output_res: {:?}", output_res); - assert!(output_res.is_ok()); - let batch_append_event = output_res.unwrap(); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let old_output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - assert_batch_append_event_event( - batch_append_event, - new_root, - &old_output_account, - &old_account, - mt_pubkey, - ); - assert_merkle_tree_update( - old_account, - account, - Some(old_output_account), - Some(output_account), - new_root, - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - out_ready_for_update = false; - num_output_updates += 1; - } - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - println!("num_output_updates: {}", num_output_updates); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); -} - -// Get random leaf that is not in the input queue. 
-pub fn get_random_leaf(rng: &mut StdRng, active_leaves: &mut Vec<[u8; 32]>) -> (usize, [u8; 32]) { - if active_leaves.is_empty() { - return (0, [0u8; 32]); - } - let index = rng.gen_range(0..active_leaves.len()); - // get random leaf from vector and remove it - (index, active_leaves.remove(index)) -} - -/// queues with a counter which keeps things below X tps and an if that -/// executes tree updates when possible. -#[serial] -#[tokio::test] -async fn test_e2e() { - spawn_prover().await; - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let num_tx = 2200; - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_default(); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_merkle_tree_account_size_default(); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let params = InitStateTreeAccountsInstructionData::test_default(); - - let merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - let mut in_ready_for_update; - let mut out_ready_for_update; - let mut num_output_updates = 0; - let mut num_input_updates = 0; - let mut num_input_values = 0; - let mut num_output_values = 0; - let mut current_slot = rng.gen(); - - for tx in 0..num_tx { - println!("tx: {}", tx); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_updates: {}", num_output_updates); - // Output queue - { - if rng.gen_bool(0.5) { - println!("Output insert -----------------------------"); - 
println!("num_output_values: {}", num_output_values); - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut pre_account_bytes = output_queue_account_data.clone(); - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); - let pre_account = *pre_output_account.get_metadata(); - let pre_value_store = pre_output_account.value_vecs; - let pre_hash_chains = pre_output_account.hash_chain_stores; - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - output_account - .insert_into_current_batch(&rnd_bytes, ¤t_slot) - .unwrap(); - assert_output_queue_insert( - pre_account, - pre_value_store, - pre_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - vec![rnd_bytes], - current_slot, - ) - .unwrap(); - current_slot += 1; - num_output_values += 1; - mock_indexer.output_queue_leaves.push(rnd_bytes); - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - out_ready_for_update = output_account - .batch_metadata - .batches - .iter() - .any(|batch| batch.get_state() == BatchState::Full); - } - - // Input queue - { - let mut pre_account_bytes = mt_account_data.clone(); - - if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() { - println!("Input insert -----------------------------"); - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - - let pre_mt_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes, &mt_pubkey) - .unwrap(); - let pre_account = *pre_mt_account.get_metadata(); - let pre_hash_chains = pre_mt_account.hash_chain_stores; - let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); - let tx_hash = create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); - let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); - 
mock_indexer.input_queue_leaves.push((leaf, leaf_index)); - mock_indexer.tx_events.push(MockTxEvent { - inputs: vec![leaf], - outputs: vec![], - tx_hash, - }); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - merkle_tree_account - .insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ) - .unwrap(); - - { - let mut mt_account_data = mt_account_data.clone(); - let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes( - &mut mt_account_data, - &mt_pubkey, - ) - .unwrap(); - assert_nullifier_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![leaf], - vec![leaf_index as u64], - tx_hash, - vec![true], - vec![], - ¤t_slot, - ) - .unwrap(); - current_slot += 1; - } - num_input_values += 1; - } - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - in_ready_for_update = merkle_tree_account - .queue_batches - .batches - .iter() - .any(|batch| batch.get_state() == BatchState::Full); - } - - if in_ready_for_update { - println!("Input update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - in_ready_for_update = false; - perform_input_update(&mut pre_mt_account_data, &mut mock_indexer, true, mt_pubkey) - .await; - mt_account_data = pre_mt_account_data.clone(); - - num_input_updates += 1; - } - - if out_ready_for_update { - println!("Output update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - 
println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves = output_account - .value_vecs - .get(next_full_batch as usize) - .unwrap() - .to_vec(); - println!("leaves {:?}", leaves.len()); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; - let end = start + batch.zkp_batch_size as usize; - for leaf in &leaves[start..end] { - // Storing the leaf in the output queue indexer so that it - // can be inserted into the input queue later. 
- mock_indexer.active_leaves.push(*leaf); - } - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - let mut pre_output_queue_state = output_queue_account_data.clone(); - println!("Output update -----------------------------"); - - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - println!( - "post update: sequence number: {}", - account.get_metadata().sequence_number - ); - println!("output_res {:?}", output_res); - assert!(output_res.is_ok()); - - println!("output update success {}", num_output_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let old_output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - assert_merkle_tree_update( - old_account, - account, - Some(old_output_account), - Some(output_account), - new_root, - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - out_ready_for_update = false; - num_output_updates += 1; - } - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - 
println!("num_output_updates: {}", num_output_updates); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); -} -pub async fn perform_input_update( - mt_account_data: &mut [u8], - mock_indexer: &mut MockBatchedForester<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>, - enable_assert: bool, - mt_pubkey: Pubkey, -) { - let mut cloned_mt_account_data = (*mt_account_data).to_vec(); - let old_account = BatchedMerkleTreeAccount::state_from_bytes( - cloned_mt_account_data.as_mut_slice(), - &mt_pubkey, - ) - .unwrap(); - let (input_res, root) = { - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_update_proof( - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - ) - .await - .unwrap(); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_input_queue(instruction_data), - new_root, - ) - }; - println!("Input update -----------------------------"); - println!("res {:?}", input_res); - assert!(input_res.is_ok()); - - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - if enable_assert { - assert_merkle_tree_update(old_account, account, None, None, root); - } 
-} - -pub async fn perform_address_update( - mt_account_data: &mut [u8], - mock_indexer: &mut MockBatchedAddressForester<40>, - mt_pubkey: Pubkey, -) { - println!("pre address update -----------------------------"); - let mut cloned_mt_account_data = (*mt_account_data).to_vec(); - let old_account = BatchedMerkleTreeAccount::address_from_bytes( - cloned_mt_account_data.as_mut_slice(), - &mt_pubkey, - ) - .unwrap(); - let (input_res, new_root, _pre_next_full_batch) = { - let mut account = - BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let next_index = account.get_metadata().next_index; - println!("next index {:?}", next_index); - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - let batch_start_index = - batch.start_index + batch.get_num_inserted_zkps() * batch.zkp_batch_size; - println!("batch start index {}", batch_start_index); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let current_root = account.root_history.last().unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_address_proof( - account.get_metadata().queue_batches.batch_size as u32, - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - next_index as usize, - batch_start_index as usize, - *current_root, - ) - .await - .unwrap(); - - mock_indexer.finalize_batch_address_update(10); - assert_eq!(mock_indexer.merkle_tree.root(), new_root); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_address_queue(instruction_data), - new_root, - next_full_batch, - ) - }; - println!("post address update -----------------------------"); - println!("res 
{:?}", input_res); - assert!(input_res.is_ok()); - let event = input_res.unwrap(); - assert_batch_adress_event(event, new_root, &old_account, mt_pubkey); - - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = - BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - assert_address_merkle_tree_update(old_account, account, new_root); -} - -fn assert_merkle_tree_update( - mut old_account: BatchedMerkleTreeAccount, - account: BatchedMerkleTreeAccount, - old_queue_account: Option, - queue_account: Option, - root: [u8; 32], -) { - let input_queue_previous_batch_state = - old_account.queue_batches.get_previous_batch().get_state(); - let input_queue_current_batch = old_account.queue_batches.get_current_batch(); - let previous_batch_index = old_account.queue_batches.get_previous_batch_index(); - let is_half_full = input_queue_current_batch.get_num_inserted_elements() - >= input_queue_current_batch.batch_size / 2 - && input_queue_current_batch.get_state() != BatchState::Inserted; - if is_half_full - && input_queue_previous_batch_state == BatchState::Inserted - && !old_account - .queue_batches - .get_previous_batch() - .bloom_filter_is_zeroed() - { - old_account - .queue_batches - .get_previous_batch_mut() - .set_bloom_filter_to_zeroed(); - old_account.bloom_filter_stores[previous_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - let previous_full_batch = old_account - .queue_batches - .batches - .get(previous_batch_index) - .unwrap(); - let sequence_number = previous_full_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - let mut oldest_root_index = old_account.root_history.first_index(); - // 2.1. Get, num of remaining roots. 
- // Remaining roots have not been updated since - // the update of the previous batch hence enable to prove - // inclusion of values nullified in the previous batch. - let num_remaining_roots = sequence_number - old_account.sequence_number; - // 2.2. Zero out roots oldest to first safe root index. - // Skip one iteration we don't need to zero out - // the first safe root. - for _ in 1..num_remaining_roots { - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - // Output queue update - if let Some(mut old_queue_account) = old_queue_account { - let queue_account = queue_account.unwrap(); - let old_full_batch_index = old_queue_account.batch_metadata.pending_batch_index; - let old_full_batch = old_queue_account - .batch_metadata - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - old_account.root_history.capacity() as u32, - ) - .unwrap(); - - if old_full_batch.get_state() == BatchState::Inserted { - old_queue_account.batch_metadata.pending_batch_index += 1; - old_queue_account.batch_metadata.pending_batch_index %= 2; - } - assert_eq!( - queue_account.get_metadata(), - old_queue_account.get_metadata() - ); - assert_eq!(queue_account, old_queue_account); - // Only the output queue appends state - let zkp_batch_size = old_account.queue_batches.zkp_batch_size; - old_account.next_index += zkp_batch_size; - } else { - // Input queue update - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - let history_capacity = old_account.root_history.capacity(); - let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; - let zkp_batch_size = old_account.queue_batches.zkp_batch_size; - old_account.nullifier_next_index += zkp_batch_size; - - let old_full_batch = old_account - .queue_batches - .batches - 
.get_mut(old_full_batch_index as usize) - .unwrap(); - - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - history_capacity as u32, - ) - .unwrap(); - println!( - "current batch {:?}", - old_full_batch.get_num_inserted_elements() - ); - - if old_full_batch.get_state() == BatchState::Inserted { - old_account.queue_batches.pending_batch_index += 1; - old_account.queue_batches.pending_batch_index %= 2; - } - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - let zeroed_batch = old_full_batch.get_num_inserted_elements() - >= old_full_batch.batch_size / 2 - && old_full_batch.get_state() != BatchState::Inserted; - println!("zeroed_batch: {:?}", zeroed_batch); - - let state = account.queue_batches.batches[previous_full_batch_index].get_state(); - let previous_batch = old_account - .queue_batches - .batches - .get_mut(previous_full_batch_index) - .unwrap(); - println!( - "zeroing out values: {}", - zeroed_batch && state == BatchState::Inserted - ); - if zeroed_batch && state == BatchState::Inserted { - previous_batch.set_bloom_filter_to_zeroed(); - let sequence_number = previous_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - old_account.bloom_filter_stores[previous_full_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - - let mut oldest_root_index = old_account.root_history.first_index(); - - let num_remaining_roots = sequence_number - old_account.sequence_number; - for _ in 1..num_remaining_roots { - println!("zeroing out root index: {}", oldest_root_index); - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - } - - old_account.sequence_number += 1; - 
old_account.root_history.push(root); - assert_eq!(account.get_metadata(), old_account.get_metadata()); - assert_eq!(account, old_account); - assert_eq!(*account.root_history.last().unwrap(), root); -} - -fn assert_address_merkle_tree_update( - mut old_account: BatchedMerkleTreeAccount, - account: BatchedMerkleTreeAccount, - root: [u8; 32], -) { - { - // Input queue update - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - let history_capacity = old_account.root_history.capacity(); - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - history_capacity as u32, - ) - .unwrap(); - if old_full_batch.get_state() == BatchState::Inserted { - old_account.queue_batches.pending_batch_index += 1; - old_account.queue_batches.pending_batch_index %= 2; - } - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; - - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - let zeroed_batch = old_full_batch.get_num_inserted_elements() - >= old_full_batch.batch_size / 2 - && old_full_batch.get_state() != BatchState::Inserted; - println!("zeroed_batch: {:?}", zeroed_batch); - let state = account.queue_batches.batches[previous_full_batch_index].get_state(); - let previous_batch = old_account - .queue_batches - .batches - .get_mut(previous_full_batch_index) - .unwrap(); - if zeroed_batch && state == BatchState::Inserted { - previous_batch.set_bloom_filter_to_zeroed(); - let sequence_number = previous_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - 
old_account.bloom_filter_stores[previous_full_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - - let mut oldest_root_index = old_account.root_history.first_index(); - - let num_remaining_roots = sequence_number - old_account.sequence_number; - for _ in 1..num_remaining_roots { - println!("zeroing out root index: {}", oldest_root_index); - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - } - - old_account.sequence_number += 1; - old_account.next_index += old_account.queue_batches.zkp_batch_size; - old_account.root_history.push(root); - assert_eq!(account.get_metadata(), old_account.get_metadata()); - assert_eq!(account, old_account); - assert_eq!(*account.root_history.last().unwrap(), root); -} - -pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { - let mut rnd_bytes = rng.gen::<[u8; 32]>(); - rnd_bytes[0] = 0; - rnd_bytes -} - -#[serial] -#[tokio::test] -async fn test_fill_state_queues_completely() { - spawn_prover().await; - let mut current_slot = 1; - let roothistory_capacity = vec![17, 80]; - for root_history_capacity in roothistory_capacity { - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let mut params = InitStateTreeAccountsInstructionData::test_default(); - params.output_queue_batch_size = params.input_queue_batch_size * 10; - // Root history capacity which is greater than the input updates - params.root_history_capacity = root_history_capacity; - - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_from_params(params); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_state_merkle_tree_account_size_from_params(params); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let 
merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - - let num_tx = NUM_BATCHES as u64 * params.output_queue_batch_size; - - // Fill up complete output queue - for _ in 0..num_tx { - // Output queue - - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut pre_output_queue_account_data = output_queue_account_data.clone(); - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); - let pre_account = *pre_output_account.get_metadata(); - let pre_value_store = pre_output_account.value_vecs; - let pre_hash_chains = pre_output_account.hash_chain_stores; - - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - output_account - .insert_into_current_batch(&rnd_bytes, ¤t_slot) - .unwrap(); - assert_output_queue_insert( - pre_account, - pre_value_store, - pre_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - vec![rnd_bytes], - current_slot, - ) - .unwrap(); - current_slot += 1; - mock_indexer.output_queue_leaves.push(rnd_bytes); - } - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let result = output_account.insert_into_current_batch(&rnd_bytes, ¤t_slot); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - - output_account - .batch_metadata - .batches - .iter() - .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); - - // Batch insert output queue into merkle tree. 
- for _ in 0..output_account - .get_metadata() - .batch_metadata - .get_num_zkp_batches() - { - println!("Output update -----------------------------"); - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let mut pre_output_queue_state = output_queue_account_data.clone(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves = mock_indexer.output_queue_leaves.clone(); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; - let end = start + batch.zkp_batch_size as usize; - for leaf in &leaves[start..end] { - // Storing the leaf in the output queue indexer so that it - // can be inserted into the input queue later. 
- mock_indexer.active_leaves.push(*leaf); - } - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - println!("Output update -----------------------------"); - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - assert!(output_res.is_ok()); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - } - - // Fill up complete input queue. - let num_tx = NUM_BATCHES as u64 * params.input_queue_batch_size; - let mut first_value = [0u8; 32]; - for tx in 0..num_tx { - println!("Input insert ----------------------------- {}", tx); - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); - - let mut pre_mt_account_data = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; - let tx_hash = create_hash_chain_from_slice(&[leaf]).unwrap(); - // Index input queue insert event - mock_indexer.input_queue_leaves.push((leaf, leaf_index)); - mock_indexer.tx_events.push(MockTxEvent { - inputs: vec![leaf], - outputs: vec![], - tx_hash, - }); - println!("leaf {:?}", leaf); - println!("leaf_index {:?}", leaf_index); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - merkle_tree_account - 
.insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ) - .unwrap(); - println!("current slot {:?}", current_slot); - assert_nullifier_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![leaf], - vec![leaf_index as u64], - tx_hash, - vec![true], - vec![], - ¤t_slot, - ) - .unwrap(); - current_slot += 1; - println!("leaf {:?}", leaf); - // Insert the same value twice - { - // copy data so that failing test doesn't affect the state of - // subsequent tests - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = merkle_tree_account.insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ); - result.unwrap_err(); - // assert_eq!( - // result.unwrap_err(), - // BatchedMerkleTreeError::BatchInsertFailed.into() - // ); - } - // Try to insert first value into any batch - if tx == 0 { - first_value = leaf; - } else { - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = merkle_tree_account.insert_nullifier_into_queue( - &first_value.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ); - // assert_eq!( - // result.unwrap_err(), - // BatchedMerkleTreeError::BatchInsertFailed.into() - // ); - result.unwrap_err(); - // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - } - // Assert input queue is full and doesn't accept more inserts - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let rnd_bytes = get_rnd_bytes(&mut rng); - let tx_hash = get_rnd_bytes(&mut rng); - let result = 
merkle_tree_account.insert_nullifier_into_queue( - &rnd_bytes, - 0, - &tx_hash, - ¤t_slot, - ); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } - // Root of the final batch of first input queue batch - let mut first_input_batch_update_root_value = [0u8; 32]; - let num_updates = - params.input_queue_batch_size / params.input_queue_zkp_batch_size * NUM_BATCHES as u64; - for i in 0..num_updates { - println!("input update ----------------------------- {}", i); - perform_input_update(&mut mt_account_data, &mut mock_indexer, false, mt_pubkey).await; - - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - // after 5 updates the first batch is completely inserted - // As soon as we switch to inserting the second batch we zero out the first batch since - // the second batch is completely full. - if i >= 4 { - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - assert!(batch.bloom_filter_is_zeroed()); - } else { - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - assert!(!batch.bloom_filter_is_zeroed()); - } - let batch_one = &merkle_tree_account.queue_batches.batches[1]; - assert!(!batch_one.bloom_filter_is_zeroed()); - - println!( - "performed input queue batched update {} created root {:?}", - i, - mock_indexer.merkle_tree.root() - ); - if i == 4 { - first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); - } - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!( - "root {:?}", - merkle_tree_account.root_history.last().unwrap() - ); - println!( - "root last index {:?}", - merkle_tree_account.root_history.last_index() - ); - } - // assert all bloom_filters are inserted - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - for (i, batch) in 
merkle_tree_account.queue_batches.batches.iter().enumerate() { - assert_eq!(batch.get_state(), BatchState::Inserted); - if i == 0 { - assert!(batch.bloom_filter_is_zeroed()); - } else { - assert!(!batch.bloom_filter_is_zeroed()); - } - } - } - // do one insert and expect that roots until merkle_tree_account.batches[0].root_index are zero - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); - - let value = &get_rnd_bytes(&mut rng); - let tx_hash = &get_rnd_bytes(&mut rng); - merkle_tree_account - .insert_nullifier_into_queue(value, 0, tx_hash, ¤t_slot) - .unwrap(); - { - let post_batch = *merkle_tree_account.queue_batches.batches.first().unwrap(); - assert_eq!(post_batch.get_state(), BatchState::Fill); - assert_eq!(post_batch.get_num_inserted_zkp_batch(), 1); - let bloom_filter_store = - merkle_tree_account.bloom_filter_stores.get_mut(0).unwrap(); - let mut bloom_filter = BloomFilter::new( - params.bloom_filter_num_iters as usize, - params.bloom_filter_capacity, - bloom_filter_store, - ) - .unwrap(); - assert!(bloom_filter.contains(value)); - } - - for root in merkle_tree_account.root_history.iter() { - println!("root {:?}", root); - } - println!( - "root in root index {:?}", - merkle_tree_account.root_history[pre_batch_zero.root_index as usize] - ); - // check that all roots have been overwritten except the root index - // of the update - let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; - let start = merkle_tree_account.root_history.last_index() as u32; - println!("start {:?}", start); - for root in start + 1..pre_batch_zero.root_index + root_history_len { - println!("actual index {:?}", root); - let index = root % root_history_len; - - if index == pre_batch_zero.root_index { - let root_index = pre_batch_zero.root_index as usize; - - assert_eq!( - 
merkle_tree_account.root_history[root_index], - first_input_batch_update_root_value - ); - assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); - break; - } - println!("index {:?}", index); - assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); - } - } - } -} - -#[serial] -#[tokio::test] -async fn test_fill_address_tree_completely() { - spawn_prover().await; - let mut current_slot = 1; - let roothistory_capacity = vec![17, 80]; // - for root_history_capacity in roothistory_capacity { - let mut mock_indexer = - MockBatchedAddressForester::<{ DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize }>::default(); - - let mut params = InitAddressTreeAccountsInstructionData::test_default(); - // Root history capacity which is greater than the input updates - params.root_history_capacity = root_history_capacity; - - let owner = Pubkey::new_unique(); - - let mt_account_size = get_address_merkle_tree_account_size_from_params(params); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let merkle_tree_rent = 1_000_000_000; - - init_batched_address_merkle_tree_account( - owner, - params, - &mut mt_account_data, - merkle_tree_rent, - mt_pubkey, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - - let num_tx = NUM_BATCHES * params.input_queue_batch_size as usize; - let mut first_value = [0u8; 32]; - for tx in 0..num_tx { - println!("Input insert -----------------------------"); - let mut rnd_address = get_rnd_bytes(&mut rng); - rnd_address[0] = 0; - - let mut pre_account_data = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data, &mt_pubkey) - .unwrap(); - let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; - let mut 
merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - merkle_tree_account - .insert_address_into_queue(&rnd_address, &current_slot) - .unwrap(); - assert_input_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![rnd_address], - vec![rnd_address], - vec![true], - vec![], - &current_slot, - ) - .unwrap(); - current_slot += 1; - mock_indexer.queue_leaves.push(rnd_address); - - // Insert the same value twice - { - // copy data so that failing test doesn't affect the state of - // subsequent tests - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = - merkle_tree_account.insert_address_into_queue(&rnd_address, &current_slot); - println!("tx {}", tx); - println!("errors {:?}", result); - if tx == params.input_queue_batch_size as usize * 2 - 1 { - // Error when the value is already inserted into the other batch. - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } else if tx == params.input_queue_batch_size as usize - 1 { - // Error when the value is already inserted into the other batch. - // This occurs only when we switch the batch in this test. - assert_eq!( - result.unwrap_err(), - BatchedMerkleTreeError::NonInclusionCheckFailed - ); - } else { - // Error when inserting into the bloom filter directly twice. 
- assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - - current_slot += 1; - } - // Try to insert first value into any batch - if tx == 0 { - first_value = rnd_address; - } else { - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - let result = merkle_tree_account.insert_address_into_queue( - &first_value.to_vec().try_into().unwrap(), - &current_slot, - ); - println!("tx {}", tx); - println!("result {:?}", result); - if tx == params.input_queue_batch_size as usize * 2 - 1 { - // Error when the value is already inserted into the other batch. - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } else if tx >= params.input_queue_batch_size as usize - 1 - // || tx == params.input_queue_batch_size as usize - { - // Error when the value is already inserted into the other batch. - // This occurs only when we switch the batch in this test. - assert_eq!( - result.unwrap_err(), - BatchedMerkleTreeError::NonInclusionCheckFailed - ); - } else { - // Error when inserting into the bloom filter directly twice. 
- assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - current_slot += 1; - - // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - } - // Assert input queue is full and doesn't accept more inserts - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let rnd_bytes = get_rnd_bytes(&mut rng); - let result = merkle_tree_account.insert_address_into_queue(&rnd_bytes, &current_slot); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } - // Root of the final batch of first input queue batch - let mut first_input_batch_update_root_value = [0u8; 32]; - let num_updates = 10; - for i in 0..num_updates { - println!("address update ----------------------------- {}", i); - perform_address_update(&mut mt_account_data, &mut mock_indexer, mt_pubkey).await; - if i == 4 { - first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); - } - let merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - let batch_one = merkle_tree_account.queue_batches.batches.get(1).unwrap(); - assert!(!batch_one.bloom_filter_is_zeroed()); - - // after 5 updates the first batch is completely inserted - // As soon as we switch to inserting the second batch we zero out the first batch since - // the second batch is completely full. 
- if i >= 4 { - assert!(batch.bloom_filter_is_zeroed()); - } else { - assert!(!batch.bloom_filter_is_zeroed()); - } - } - // assert all bloom_filters are inserted - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { - assert_eq!(batch.get_state(), BatchState::Inserted); - if i == 0 { - // first batch is zeroed out since the second batch is full - assert!(batch.bloom_filter_is_zeroed()); - } else { - // second batch is not zeroed out since the first batch is empty - assert!(!batch.bloom_filter_is_zeroed()); - } - } - } - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!("root history {:?}", merkle_tree_account.root_history); - let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); - - for root in merkle_tree_account.root_history.iter() { - println!("root {:?}", root); - } - println!( - "root in root index {:?}", - merkle_tree_account.root_history[pre_batch_zero.root_index as usize] - ); - // check that all roots have been overwritten except the root index - // of the update - let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; - let start = merkle_tree_account.root_history.last_index() as u32; - println!("start {:?}", start); - for root in start + 1..pre_batch_zero.root_index + root_history_len { - println!("actual index {:?}", root); - let index = root % root_history_len; - - if index == pre_batch_zero.root_index { - let root_index = pre_batch_zero.root_index as usize; - - assert_eq!( - merkle_tree_account.root_history[root_index], - first_input_batch_update_root_value - ); - assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); - break; - } - println!("index {:?}", index); - assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); - } 
- } - } -} diff --git a/program-tests/registry-test/tests/tests.rs b/program-tests/registry-test/tests/tests.rs index 1f8cf2d793..3c20c6fac4 100644 --- a/program-tests/registry-test/tests/tests.rs +++ b/program-tests/registry-test/tests/tests.rs @@ -1880,7 +1880,7 @@ async fn test_batch_address_tree() { { println!("pre perform_batch_address_merkle_tree_update"); - for _ in 0..4 { + for _ in 0..6 { perform_batch_address_merkle_tree_update( &mut rpc, &mut test_indexer, @@ -1928,7 +1928,7 @@ async fn test_batch_address_tree() { .await .unwrap(); } - for _ in 0..5 { + for _ in 0..3 { perform_batch_address_merkle_tree_update( &mut rpc, &mut test_indexer,