diff --git a/anchor-programs/system/src/errors.rs b/anchor-programs/system/src/errors.rs
index 8b4f889c4c..b35417f75d 100644
--- a/anchor-programs/system/src/errors.rs
+++ b/anchor-programs/system/src/errors.rs
@@ -88,4 +88,6 @@ pub enum SystemProgramError {
     TooManyOutputAccounts,
     #[msg("Failed to borrow account data")]
     BorrowingDataFailed,
+    #[msg("DuplicateAccountInInputsAndReadOnly")]
+    DuplicateAccountInInputsAndReadOnly,
 }
diff --git a/program-libs/batched-merkle-tree/src/batch.rs b/program-libs/batched-merkle-tree/src/batch.rs
index d09b20cf68..ab7fa8ab72 100644
--- a/program-libs/batched-merkle-tree/src/batch.rs
+++ b/program-libs/batched-merkle-tree/src/batch.rs
@@ -989,10 +989,9 @@ mod tests {
             num_iters,
             bloom_filter_capacity,
             Pubkey::new_unique(),
+            16, // Tree height 4 -> capacity 16
         )
         .unwrap();
-        // Tree height 4 -> capacity 16
-        account.tree_capacity = 16;
         assert_eq!(account.get_num_inserted_in_current_batch(), 0);
         // Fill first batch
         for i in 1..=batch_size {
diff --git a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs
index ce513ad8c9..974b71e97c 100644
--- a/program-libs/batched-merkle-tree/src/initialize_address_tree.rs
+++ b/program-libs/batched-merkle-tree/src/initialize_address_tree.rs
@@ -11,7 +11,6 @@ use crate::{
         DEFAULT_BATCH_ROOT_HISTORY_LEN, DEFAULT_BATCH_SIZE,
     },
     errors::BatchedMerkleTreeError,
-    initialize_state_tree::match_circuit_size,
     merkle_tree::{get_merkle_tree_account_size, BatchedMerkleTreeAccount},
     BorshDeserialize, BorshSerialize,
 };
@@ -129,6 +128,7 @@ pub fn init_batched_address_merkle_tree_account(
     )
 }
 
+/// Only used for testing. For production use the default config.
 pub fn validate_batched_address_tree_params(params: InitAddressTreeAccountsInstructionData) {
     assert!(params.input_queue_batch_size > 0);
     assert_eq!(
@@ -138,7 +138,7 @@ pub fn validate_batched_address_tree_params(params: InitAddressTreeAccountsInstr
     );
     assert!(
         match_circuit_size(params.input_queue_zkp_batch_size),
-        "Zkp batch size not supported. Supported 1, 10, 100, 500, 1000"
+        "Zkp batch size not supported. Supported: 10, 250"
     );
 
     assert!(params.bloom_filter_num_iters > 0);
@@ -151,10 +151,24 @@ pub fn validate_batched_address_tree_params(params: InitAddressTreeAccountsInstr
     assert!(params.bloom_filter_capacity > 0);
    assert!(params.root_history_capacity > 0);
     assert!(params.input_queue_batch_size > 0);
+
+    // Validate root_history_capacity is sufficient for input operations
+    // (address trees only have input queues, no output queues)
+    let required_capacity = params.input_queue_batch_size / params.input_queue_zkp_batch_size;
+    assert!(
+        params.root_history_capacity as u64 >= required_capacity,
+        "root_history_capacity ({}) must be >= {} (input_queue_batch_size / input_queue_zkp_batch_size)",
+        params.root_history_capacity,
+        required_capacity
+    );
+
     assert_eq!(params.close_threshold, None);
     assert_eq!(params.height, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT);
 }
-
+/// Only 10 and 250 are supported.
+pub fn match_circuit_size(size: u64) -> bool {
+    matches!(size, 10 | 250)
+}
 pub fn get_address_merkle_tree_account_size_from_params(
     params: InitAddressTreeAccountsInstructionData,
 ) -> usize {
@@ -330,3 +344,23 @@ fn test_height_not_40() {
     };
     validate_batched_address_tree_params(params);
 }
+
+#[test]
+fn test_validate_root_history_capacity_address_tree() {
+    // Test with valid params (default should pass)
+    let params = InitAddressTreeAccountsInstructionData::default();
+    validate_batched_address_tree_params(params); // Should not panic
+}
+
+#[test]
+#[should_panic(expected = "root_history_capacity")]
+fn test_validate_root_history_capacity_insufficient_address_tree() {
+    let params = InitAddressTreeAccountsInstructionData {
+        root_history_capacity: 1, // Much too small
+        input_queue_batch_size: 1000,
+        input_queue_zkp_batch_size: 10,
+        // Required: 1000/10 = 100, but we set only 1
+        ..Default::default()
+    };
+    validate_batched_address_tree_params(params); // Should panic
+}
diff --git a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs
index 55b1f734bb..b6863a553c 100644
--- a/program-libs/batched-merkle-tree/src/initialize_state_tree.rs
+++ b/program-libs/batched-merkle-tree/src/initialize_state_tree.rs
@@ -162,6 +162,7 @@ pub fn init_batched_state_merkle_tree_accounts<'a>(
             0,
             0,
             output_queue_pubkey,
+            2u64.pow(params.height),
         )?;
     }
     let metadata = MerkleTreeMetadata {
@@ -198,6 +199,7 @@ pub fn init_batched_state_merkle_tree_accounts<'a>(
     )
 }
 
+/// Only used for testing. For production use the default config.
 pub fn validate_batched_tree_params(params: InitStateTreeAccountsInstructionData) {
     assert!(params.input_queue_batch_size > 0);
     assert!(params.output_queue_batch_size > 0);
@@ -213,11 +215,11 @@ pub fn validate_batched_tree_params(params: InitStateTreeAccountsInstructionData
     );
     assert!(
         match_circuit_size(params.input_queue_zkp_batch_size),
-        "Zkp batch size not supported. Supported 1, 10, 100, 500, 1000"
+        "Zkp batch size not supported. Supported 10, 500"
     );
     assert!(
         match_circuit_size(params.output_queue_zkp_batch_size),
-        "Zkp batch size not supported. Supported 1, 10, 100, 500, 1000"
+        "Zkp batch size not supported. Supported 10, 500"
     );
 
     assert!(params.bloom_filter_num_iters > 0);
@@ -230,12 +232,46 @@ pub fn validate_batched_tree_params(params: InitStateTreeAccountsInstructionData
     assert!(params.bloom_filter_capacity > 0);
     assert!(params.root_history_capacity > 0);
     assert!(params.input_queue_batch_size > 0);
+
+    // Validate root_history_capacity is sufficient for both input and output operations
+    let required_capacity = (params.output_queue_batch_size / params.output_queue_zkp_batch_size)
+        + (params.input_queue_batch_size / params.input_queue_zkp_batch_size);
+    assert!(
+        params.root_history_capacity as u64 >= required_capacity,
+        "root_history_capacity ({}) must be >= {} (output_queue_batch_size / output_queue_zkp_batch_size + input_queue_batch_size / input_queue_zkp_batch_size)",
+        params.root_history_capacity,
+        required_capacity
+    );
+
     assert_eq!(params.close_threshold, None);
     assert_eq!(params.height, DEFAULT_BATCH_STATE_TREE_HEIGHT);
 }
 
+/// Only 10 and 500 are supported.
 pub fn match_circuit_size(size: u64) -> bool {
-    matches!(size, 10 | 100 | 250 | 500 | 1000)
+    matches!(size, 10 | 500)
+}
+
+#[test]
+fn test_validate_root_history_capacity_state_tree() {
+    // Test with valid params (default should pass)
+    let params = InitStateTreeAccountsInstructionData::default();
+    validate_batched_tree_params(params); // Should not panic
+}
+
+#[test]
+#[should_panic(expected = "root_history_capacity")]
+fn test_validate_root_history_capacity_insufficient_state_tree() {
+    let params = InitStateTreeAccountsInstructionData {
+        root_history_capacity: 1, // Much too small
+        input_queue_batch_size: 1000,
+        output_queue_batch_size: 1000,
+        input_queue_zkp_batch_size: 10,
+        output_queue_zkp_batch_size: 10,
+        // Required: (1000/10) + (1000/10) = 200, but we set only 1
+        ..Default::default()
+    };
+    validate_batched_tree_params(params); // Should panic
 }
 
 #[cfg(feature = "test-only")]
 pub mod test_utils {
diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs
index 1738b674d9..ca91e6ec9b 100644
--- a/program-libs/batched-merkle-tree/src/merkle_tree.rs
+++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs
@@ -376,7 +376,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> {
         queue_account: &mut BatchedQueueAccount,
         instruction_data: InstructionDataBatchAppendInputs,
     ) -> Result<MerkleTreeEvent, BatchedMerkleTreeError> {
-        self.check_tree_is_full()?;
+        self.check_tree_is_full(Some(queue_account.batch_metadata.zkp_batch_size))?;
         let pending_batch_index = queue_account.batch_metadata.pending_batch_index as usize;
         let new_root = instruction_data.new_root;
         let circuit_batch_size = queue_account.batch_metadata.zkp_batch_size;
@@ -465,7 +465,7 @@ impl<'a> BatchedMerkleTreeAccount<'a> {
         if self.tree_type != TreeType::AddressV2 as u64 {
             return Err(MerkleTreeMetadataError::InvalidTreeType.into());
         }
-        self.check_tree_is_full()?;
+        self.check_tree_is_full(Some(self.metadata.queue_batches.zkp_batch_size))?;
         Ok(MerkleTreeEvent::BatchAddressAppend(
             self.update_input_queue::(instruction_data)?,
         ))
@@ -886,8 +886,10 @@ impl<'a> BatchedMerkleTreeAccount<'a> {
         Ok(())
     }
 
-    pub fn tree_is_full(&self) -> bool {
-        self.next_index >= self.capacity
+    /// Checks if the tree is full, optionally for a batch size.
+    /// If batch_size is provided, checks if there is enough space for the batch.
+    pub fn tree_is_full(&self, batch_size: Option<u64>) -> bool {
+        self.next_index + batch_size.unwrap_or_default() >= self.capacity
     }
 
     pub fn check_queue_next_index_reached_tree_capacity(
@@ -899,8 +901,13 @@ impl<'a> BatchedMerkleTreeAccount<'a> {
         Ok(())
     }
 
-    pub fn check_tree_is_full(&self) -> Result<(), BatchedMerkleTreeError> {
-        if self.tree_is_full() {
+    /// Checks if the tree is full, optionally for a batch size.
+    /// If batch_size is provided, checks if there is enough space for the batch.
+    pub fn check_tree_is_full(
+        &self,
+        batch_size: Option<u64>,
+    ) -> Result<(), BatchedMerkleTreeError> {
+        if self.tree_is_full(batch_size) {
             return Err(BatchedMerkleTreeError::TreeIsFull);
         }
         Ok(())
@@ -1578,7 +1585,7 @@ mod test {
         )
         .unwrap();
         // 1. empty tree is not full
-        assert!(!account.tree_is_full());
+        assert!(!account.tree_is_full(None));
         let mut inserted_elements = vec![];
 
         let rng = &mut rand::rngs::StdRng::from_seed([0u8; 32]);
@@ -1677,17 +1684,30 @@ mod test {
         )
         .unwrap();
         // 1. empty tree is not full
-        assert!(!account.tree_is_full());
-        assert!(account.check_tree_is_full().is_ok());
+        assert!(!account.tree_is_full(None));
+        assert!(account.check_tree_is_full(None).is_ok());
+        assert!(!account.tree_is_full(Some(1)));
+        assert!(account.check_tree_is_full(Some(1)).is_ok());
+        account.next_index = account.capacity - 2;
+        assert!(!account.tree_is_full(None));
+        assert!(account.check_tree_is_full(None).is_ok());
+        assert!(!account.tree_is_full(Some(1)));
+        assert!(account.check_tree_is_full(Some(1)).is_ok());
         account.next_index = account.capacity - 1;
-        assert!(!account.tree_is_full());
-        assert!(account.check_tree_is_full().is_ok());
+        assert!(!account.tree_is_full(None));
+        assert!(account.check_tree_is_full(None).is_ok());
+        assert!(account.tree_is_full(Some(1)));
+        assert!(account.check_tree_is_full(Some(1)).is_err());
         account.next_index = account.capacity;
-        assert!(account.tree_is_full());
-        assert!(account.check_tree_is_full().is_err());
+        assert!(account.tree_is_full(None));
+        assert!(account.check_tree_is_full(None).is_err());
+        assert!(account.tree_is_full(Some(1)));
+        assert!(account.check_tree_is_full(Some(1)).is_err());
         account.next_index = account.capacity + 1;
-        assert!(account.tree_is_full());
-        assert!(account.check_tree_is_full().is_err());
+        assert!(account.tree_is_full(None));
+        assert!(account.check_tree_is_full(None).is_err());
+        assert!(account.tree_is_full(Some(1)));
+        assert!(account.check_tree_is_full(Some(1)).is_err());
     }
 
     #[test]
     fn test_increment_next_index() {
diff --git a/program-libs/batched-merkle-tree/src/queue.rs b/program-libs/batched-merkle-tree/src/queue.rs
index 0d120fc1c4..5ec1f80f3b 100644
--- a/program-libs/batched-merkle-tree/src/queue.rs
+++ b/program-libs/batched-merkle-tree/src/queue.rs
@@ -49,6 +49,7 @@ pub struct BatchedQueueMetadata {
 }
 
 impl BatchedQueueMetadata {
+    #[allow(clippy::too_many_arguments)]
     pub fn init(
         &mut self,
         meta_data: QueueMetadata,
@@ -57,6 +58,7 @@ impl BatchedQueueMetadata {
         bloom_filter_capacity: u64,
         num_iters: u64,
         queue_pubkey: &Pubkey,
+        tree_capacity: u64,
     ) -> Result<(), BatchedMerkleTreeError> {
         self.metadata = meta_data;
         self.batch_metadata.init(batch_size, zkp_batch_size)?;
@@ -71,6 +73,9 @@ impl BatchedQueueMetadata {
             );
         }
 
+        // Set tree capacity for overflow checks
+        self.tree_capacity = tree_capacity;
+
         // Precompute Merkle tree pubkey hash for use in system program.
         // The compressed account hash depends on the Merkle tree pubkey and leaf index.
         // Poseidon hashes required input size < bn254 field size.
@@ -207,6 +212,7 @@ impl<'a> BatchedQueueAccount<'a> {
         })
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub fn init(
         account_data: &'a mut [u8],
         metadata: QueueMetadata,
@@ -215,6 +221,7 @@ impl<'a> BatchedQueueAccount<'a> {
         num_iters: u64,
         bloom_filter_capacity: u64,
         pubkey: Pubkey,
+        tree_capacity: u64,
     ) -> Result<BatchedQueueAccount<'a>, BatchedMerkleTreeError> {
         let account_data_len = account_data.len();
         let (discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN);
@@ -231,6 +238,7 @@ impl<'a> BatchedQueueAccount<'a> {
             bloom_filter_capacity,
             num_iters,
             &pubkey,
+            tree_capacity,
         )?;
 
         if account_data_len
@@ -411,7 +419,7 @@ impl<'a> BatchedQueueAccount<'a> {
 
     /// Returns true if the tree is full.
     pub fn tree_is_full(&self) -> bool {
-        self.tree_capacity == self.batch_metadata.next_index
+        self.batch_metadata.next_index >= self.tree_capacity
     }
 
     /// Check if the tree is full.
@@ -672,6 +680,7 @@ fn test_batched_queue_metadata_init() {
     let num_iters = 5;
     let queue_pubkey = Pubkey::new_unique();
 
+    let tree_capacity = 16; // 2^4 for test purposes
     let result = metadata.init(
         queue_metadata,
         batch_size,
@@ -679,6 +688,7 @@ fn test_batched_queue_metadata_init() {
         bloom_filter_capacity,
         num_iters,
         &queue_pubkey,
+        tree_capacity,
     );
 
     assert!(result.is_ok());
@@ -701,6 +711,7 @@ fn test_batched_queue_metadata_init() {
         hashed_merkle_tree_pubkey
     );
     assert_eq!(metadata.hashed_queue_pubkey, hashed_queue_pubkey);
+    assert_eq!(metadata.tree_capacity, tree_capacity);
 }
 
 #[test]
@@ -722,6 +733,7 @@ fn test_check_is_associated() {
        num_iters,
         bloom_filter_capacity,
         Pubkey::new_unique(),
+        16, // 2^4 for test purposes
     )
     .unwrap();
     // 1. Functional
@@ -762,7 +774,42 @@ fn test_pubkey() {
         num_iters,
         bloom_filter_capacity,
         pubkey,
+        16, // 2^4 for test purposes
     )
     .unwrap();
     assert_eq!(*account.pubkey(), pubkey);
 }
+
+#[test]
+fn test_tree_capacity_is_set_correctly() {
+    let mut account_data = vec![0u8; 1000];
+    let mut queue_metadata = QueueMetadata::default();
+    let associated_merkle_tree = Pubkey::new_unique();
+    queue_metadata.associated_merkle_tree = associated_merkle_tree;
+    queue_metadata.queue_type = QueueType::OutputStateV2 as u64;
+    let batch_size = 4;
+    let zkp_batch_size = 2;
+    let bloom_filter_capacity = 0;
+    let num_iters = 0;
+    let pubkey = Pubkey::new_unique();
+    let tree_capacity = 1024; // 2^10
+
+    let account = BatchedQueueAccount::init(
+        &mut account_data,
+        queue_metadata,
+        batch_size,
+        zkp_batch_size,
+        num_iters,
+        bloom_filter_capacity,
+        pubkey,
+        tree_capacity,
+    )
+    .unwrap();
+
+    // Verify tree_capacity is correctly set
+    assert_eq!(account.tree_capacity, tree_capacity);
+
+    // Verify tree_is_full works correctly
+    assert!(!account.tree_is_full()); // Should not be full initially
+    assert!(account.check_tree_is_full().is_ok());
+}
diff --git a/program-libs/batched-merkle-tree/tests/queue.rs b/program-libs/batched-merkle-tree/tests/queue.rs
index 6d1d0b52fd..d57c3fa686 100644
--- a/program-libs/batched-merkle-tree/tests/queue.rs
+++ b/program-libs/batched-merkle-tree/tests/queue.rs
@@ -71,6 +71,7 @@ fn test_output_queue_account() {
         bloom_filter_num_iters,
         bloom_filter_capacity,
         queue_pubkey,
+        1024, // 2^10 for test purposes
     )
     .unwrap();
 
@@ -99,6 +100,7 @@ fn test_value_exists_in_value_vec() {
         0,
         0,
         queue_pubkey,
+        1024, // 2^10 for test purposes
     )
     .unwrap();
     let current_slot = 2;
diff --git a/program-tests/system-cpi-v2-test/tests/invoke_cpi_with_read_only.rs b/program-tests/system-cpi-v2-test/tests/invoke_cpi_with_read_only.rs
index 14f193bb9b..1acfd13797 100644
--- a/program-tests/system-cpi-v2-test/tests/invoke_cpi_with_read_only.rs
+++ b/program-tests/system-cpi-v2-test/tests/invoke_cpi_with_read_only.rs
@@ -2826,6 +2826,109 @@ fn get_output_account_info(output_merkle_tree_index: u8) -> OutAccountInfo {
     }
 }
 
+#[serial]
+#[tokio::test]
+async fn test_duplicate_account_in_inputs_and_read_only() {
+    spawn_prover(ProverConfig::default()).await;
+
+    let mut config = ProgramTestConfig::default_with_batched_trees(false);
+    config.with_prover = false;
+    config.additional_programs = Some(vec![(
+        "create_address_test_program",
+        create_address_test_program::ID,
+    )]);
+    config.v2_state_tree_config = Some(InitStateTreeAccountsInstructionData::default());
+    let mut rpc = LightProgramTest::new(config).await.unwrap();
+    let env = rpc.test_accounts.clone();
+    let queue = env.v2_state_trees[0].output_queue;
+    let tree = env.v2_state_trees[0].merkle_tree;
+
+    let payer = rpc.get_payer().insecure_clone();
+    let mut test_indexer = TestIndexer::init_from_acounts(&payer, &env, 0).await;
+
+    // Create a compressed account first
+    let output_account = get_compressed_output_account(true, queue);
+    local_sdk::perform_test_transaction(
+        &mut rpc,
+        &mut test_indexer,
+        &payer,
+        vec![],
+        vec![output_account.clone()],
+        vec![],
+        None, // proof
+        None,
+        None,
+        false,
+        false,
+        Vec::new(),
+        Vec::new(),
+        queue,
+        tree,
+        false,
+        None,
+        false,
+        None,
+        None,
+    )
+    .await
+    .unwrap()
+    .unwrap();
+
+    // Now try to use the same account as both input and read-only
+    let compressed_account = test_indexer.compressed_accounts[0].clone();
+
+    let read_only_account = ReadOnlyCompressedAccount {
+        account_hash: compressed_account.hash().unwrap(),
+        merkle_context: MerkleContext {
+            merkle_tree_pubkey: tree.into(),
+            queue_pubkey: queue.into(),
+            leaf_index: 0,
+            prove_by_index: false,
+            tree_type: TreeType::StateV2,
+        },
+        root_index: 0,
+    };
+
+    // Get validity proof for the input account
+    let rpc_result = rpc
+        .get_validity_proof(vec![compressed_account.hash().unwrap()], Vec::new(), None)
+        .await
+        .unwrap();
+
+    // Attempt transaction with duplicate account - should fail
+    let result = local_sdk::perform_test_transaction(
+        &mut rpc,
+        &mut test_indexer,
+        &payer,
+        vec![compressed_account], // input_accounts
+        vec![], // output_accounts
+        vec![], // new_addresses
+        rpc_result.value.proof.0, // proof
+        None, // sol_compression_recipient
+        None, // account_infos
+        false, // small_ix
+        false, // with_transaction_hash
+        vec![read_only_account], // read_only_accounts
+        Vec::new(), // read_only_addresses
+        queue,
+        tree,
+        false, // is_compress
+        None, // compress_or_decompress_lamports
+        false, // invalid_sol_pool
+        None, // invalid_fee_recipient
+        None, // invalid_cpi_context
+    )
+    .await;
+
+    assert_rpc_error(
+        result,
+        0,
+        SystemProgramError::DuplicateAccountInInputsAndReadOnly.into(),
+    )
+    .unwrap();
+}
+
 pub mod local_sdk {
     use std::collections::HashMap;
 
diff --git a/program-tests/utils/src/e2e_test_env.rs b/program-tests/utils/src/e2e_test_env.rs
index 59396c6887..5c966bd087 100644
--- a/program-tests/utils/src/e2e_test_env.rs
+++ b/program-tests/utils/src/e2e_test_env.rs
@@ -2491,7 +2491,9 @@ where
         for _ in 0..select_num_accounts {
             let index = Self::safe_gen_range(&mut self.rng, 0..num_accounts, 0);
             let account = program_accounts[index].clone();
-            accounts.push(account);
+            if !input_accounts.contains(&account) {
+                accounts.push(account);
+            }
         }
         accounts
             .iter()
diff --git a/programs/account-compression/src/instructions/migrate_state.rs b/programs/account-compression/src/instructions/migrate_state.rs
index 51af7dceb0..dbb88c1333 100644
--- a/programs/account-compression/src/instructions/migrate_state.rs
+++ b/programs/account-compression/src/instructions/migrate_state.rs
@@ -215,6 +215,7 @@ mod migrate_state_test {
            3,
             account.batch_metadata.bloom_filter_capacity,
             queue_pubkey.into(),
+            account.tree_capacity,
         )
         .unwrap();
         mock_account.account = Some(output_queue);
diff --git a/programs/system/src/accounts/account_checks.rs b/programs/system/src/accounts/account_checks.rs
index 04a4e6b2c7..c98de1a228 100644
--- a/programs/system/src/accounts/account_checks.rs
+++ b/programs/system/src/accounts/account_checks.rs
@@ -55,19 +55,21 @@ pub fn check_anchor_option_sol_pool_pda(
             &crate::ID,
             option_sol_pool_pda,
         )?;
+        check_mut(option_sol_pool_pda).map_err(ProgramError::from)?;
         Some(option_sol_pool_pda)
     };
     Ok(sol_pool_pda)
 }
 
 /// Processes account equivalent to anchor Accounts Option.
-pub fn anchor_option_account_info(
+pub fn anchor_option_mut_account_info(
     account_info: Option<&AccountInfo>,
 ) -> Result<Option<&AccountInfo>, ProgramError> {
     let option_decompression_recipient = account_info.ok_or(ProgramError::NotEnoughAccountKeys)?;
     let decompression_recipient = if *option_decompression_recipient.key() == crate::ID {
         None
     } else {
+        check_mut(option_decompression_recipient).map_err(ProgramError::from)?;
         Some(option_decompression_recipient)
     };
     Ok(decompression_recipient)
@@ -108,6 +110,7 @@ where
         let option_decompression_recipient = account_infos
             .next()
             .ok_or(ProgramError::NotEnoughAccountKeys)?;
+        check_mut(option_decompression_recipient).map_err(ProgramError::from)?;
         Some(option_decompression_recipient)
     } else {
         None
@@ -147,6 +150,7 @@ where
             .next()
             .ok_or(ProgramError::NotEnoughAccountKeys)?;
         check_pda_seeds(&[SOL_POOL_PDA_SEED], &crate::ID, option_sol_pool_pda)?;
+        check_mut(option_sol_pool_pda).map_err(ProgramError::from)?;
         Some(option_sol_pool_pda)
     } else {
         None
diff --git a/programs/system/src/errors.rs b/programs/system/src/errors.rs
index 30a58c8ce0..4b30fcb0d5 100644
--- a/programs/system/src/errors.rs
+++ b/programs/system/src/errors.rs
@@ -112,6 +112,10 @@ pub enum SystemProgramError {
     InvalidTreeHeight,
     #[error("TooManyOutputAccounts")]
     TooManyOutputAccounts,
+    #[error("Borrowing data failed")]
+    BorrowingDataFailed,
+    #[error("DuplicateAccountInInputsAndReadOnly")]
+    DuplicateAccountInInputsAndReadOnly,
     #[error("Batched Merkle tree error {0}")]
     BatchedMerkleTreeError(#[from] BatchedMerkleTreeError),
     #[error("Concurrent Merkle tree error {0}")]
@@ -124,8 +128,6 @@ pub enum SystemProgramError {
     ZeroCopyError(#[from] ZeroCopyError),
     #[error("Program error code: {0}")]
     ProgramError(u64),
-    #[error("Borrowing data failed")]
-    BorrowingDataFailed,
 }
 
 impl From<SystemProgramError> for u32 {
@@ -183,12 +185,13 @@ impl From<SystemProgramError> for u32 {
             SystemProgramError::CpiContextAlreadySet => 6049,
             SystemProgramError::InvalidTreeHeight => 6050,
             SystemProgramError::TooManyOutputAccounts => 6051,
+            SystemProgramError::BorrowingDataFailed => 6052,
+            SystemProgramError::DuplicateAccountInInputsAndReadOnly => 6053,
             SystemProgramError::BatchedMerkleTreeError(e) => e.into(),
             SystemProgramError::IndexedMerkleTreeError(e) => e.into(),
             SystemProgramError::ConcurrentMerkleTreeError(e) => e.into(),
             SystemProgramError::AccountError(e) => e.into(),
             SystemProgramError::ProgramError(e) => u32::try_from(e).unwrap_or(0),
-            SystemProgramError::BorrowingDataFailed => 6052,
             SystemProgramError::ZeroCopyError(e) => e.into(),
         }
     }
diff --git a/programs/system/src/invoke/instruction.rs b/programs/system/src/invoke/instruction.rs
index 1241ecd05e..c2bc3e5cb1 100644
--- a/programs/system/src/invoke/instruction.rs
+++ b/programs/system/src/invoke/instruction.rs
@@ -4,7 +4,7 @@ use pinocchio::{account_info::AccountInfo, program_error::ProgramError};
 use crate::{
     accounts::{
         account_checks::{
-            anchor_option_account_info, check_account_compression_program,
+            anchor_option_mut_account_info, check_account_compression_program,
             check_anchor_option_sol_pool_pda, check_fee_payer, check_non_mut_account_info,
             check_system_program,
         },
@@ -62,7 +62,7 @@ impl<'info> InvokeInstruction<'info> {
 
         let sol_pool_pda = check_anchor_option_sol_pool_pda(accounts.next())?;
 
-        let decompression_recipient = anchor_option_account_info(accounts.next())?;
+        let decompression_recipient = anchor_option_mut_account_info(accounts.next())?;
 
         let system_program = check_system_program(accounts.next())?;
 
diff --git a/programs/system/src/invoke_cpi/instruction.rs b/programs/system/src/invoke_cpi/instruction.rs
index a928a5e257..1a69148ff9 100644
--- a/programs/system/src/invoke_cpi/instruction.rs
+++ b/programs/system/src/invoke_cpi/instruction.rs
@@ -3,7 +3,7 @@ use pinocchio::{account_info::AccountInfo, program_error::ProgramError};
 use crate::{
     accounts::{
         account_checks::{
-            anchor_option_account_info, check_account_compression_program,
+            anchor_option_mut_account_info, check_account_compression_program,
             check_anchor_option_cpi_context_account, check_anchor_option_sol_pool_pda,
             check_authority, check_fee_payer, check_non_mut_account_info, check_system_program,
         },
@@ -60,7 +60,7 @@ impl<'info> InvokeCpiInstruction<'info> {
 
         let sol_pool_pda = check_anchor_option_sol_pool_pda(accounts.next())?;
 
-        let decompression_recipient = anchor_option_account_info(accounts.next())?;
+        let decompression_recipient = anchor_option_mut_account_info(accounts.next())?;
 
         let system_program = check_system_program(accounts.next())?;
 
diff --git a/programs/system/src/processor/process.rs b/programs/system/src/processor/process.rs
index 872ae6d9df..8de8880e1e 100644
--- a/programs/system/src/processor/process.rs
+++ b/programs/system/src/processor/process.rs
@@ -187,6 +187,7 @@ pub fn process<
         &mut cpi_ix_data,
         accounts.as_slice(),
     )?;
+    let read_only_accounts = inputs.read_only_accounts().unwrap_or_default();
 
     // 10. hash input compressed accounts ---------------------------------------------------
     if !inputs.inputs_empty() {
@@ -211,6 +212,12 @@ pub fn process<
             )
             .map_err(ProgramError::from)?;
         }
+
+        // 10.2. Check for duplicate accounts between inputs and read-only ---------------------------------------------------
+        check_no_duplicate_accounts_in_inputs_and_read_only(
+            &cpi_ix_data.nullifiers,
+            read_only_accounts,
+        )?;
     }
     // 11. Sum check ---------------------------------------------------
     let num_input_accounts_by_index = sum_check(&inputs, &None, &inputs.is_compress())?;
@@ -218,9 +225,7 @@ pub fn process<
 
     // 12. Compress or decompress lamports ---------------------------------------------------
     compress_or_decompress_lamports::(&inputs, ctx)?;
 
     // 13. Verify read-only account inclusion by index ---------------------------------------------------
-    let read_only_accounts = inputs.read_only_accounts().unwrap_or_default();
-
     let num_read_only_accounts_by_index =
         verify_read_only_account_inclusion_by_index(accounts.as_mut_slice(), read_only_accounts)?;
@@ -340,10 +345,28 @@ pub fn process<
         cpi_outputs_data_len,
         bytes,
     )?;
     // 18. CPI account compression program ---------------------------------------------------
     cpi_account_compression_program(context, cpi_ix_bytes)
 }
 
+/// Check that no read only account is an input account.
+/// Multiple reads of the same account are allowed.
+/// Multiple writes of the same account will fail at nullifier queue insertion.
+#[inline(always)]
+fn check_no_duplicate_accounts_in_inputs_and_read_only(
+    input_nullifiers: &ZeroCopySliceMut<'_, u8, InsertNullifierInput, false>,
+    read_only_accounts: &[ZPackedReadOnlyCompressedAccount],
+) -> Result<()> {
+    for read_only_account in read_only_accounts {
+        for input_nullifier in input_nullifiers.iter() {
+            if read_only_account.account_hash == input_nullifier.account_hash {
+                return Err(SystemProgramError::DuplicateAccountInInputsAndReadOnly.into());
+            }
+        }
+    }
+    Ok(())
+}
+
 #[inline(always)]
 fn filter_for_accounts_not_proven_by_index(
     read_only_accounts: &[ZPackedReadOnlyCompressedAccount],