diff --git a/program-tests/compressed-token-test/tests/transfer2/compress_failing.rs b/program-tests/compressed-token-test/tests/transfer2/compress_failing.rs index e6229ffb35..e87a8b5095 100644 --- a/program-tests/compressed-token-test/tests/transfer2/compress_failing.rs +++ b/program-tests/compressed-token-test/tests/transfer2/compress_failing.rs @@ -708,3 +708,139 @@ async fn test_compression_max_top_up_exceeded() -> Result<(), RpcError> { Ok(()) } + +/// Test that compressing the same compressible CToken account twice in one +/// Transfer2 instruction does NOT double-charge the rent top-up budget. +/// (Audit issue #13 — delta-based deduction means the second compression +/// to the same account sees delta=0.) +#[tokio::test] +async fn test_compression_duplicate_account_no_double_charge_top_up() -> Result<(), RpcError> { + let mut rpc = LightProgramTest::new(ProgramTestConfig::new_v2(false, None)).await?; + let payer = rpc.get_payer().insecure_clone(); + + // Create owner and airdrop lamports + let owner = Keypair::new(); + rpc.airdrop_lamports(&owner.pubkey(), 1_000_000_000).await?; + + // Create mint authority + let mint_authority = Keypair::new(); + rpc.airdrop_lamports(&mint_authority.pubkey(), 1_000_000_000) + .await?; + + // Create compressed mint seed + let mint_seed = Keypair::new(); + + // Derive mint and ATA addresses + let (mint, _) = find_mint_address(&mint_seed.pubkey()); + let (ctoken_ata, _) = derive_token_ata(&owner.pubkey(), &mint); + + // Create compressible Light Token ATA with pre_pay_num_epochs = 0 (NO prepaid rent) + let compressible_params = CompressibleParams { + compressible_config: rpc + .test_accounts + .funding_pool_config + .compressible_config_pda, + rent_sponsor: rpc.test_accounts.funding_pool_config.rent_sponsor_pda, + pre_pay_num_epochs: 0, + lamports_per_write: Some(1000), + compress_to_account_pubkey: None, + token_account_version: TokenDataVersion::ShaFlat, + compression_only: true, + }; + + let create_ata_instruction = + 
CreateAssociatedTokenAccount::new(payer.pubkey(), owner.pubkey(), mint) + .with_compressible(compressible_params) + .instruction() + .map_err(|e| RpcError::AssertRpcError(format!("Failed to create ATA: {:?}", e)))?; + + rpc.create_and_send_transaction(&[create_ata_instruction], &payer.pubkey(), &[&payer]) + .await?; + + // Mint 2000 tokens — enough for two 1000-token compressions + let token_amount = 2000u64; + let decompressed_recipients = vec![Recipient::new(owner.pubkey(), token_amount)]; + + light_test_utils::actions::mint_action_comprehensive( + &mut rpc, + &mint_seed, + &mint_authority, + &payer, + None, + false, + vec![], + decompressed_recipients, + None, + None, + Some( + light_test_utils::actions::legacy::instructions::mint_action::NewMint { + decimals: 6, + supply: 0, + mint_authority: mint_authority.pubkey(), + freeze_authority: None, + metadata: None, + version: 3, + }, + ), + ) + .await?; + + // Get output queue for compression + let output_queue = rpc + .get_random_state_tree_info() + .unwrap() + .get_output_pubkey() + .unwrap(); + + // Warp forward ~37 epochs to create a rent deficit + use light_program_test::program_test::TestRpc; + rpc.warp_to_slot(500_000)?; + + // Build Transfer2 with TWO compress operations (1000 each) on the same CToken ATA. + // Each CTokenAccount2 can only hold one compression, so we need two instances. 
+ let mut packed_accounts = PackedAccounts::default(); + packed_accounts.insert_or_get(output_queue); + + let mint_index = packed_accounts.insert_or_get_read_only(mint); + let authority_index = packed_accounts.insert_or_get_config(owner.pubkey(), true, false); + let recipient_index = packed_accounts.insert_or_get_read_only(owner.pubkey()); + let ctoken_ata_index = packed_accounts.insert_or_get_config(ctoken_ata, false, true); + + let mut compression_account_1 = CTokenAccount2::new_empty(recipient_index, mint_index); + compression_account_1 + .compress(1000, ctoken_ata_index, authority_index) + .map_err(|e| RpcError::AssertRpcError(format!("Failed to compress: {:?}", e)))?; + + let mut compression_account_2 = CTokenAccount2::new_empty(recipient_index, mint_index); + compression_account_2 + .compress(1000, ctoken_ata_index, authority_index) + .map_err(|e| RpcError::AssertRpcError(format!("Failed to compress: {:?}", e)))?; + + let (account_metas, _, _) = packed_accounts.to_account_metas(); + + // max_top_up = 50_000: sufficient for ONE top-up (~26,744) but NOT for two (~53,488). + // Without the fix, this would fail with MaxTopUpExceeded because the budget + // would be double-charged. With the delta-based fix, the second compression + // sees delta=0 and does not consume additional budget. 
+ let compression_inputs = Transfer2Inputs { + token_accounts: vec![compression_account_1, compression_account_2], + validity_proof: ValidityProof::default(), + transfer_config: Transfer2Config::default() + .filter_zero_amount_outputs() + .with_max_top_up(50_000), + meta_config: Transfer2AccountsMetaConfig::new(payer.pubkey(), account_metas), + in_lamports: None, + out_lamports: None, + output_queue: 0, + in_tlv: None, + }; + + let ix = create_transfer2_instruction(compression_inputs) + .map_err(|e| RpcError::AssertRpcError(format!("Failed to create instruction: {:?}", e)))?; + + // Transaction should succeed — the fix ensures only one top-up charge + rpc.create_and_send_transaction(&[ix], &payer.pubkey(), &[&payer, &owner]) + .await?; + + Ok(()) +} diff --git a/programs/compressed-token/program/src/extensions/mod.rs b/programs/compressed-token/program/src/extensions/mod.rs index ea4c71c8d0..7d7f5ef7ae 100644 --- a/programs/compressed-token/program/src/extensions/mod.rs +++ b/programs/compressed-token/program/src/extensions/mod.rs @@ -149,24 +149,28 @@ fn build_metadata_config( let mut processed_keys = tinyvec::ArrayVec::<[&[u8]; 20]>::new(); let should_add_key = |key: &[u8]| -> bool { - // Key exists if it's in original metadata OR added via UpdateMetadataField - let exists_in_original = metadata.iter().any(|item| item.key == key); - let added_via_update = actions.iter().any(|action| { - matches!(action, ZAction::UpdateMetadataField(update) - if update.extension_index as usize == extension_index - && update.field_type == 3 - && update.key == key) - }); - - // Key should be included if it exists and is not removed - let should_exist = exists_in_original || added_via_update; - let is_removed = actions.iter().any(|action| { - matches!(action, ZAction::RemoveMetadataKey(remove) - if remove.extension_index as usize == extension_index - && remove.key == key) - }); - - should_exist && !is_removed + // Start with whether the key exists in original metadata + let mut 
exists = metadata.iter().any(|item| item.key == key); + // Process actions in order to determine final state + // (handles add-remove-add sequences correctly) + for action in actions { + match action { + ZAction::UpdateMetadataField(update) + if update.extension_index as usize == extension_index + && update.field_type == 3 + && update.key == key => + { + exists = true; + } + ZAction::RemoveMetadataKey(remove) + if remove.extension_index as usize == extension_index && remove.key == key => + { + exists = false; + } + _ => {} + } + } + exists }; // Process all original metadata keys diff --git a/programs/compressed-token/program/tests/extensions_metadata.rs b/programs/compressed-token/program/tests/extensions_metadata.rs new file mode 100644 index 0000000000..bb5c6620dd --- /dev/null +++ b/programs/compressed-token/program/tests/extensions_metadata.rs @@ -0,0 +1,253 @@ +/// Randomized test for process_extensions_config_with_actions. +/// +/// Validates that metadata add/remove action sequences produce correct +/// AdditionalMetadataConfig output, covering the add-remove-add bug +/// from audit issue #16. +use borsh::BorshSerialize; +use light_compressed_account::Pubkey; +use light_compressed_token::extensions::process_extensions_config_with_actions; +use light_token_interface::{ + instructions::mint_action::{ + Action, CpiContext, CreateMint, MintActionCompressedInstructionData, MintInstructionData, + RemoveMetadataKeyAction, UpdateMetadataFieldAction, + }, + state::{ + extensions::{AdditionalMetadataConfig, TokenMetadataConfig}, + AdditionalMetadata, ExtensionStruct, ExtensionStructConfig, MintMetadata, TokenMetadata, + }, + MINT_ADDRESS_TREE, +}; +use light_zero_copy::traits::ZeroCopyAt; +use rand::{rngs::StdRng, thread_rng, Rng, SeedableRng}; + +/// Small key pool to maximize add/remove/re-add collisions. 
+const KEY_POOL: &[&[u8]] = &[b"k0", b"k1", b"k2", b"k3", b"k4"]; + +fn random_key(rng: &mut StdRng) -> Vec<u8> { + KEY_POOL[rng.gen_range(0..KEY_POOL.len())].to_vec() +} + +fn random_value(rng: &mut StdRng) -> Vec<u8> { + let len = rng.gen_range(1..20); + (0..len).map(|_| rng.gen::<u8>()).collect() +} + +fn random_additional_metadata(rng: &mut StdRng) -> Vec<AdditionalMetadata> { + let count = rng.gen_range(0..=4); + let mut items = Vec::new(); + let mut used_keys = Vec::new(); + for _ in 0..count { + let key = random_key(rng); + if used_keys.contains(&key) { + continue; // no duplicate keys in initial metadata + } + used_keys.push(key.clone()); + items.push(AdditionalMetadata { + key, + value: random_value(rng), + }); + } + items +} + +/// Generate random metadata actions (UpdateMetadataField with field_type=3 +/// and RemoveMetadataKey), plus occasional name/symbol/uri updates. +fn random_metadata_actions(rng: &mut StdRng) -> Vec<Action> { + let count = rng.gen_range(0..=10); + let mut actions = Vec::with_capacity(count); + for _ in 0..count { + match rng.gen_range(0..5) { + // Custom field update (field_type=3) targeting extension 0 + 0 | 1 => actions.push(Action::UpdateMetadataField(UpdateMetadataFieldAction { + extension_index: 0, + field_type: 3, + key: random_key(rng), + value: random_value(rng), + })), + // Remove key targeting extension 0 + 2 => actions.push(Action::RemoveMetadataKey(RemoveMetadataKeyAction { + extension_index: 0, + key: random_key(rng), + idempotent: 1, + })), + // Name/symbol/uri update targeting extension 0 + 3 => actions.push(Action::UpdateMetadataField(UpdateMetadataFieldAction { + extension_index: 0, + field_type: rng.gen_range(0..3), // 0=name, 1=symbol, 2=uri + key: vec![], + value: random_value(rng), + })), + // Action targeting a different extension (should be ignored) + 4 => actions.push(Action::UpdateMetadataField(UpdateMetadataFieldAction { + extension_index: 1, + field_type: 3, + key: random_key(rng), + value: random_value(rng), + })), + _ => unreachable!(), + } + 
} + actions +} + +/// Reference implementation: replay actions on a state map to compute expected config. +/// +/// Uses a simple sequential approach: maintain an ordered list of (key, exists, value_len) +/// entries. Each action mutates the state in place. Original keys appear first (in their +/// original order), newly added keys are appended. At the end, filter to existing keys. +fn compute_expected_config(metadata: &TokenMetadata, actions: &[Action]) -> TokenMetadataConfig { + let extension_index = 0usize; + + // Track name/symbol/uri lengths (last update wins) + let mut name_len = metadata.name.len(); + let mut symbol_len = metadata.symbol.len(); + let mut uri_len = metadata.uri.len(); + + // State map: (key, exists, value_len) - preserves insertion order + let mut state: Vec<(Vec<u8>, bool, usize)> = metadata + .additional_metadata + .iter() + .map(|item| (item.key.clone(), true, item.value.len())) + .collect(); + + // Replay all actions sequentially + for action in actions { + match action { + Action::UpdateMetadataField(update) + if update.extension_index as usize == extension_index => + { + match update.field_type { + 0 => name_len = update.value.len(), + 1 => symbol_len = update.value.len(), + 2 => uri_len = update.value.len(), + 3 => { + if let Some(entry) = state.iter_mut().find(|(k, _, _)| *k == update.key) { + entry.1 = true; + entry.2 = update.value.len(); + } else { + state.push((update.key.clone(), true, update.value.len())); + } + } + _ => {} + } + } + Action::RemoveMetadataKey(remove) + if remove.extension_index as usize == extension_index => + { + if let Some(entry) = state.iter_mut().find(|(k, _, _)| *k == remove.key) { + entry.1 = false; + } + } + _ => {} + } + } + + TokenMetadataConfig { + name: name_len as u32, + symbol: symbol_len as u32, + uri: uri_len as u32, + additional_metadata: state + .into_iter() + .filter(|(_, exists, _)| *exists) + .map(|(key, _, value_len)| AdditionalMetadataConfig { + key: key.len() as u32, + value: value_len as 
u32, + }) + .collect(), + } +} + +/// Wrap actions in a MintActionCompressedInstructionData, serialize, +/// and zero-copy parse to get &[ZAction]. +fn serialize_actions(actions: &[Action]) -> Vec<u8> { + let instruction_data = MintActionCompressedInstructionData { + create_mint: Some(CreateMint::default()), + leaf_index: 0, + prove_by_index: false, + root_index: 0, + max_top_up: 0, + actions: actions.to_vec(), + proof: None, + cpi_context: Some(CpiContext { + set_context: false, + first_set_context: false, + in_tree_index: 0, + in_queue_index: 0, + out_queue_index: 0, + token_out_queue_index: 0, + assigned_account_index: 0, + read_only_address_trees: [0u8; 4], + address_tree_pubkey: MINT_ADDRESS_TREE, + }), + mint: Some(MintInstructionData { + supply: 0, + decimals: 0, + metadata: MintMetadata { + version: 0, + mint_decompressed: false, + mint: Pubkey::default(), + mint_signer: [0u8; 32], + bump: 0, + }, + mint_authority: None, + freeze_authority: None, + extensions: None, + }), + }; + instruction_data.try_to_vec().expect("Failed to serialize") +} + +#[test] +fn test_metadata_config_with_actions_randomized() { + let mut rng = thread_rng(); + let seed: u64 = rng.gen(); + println!("seed value: {}", seed); + let mut rng = StdRng::seed_from_u64(seed); + + for i in 0..1000 { + let additional_metadata = random_additional_metadata(&mut rng); + let token_metadata = TokenMetadata { + update_authority: Pubkey::default(), + mint: Pubkey::default(), + name: random_value(&mut rng), + symbol: random_value(&mut rng), + uri: random_value(&mut rng), + additional_metadata, + }; + let actions = random_metadata_actions(&mut rng); + let extensions = vec![ExtensionStruct::TokenMetadata(token_metadata.clone())]; + + // Serialize and zero-copy parse to get ZAction slice + let serialized = serialize_actions(&actions); + let (zc_data, _) = MintActionCompressedInstructionData::zero_copy_at(&serialized) + .unwrap_or_else(|e| panic!("iteration {i}, seed {seed}: zero_copy_at failed: {e}")); + + 
let (has_extensions, config_vec, _) = + process_extensions_config_with_actions(Some(&extensions), &zc_data.actions) + .unwrap_or_else(|e| { + panic!("iteration {i}, seed {seed}: process_extensions failed: {e:?}") + }); + + assert!( + has_extensions, + "iteration {i}, seed {seed}: expected has_extensions=true" + ); + assert_eq!( + config_vec.len(), + 1, + "iteration {i}, seed {seed}: expected 1 config" + ); + + let actual = match &config_vec[0] { + ExtensionStructConfig::TokenMetadata(cfg) => cfg, + other => panic!("iteration {i}, seed {seed}: unexpected config type: {other:?}"), + }; + let expected = compute_expected_config(&token_metadata, &actions); + + assert_eq!( + *actual, expected, + "iteration {i}, seed {seed}\nactions: {actions:?}\nmetadata: {:?}", + token_metadata.additional_metadata + ); + } +}