From a9ded7f8a1d184d75dc7433139c8fbaaec272099 Mon Sep 17 00:00:00 2001 From: ananas Date: Thu, 19 Feb 2026 18:04:00 +0000 Subject: [PATCH 1/7] feat: pinocchio account add custom discriminator, add 1 byte discriminator compress decompress test --- sdk-libs/macros/src/lib.rs | 2 +- .../macros/src/light_pdas/account/derive.rs | 163 +++++++++++++++++- .../macros/src/light_pdas/program/compress.rs | 77 +++++---- .../src/interface/program/compression/pda.rs | 8 +- .../interface/program/decompression/pda.rs | 8 +- .../pinocchio-light-program-test/Cargo.toml | 2 + .../pinocchio-light-program-test/src/lib.rs | 27 +++ .../src/one_byte_pda/accounts.rs | 88 ++++++++++ .../src/one_byte_pda/mod.rs | 4 + .../src/one_byte_pda/processor.rs | 92 ++++++++++ .../pinocchio-light-program-test/src/state.rs | 15 ++ .../tests/test_create_one_byte_record.rs | 144 ++++++++++++++++ 12 files changed, 587 insertions(+), 43 deletions(-) create mode 100644 sdk-tests/pinocchio-light-program-test/src/one_byte_pda/accounts.rs create mode 100644 sdk-tests/pinocchio-light-program-test/src/one_byte_pda/mod.rs create mode 100644 sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs create mode 100644 sdk-tests/pinocchio-light-program-test/tests/test_create_one_byte_record.rs diff --git a/sdk-libs/macros/src/lib.rs b/sdk-libs/macros/src/lib.rs index 878d55739b..921539bfdc 100644 --- a/sdk-libs/macros/src/lib.rs +++ b/sdk-libs/macros/src/lib.rs @@ -395,7 +395,7 @@ pub fn light_account_derive(input: TokenStream) -> TokenStream { /// - The `compression_info` field must be first or last field in the struct /// - Struct should be `#[repr(C)]` for predictable memory layout /// - Use `[u8; 32]` instead of `Pubkey` for address fields -#[proc_macro_derive(LightPinocchioAccount, attributes(compress_as, skip))] +#[proc_macro_derive(LightPinocchioAccount, attributes(compress_as, skip, light_pinocchio))] pub fn light_pinocchio_account_derive(input: TokenStream) -> TokenStream { let input = 
parse_macro_input!(input as DeriveInput); into_token_stream(light_pdas::account::derive::derive_light_pinocchio_account( diff --git a/sdk-libs/macros/src/light_pdas/account/derive.rs b/sdk-libs/macros/src/light_pdas/account/derive.rs index c7fcde8777..dad34d9ca0 100644 --- a/sdk-libs/macros/src/light_pdas/account/derive.rs +++ b/sdk-libs/macros/src/light_pdas/account/derive.rs @@ -117,6 +117,62 @@ pub fn derive_light_pinocchio_account(input: DeriveInput) -> Result derive_light_account_internal(input, Framework::Pinocchio) } +/// Parses the `discriminator` bytes from `#[light_pinocchio(discriminator = [...])]` if present. +/// Returns None if the attribute is absent (use hash-derived discriminator). +fn parse_pinocchio_discriminator(attrs: &[syn::Attribute]) -> Result>> { + for attr in attrs { + if !attr.path().is_ident("light_pinocchio") { + continue; + } + let meta_list = attr.meta.require_list()?; + let nested: Punctuated = + meta_list.parse_args_with(Punctuated::parse_terminated)?; + for meta in &nested { + if let syn::Meta::NameValue(nv) = meta { + if nv.path.is_ident("discriminator") { + if let syn::Expr::Array(arr) = &nv.value { + let bytes: Vec = arr + .elems + .iter() + .map(|e| { + if let syn::Expr::Lit(lit) = e { + if let syn::Lit::Int(i) = &lit.lit { + return i + .base10_parse::() + .map_err(|err| syn::Error::new_spanned(i, err)); + } + } + if let syn::Expr::Cast(cast) = e { + if let syn::Expr::Lit(lit) = cast.expr.as_ref() { + if let syn::Lit::Int(i) = &lit.lit { + return i + .base10_parse::() + .map_err(|err| syn::Error::new_spanned(i, err)); + } + } + } + Err(syn::Error::new_spanned(e, "expected integer literal")) + }) + .collect::>>()?; + if bytes.is_empty() { + return Err(syn::Error::new_spanned( + &arr, + "discriminator must have at least one byte", + )); + } + return Ok(Some(bytes)); + } + return Err(syn::Error::new_spanned( + &nv.value, + "discriminator must be an array like [1u8]", + )); + } + } + } + } + Ok(None) +} + /// Internal 
implementation of LightAccount derive, parameterized by framework. fn derive_light_account_internal(input: DeriveInput, framework: Framework) -> Result { // Convert DeriveInput to ItemStruct for macros that need it @@ -125,8 +181,35 @@ fn derive_light_account_internal(input: DeriveInput, framework: Framework) -> Re // Generate LightHasherSha implementation let hasher_impl = derive_light_hasher_sha(item_struct.clone())?; - // Generate LightDiscriminator implementation - let discriminator_impl = discriminator::anchor_discriminator(item_struct)?; + // Check for custom discriminator argument from #[light_pinocchio(discriminator = [...])] + // Only valid for the Pinocchio framework; reject it on Anchor to avoid silent misuse. + let discriminator_impl = if let Some(disc_bytes) = parse_pinocchio_discriminator(&input.attrs)? + { + if framework != Framework::Pinocchio { + return Err(syn::Error::new_spanned( + &input.ident, + "#[light_pinocchio(discriminator = [...])] is only valid with \ + #[derive(LightPinocchioAccount)], not with #[derive(LightAccount)]", + )); + } + let mut padded = [0u8; 8]; + let copy_len = disc_bytes.len().min(8); + padded[..copy_len].copy_from_slice(&disc_bytes[..copy_len]); + let discriminator_tokens: proc_macro2::TokenStream = format!("{padded:?}").parse().unwrap(); + let slice_tokens: proc_macro2::TokenStream = format!("{disc_bytes:?}").parse().unwrap(); + let struct_name = &input.ident; + let (impl_gen, type_gen, where_clause) = input.generics.split_for_impl(); + quote! { + impl #impl_gen LightDiscriminator for #struct_name #type_gen #where_clause { + const LIGHT_DISCRIMINATOR: [u8; 8] = #discriminator_tokens; + const LIGHT_DISCRIMINATOR_SLICE: &'static [u8] = &#slice_tokens; + fn discriminator() -> [u8; 8] { Self::LIGHT_DISCRIMINATOR } + } + } + } else { + // Generate LightDiscriminator implementation via SHA256 + discriminator::anchor_discriminator(item_struct)? 
+ }; // Generate unified LightAccount implementation (includes PackedXxx struct) let light_account_impl = generate_light_account_impl(&input, framework)?; @@ -747,6 +830,82 @@ mod tests { use super::*; + #[test] + fn test_light_pinocchio_custom_discriminator() { + let input: DeriveInput = parse_quote! { + #[light_pinocchio(discriminator = [1u8])] + pub struct OneByteRecord { + pub compression_info: CompressionInfo, + pub owner: [u8; 32], + } + }; + + let result = derive_light_pinocchio_account(input); + assert!( + result.is_ok(), + "LightPinocchioAccount with custom discriminator should succeed: {:?}", + result.err() + ); + + let output = result.unwrap().to_string(); + + // Should contain custom discriminator (1, 0, 0, 0, 0, 0, 0, 0) + assert!( + output.contains("LIGHT_DISCRIMINATOR"), + "Should have LIGHT_DISCRIMINATOR" + ); + assert!( + output.contains("1 , 0 , 0 , 0 , 0 , 0 , 0 , 0") + || output.contains("1, 0, 0, 0, 0, 0, 0, 0"), + "LIGHT_DISCRIMINATOR should be [1,0,0,0,0,0,0,0]" + ); + // LIGHT_DISCRIMINATOR_SLICE must be &[1] (1 byte), NOT the padded &[1, 0, 0, 0, 0, 0, 0, 0] + assert!( + output.contains("LIGHT_DISCRIMINATOR_SLICE"), + "Should have LIGHT_DISCRIMINATOR_SLICE" + ); + // Verify the slice contains exactly 1 element (not 8) + // The generated token stream renders as `& [1u8]` or `& [1]` + assert!( + output.contains("& [1u8]") || output.contains("& [1]"), + "LIGHT_DISCRIMINATOR_SLICE should be &[1] (1 byte), got: {output}" + ); + } + + #[test] + fn test_light_pinocchio_custom_discriminator_empty_rejected() { + let input: DeriveInput = parse_quote! 
{ + #[light_pinocchio(discriminator = [])] + pub struct EmptyDisc { + pub compression_info: CompressionInfo, + pub owner: [u8; 32], + } + }; + let result = derive_light_pinocchio_account(input); + assert!(result.is_err(), "Empty discriminator array should be rejected"); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("at least one byte"), + "Error should mention 'at least one byte', got: {err}" + ); + } + + #[test] + fn test_light_pinocchio_discriminator_rejected_on_anchor() { + let input: DeriveInput = parse_quote! { + #[light_pinocchio(discriminator = [1u8])] + pub struct AnchorRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, + } + }; + let result = derive_light_account(input); + assert!( + result.is_err(), + "#[light_pinocchio(discriminator)] should be rejected with LightAccount (Anchor)" + ); + } + #[test] fn test_light_account_basic() { let input: DeriveInput = parse_quote! { diff --git a/sdk-libs/macros/src/light_pdas/program/compress.rs b/sdk-libs/macros/src/light_pdas/program/compress.rs index e37c7b8d5f..c1aec7369d 100644 --- a/sdk-libs/macros/src/light_pdas/program/compress.rs +++ b/sdk-libs/macros/src/light_pdas/program/compress.rs @@ -311,6 +311,18 @@ impl CompressBuilder { } /// Generate compress dispatch as an associated function on the enum using the specified backend. + /// + /// # Discriminator ordering invariant + /// + /// The dispatch uses a sequential if-chain keyed on `LIGHT_DISCRIMINATOR_SLICE`. Because a + /// shorter discriminator is a prefix of any byte sequence, types with shorter discriminators + /// MUST be placed *after* all types with longer discriminators in the `ProgramAccounts` enum. + /// Violating this ordering causes the short discriminator to match prematurely, corrupting + /// dispatch for longer-discriminator types whose on-chain prefix happens to share the same + /// leading bytes. 
+ /// + /// The `LightProgramPinocchio` derive preserves enum declaration order, so the caller must + /// declare non-standard (short) discriminator variants last. pub fn generate_enum_dispatch_method_with_backend( &self, enum_name: &syn::Ident, @@ -329,25 +341,33 @@ impl CompressBuilder { if info.is_zero_copy { quote! { - d if d == #name::LIGHT_DISCRIMINATOR => { - let pod_bytes = &data[8..8 + core::mem::size_of::<#name>()]; - let mut account_data: #name = *bytemuck::from_bytes(pod_bytes); - drop(data); - #account_crate::prepare_account_for_compression( - account_info, &mut account_data, meta, index, ctx, - ) + { + let __disc_slice = <#name as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; + let __disc_len = __disc_slice.len(); + if data.len() >= __disc_len && &data[..__disc_len] == __disc_slice { + let pod_bytes = &data[__disc_len..__disc_len + core::mem::size_of::<#name>()]; + let mut account_data: #name = *bytemuck::from_bytes(pod_bytes); + drop(data); + return #account_crate::prepare_account_for_compression( + account_info, &mut account_data, meta, index, ctx, + ); + } } } } else { quote! 
{ - d if d == #name::LIGHT_DISCRIMINATOR => { - let mut reader = &data[8..]; - let mut account_data = #name::deserialize(&mut reader) - .map_err(|_| #sdk_error::InvalidInstructionData)?; - drop(data); - #account_crate::prepare_account_for_compression( - account_info, &mut account_data, meta, index, ctx, - ) + { + let __disc_slice = <#name as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; + let __disc_len = __disc_slice.len(); + if data.len() >= __disc_len && &data[..__disc_len] == __disc_slice { + let mut reader = &data[__disc_len..]; + let mut account_data = #name::deserialize(&mut reader) + .map_err(|_| #sdk_error::InvalidInstructionData)?; + drop(data); + return #account_crate::prepare_account_for_compression( + account_info, &mut account_data, meta, index, ctx, + ); + } } } } @@ -363,16 +383,10 @@ impl CompressBuilder { index: usize, ctx: &mut #account_crate::CompressCtx<'_>, ) -> std::result::Result<(), #sdk_error> { - use #account_crate::LightDiscriminator; use borsh::BorshDeserialize; let data = account_info.try_borrow_data()#borrow_error; - let discriminator: [u8; 8] = data[..8] - .try_into() - .map_err(|_| #sdk_error::InvalidInstructionData)?; - match discriminator { - #(#compress_arms)* - _ => Ok(()), - } + #(#compress_arms)* + Ok(()) } } }) @@ -385,16 +399,10 @@ impl CompressBuilder { index: usize, ctx: &mut #account_crate::CompressCtx<'_, 'info>, ) -> std::result::Result<(), #sdk_error> { - use #account_crate::LightDiscriminator; use borsh::BorshDeserialize; let data = account_info.try_borrow_data()#borrow_error; - let discriminator: [u8; 8] = data[..8] - .try_into() - .map_err(|_| #sdk_error::InvalidInstructionData)?; - match discriminator { - #(#compress_arms)* - _ => Ok(()), - } + #(#compress_arms)* + Ok(()) } } }) @@ -448,10 +456,13 @@ impl CompressBuilder { let qualified_type = qualify_type_with_crate(&info.account_type); if backend.is_pinocchio() { - // For pinocchio, all types use INIT_SPACE constant (no CompressedInitSpace 
trait) + // For pinocchio, use LIGHT_DISCRIMINATOR_SLICE.len() for the on-chain prefix size. + // This supports types with non-standard (e.g. 1-byte) discriminators. quote! { const _: () = { - const COMPRESSED_SIZE: usize = 8 + #qualified_type::INIT_SPACE; + const COMPRESSED_SIZE: usize = + <#qualified_type as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE.len() + + #qualified_type::INIT_SPACE; assert!( COMPRESSED_SIZE <= 800, concat!( diff --git a/sdk-libs/sdk-types/src/interface/program/compression/pda.rs b/sdk-libs/sdk-types/src/interface/program/compression/pda.rs index c5ab44fce1..a2e9f07e25 100644 --- a/sdk-libs/sdk-types/src/interface/program/compression/pda.rs +++ b/sdk-libs/sdk-types/src/interface/program/compression/pda.rs @@ -97,10 +97,12 @@ where let mut data = account_info .try_borrow_mut_data() .map_err(LightSdkTypesError::AccountError)?; - // Write discriminator first - data[..8].copy_from_slice(&A::LIGHT_DISCRIMINATOR); + // Write discriminator first (variable length: LIGHT_DISCRIMINATOR_SLICE may be < 8 bytes) + let disc_slice = A::LIGHT_DISCRIMINATOR_SLICE; + let disc_len = disc_slice.len(); + data[..disc_len].copy_from_slice(disc_slice); // Write serialized account data after discriminator - let writer = &mut &mut data[8..]; + let writer = &mut &mut data[disc_len..]; account_data .serialize(writer) .map_err(|_| LightSdkTypesError::Borsh)?; diff --git a/sdk-libs/sdk-types/src/interface/program/decompression/pda.rs b/sdk-libs/sdk-types/src/interface/program/decompression/pda.rs index 2cbf6ba77a..3e32ec6ef3 100644 --- a/sdk-libs/sdk-types/src/interface/program/decompression/pda.rs +++ b/sdk-libs/sdk-types/src/interface/program/decompression/pda.rs @@ -101,7 +101,8 @@ where input_data_hash[0] = 0; // Zero first byte per protocol convention // 6. 
Calculate space and create PDA - let discriminator_len = 8; + let disc_slice = as LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; + let discriminator_len = disc_slice.len(); let space = discriminator_len + data_len.max( as LightAccount>::INIT_SPACE); let rent_minimum = AI::get_min_rent_balance(space)?; @@ -127,13 +128,12 @@ where let mut pda_data = pda_account .try_borrow_mut_data() .map_err(|_| LightSdkTypesError::ConstraintViolation)?; - pda_data[..8] - .copy_from_slice(& as LightDiscriminator>::LIGHT_DISCRIMINATOR); + pda_data[..discriminator_len].copy_from_slice(disc_slice); // 8. Set decompressed state and serialize let mut decompressed = account_data; decompressed.set_decompressed(ctx.light_config, ctx.current_slot); - let writer = &mut &mut pda_data[8..]; + let writer = &mut &mut pda_data[discriminator_len..]; decompressed .serialize(writer) .map_err(|_| LightSdkTypesError::Borsh)?; diff --git a/sdk-tests/pinocchio-light-program-test/Cargo.toml b/sdk-tests/pinocchio-light-program-test/Cargo.toml index 912ce4b1a6..8606fe3193 100644 --- a/sdk-tests/pinocchio-light-program-test/Cargo.toml +++ b/sdk-tests/pinocchio-light-program-test/Cargo.toml @@ -21,6 +21,8 @@ pinocchio = { workspace = true } pinocchio-pubkey = { workspace = true } pinocchio-system = { workspace = true } light-hasher = { workspace = true } +light-compressed-account = { workspace = true } +light-compressible = { workspace = true } [dev-dependencies] light-program-test = { workspace = true, features = ["devenv"] } diff --git a/sdk-tests/pinocchio-light-program-test/src/lib.rs b/sdk-tests/pinocchio-light-program-test/src/lib.rs index 6a7ba2256c..87e19732ac 100644 --- a/sdk-tests/pinocchio-light-program-test/src/lib.rs +++ b/sdk-tests/pinocchio-light-program-test/src/lib.rs @@ -14,6 +14,7 @@ pub mod account_loader; pub mod all; pub mod ata; pub mod mint; +pub mod one_byte_pda; pub mod pda; pub mod state; pub mod token_account; @@ -48,6 +49,9 @@ pub enum ProgramAccounts { #[light_account(pda::seeds 
= [RECORD_SEED, ctx.owner], pda::zero_copy)] ZeroCopyRecord(ZeroCopyRecord), + + #[light_account(pda::seeds = [b"one_byte_record", ctx.owner])] + OneByteRecord(OneByteRecord), } // ============================================================================ @@ -61,6 +65,8 @@ pub mod discriminators { pub const CREATE_MINT: [u8; 8] = [69, 44, 215, 132, 253, 214, 41, 45]; pub const CREATE_TWO_MINTS: [u8; 8] = [222, 41, 188, 84, 174, 115, 236, 105]; pub const CREATE_ALL: [u8; 8] = [149, 49, 144, 45, 208, 155, 177, 43]; + /// Discriminator for CREATE_ONE_BYTE_RECORD instruction. + pub const CREATE_ONE_BYTE_RECORD: [u8; 8] = [1, 0, 0, 0, 0, 0, 0, 0]; } // ============================================================================ @@ -89,6 +95,7 @@ pub fn process_instruction( discriminators::CREATE_MINT => process_create_mint(accounts, data), discriminators::CREATE_TWO_MINTS => process_create_two_mints(accounts, data), discriminators::CREATE_ALL => process_create_all(accounts, data), + discriminators::CREATE_ONE_BYTE_RECORD => process_create_one_byte_record(accounts, data), ProgramAccounts::INITIALIZE_COMPRESSION_CONFIG => { ProgramAccounts::process_initialize_config(accounts, data) } @@ -276,3 +283,23 @@ fn process_create_all(accounts: &[AccountInfo], data: &[u8]) -> Result<(), Progr Ok(()) } + +fn process_create_one_byte_record( + accounts: &[AccountInfo], + data: &[u8], +) -> Result<(), ProgramError> { + use borsh::BorshDeserialize; + use one_byte_pda::accounts::{CreateOneByteRecord, CreateOneByteRecordParams}; + + let params = CreateOneByteRecordParams::deserialize(&mut &data[..]) + .map_err(|_| ProgramError::BorshIoError)?; + + let remaining_start = CreateOneByteRecord::FIXED_LEN; + let (fixed_accounts, remaining_accounts) = accounts.split_at(remaining_start); + let ctx = CreateOneByteRecord::parse(fixed_accounts, ¶ms)?; + + one_byte_pda::processor::process(&ctx, ¶ms, remaining_accounts) + .map_err(|e| ProgramError::Custom(u32::from(e)))?; + + Ok(()) +} diff --git 
a/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/accounts.rs b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/accounts.rs new file mode 100644 index 0000000000..c2504eb3cd --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/accounts.rs @@ -0,0 +1,88 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use light_account_pinocchio::{CreateAccountsProof, LightAccount, LightDiscriminator}; +use pinocchio::{ + account_info::AccountInfo, + instruction::{Seed, Signer}, + program_error::ProgramError, + sysvars::Sysvar, +}; + +use crate::state::OneByteRecord; + +#[derive(BorshSerialize, BorshDeserialize, Clone)] +pub struct CreateOneByteRecordParams { + pub create_accounts_proof: CreateAccountsProof, + pub owner: [u8; 32], +} + +pub struct CreateOneByteRecord<'a> { + pub fee_payer: &'a AccountInfo, + pub compression_config: &'a AccountInfo, + pub pda_rent_sponsor: &'a AccountInfo, + pub record: &'a AccountInfo, + pub system_program: &'a AccountInfo, +} + +impl<'a> CreateOneByteRecord<'a> { + pub const FIXED_LEN: usize = 5; + + pub fn parse( + accounts: &'a [AccountInfo], + params: &CreateOneByteRecordParams, + ) -> Result { + let fee_payer = &accounts[0]; + let compression_config = &accounts[1]; + let pda_rent_sponsor = &accounts[2]; + let record = &accounts[3]; + let system_program = &accounts[4]; + + if !fee_payer.is_signer() { + return Err(ProgramError::MissingRequiredSignature); + } + + // Derive PDA with discriminator layout: space = disc_len + INIT_SPACE + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + OneByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"one_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + + let rent = + pinocchio::sysvars::rent::Rent::get().map_err(|_| ProgramError::UnsupportedSysvar)?; + let lamports = 
rent.minimum_balance(space); + + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"one_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + + // Write discriminator to data[0..disc_len] + { + let mut data = record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(OneByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + Ok(Self { + fee_payer, + compression_config, + pda_rent_sponsor, + record, + system_program, + }) + } +} diff --git a/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/mod.rs b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/mod.rs new file mode 100644 index 0000000000..c33d77f1e1 --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/mod.rs @@ -0,0 +1,4 @@ +pub mod accounts; +pub mod processor; + +pub use accounts::*; diff --git a/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs new file mode 100644 index 0000000000..573692b8fd --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs @@ -0,0 +1,92 @@ +use borsh::BorshDeserialize; +use light_account_pinocchio::{ + prepare_compressed_account_on_init, CompressionInfo, CompressedCpiContext, CpiAccounts, + CpiAccountsConfig, InstructionDataInvokeCpiWithAccountInfo, InvokeLightSystemProgram, + LightConfig, LightDiscriminator, LightSdkTypesError, PackedAddressTreeInfoExt, +}; +use pinocchio::{ + account_info::AccountInfo, + sysvars::{clock::Clock, Sysvar}, +}; + +use super::accounts::{CreateOneByteRecord, CreateOneByteRecordParams}; +use crate::state::OneByteRecord; + +pub fn process( + ctx: &CreateOneByteRecord<'_>, + 
params: &CreateOneByteRecordParams, + remaining_accounts: &[AccountInfo], +) -> Result<(), LightSdkTypesError> { + let system_accounts_offset = params.create_accounts_proof.system_accounts_offset as usize; + if remaining_accounts.len() < system_accounts_offset { + return Err(LightSdkTypesError::FewerAccountsThanSystemAccounts); + } + let config = CpiAccountsConfig::new(crate::LIGHT_CPI_SIGNER); + let cpi_accounts = CpiAccounts::new_with_config( + ctx.fee_payer, + &remaining_accounts[system_accounts_offset..], + config, + ); + + let address_tree_info = ¶ms.create_accounts_proof.address_tree_info; + let address_tree_pubkey = address_tree_info + .get_tree_pubkey(&cpi_accounts) + .map_err(|_| LightSdkTypesError::InvalidInstructionData)?; + let output_tree_index = params.create_accounts_proof.output_state_tree_index; + let current_account_index: u8 = 0; + let cpi_context = CompressedCpiContext::default(); + let mut new_address_params = Vec::with_capacity(1); + let mut account_infos = Vec::with_capacity(1); + + let light_config = LightConfig::load_checked(ctx.compression_config, &crate::ID) + .map_err(|_| LightSdkTypesError::InvalidInstructionData)?; + let current_slot = Clock::get() + .map_err(|_| LightSdkTypesError::InvalidInstructionData)? + .slot; + + let record_key = *ctx.record.key(); + prepare_compressed_account_on_init( + &record_key, + &address_tree_pubkey, + address_tree_info, + output_tree_index, + current_account_index, + &crate::ID, + &mut new_address_params, + &mut account_infos, + )?; + + // Set owner and compression_info on the record at offset data[disc_len..] 
+ { + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = ctx + .record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = OneByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = params.owner; + record.compression_info = CompressionInfo::new_from_config(&light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + + let instruction_data = InstructionDataInvokeCpiWithAccountInfo { + mode: 1, + bump: crate::LIGHT_CPI_SIGNER.bump, + invoking_program_id: crate::LIGHT_CPI_SIGNER.program_id.into(), + compress_or_decompress_lamports: 0, + is_compress: false, + with_cpi_context: false, + with_transaction_hash: false, + cpi_context, + proof: params.create_accounts_proof.proof.0, + new_address_params, + account_infos, + read_only_addresses: vec![], + read_only_accounts: vec![], + }; + + instruction_data.invoke(cpi_accounts)?; + Ok(()) +} diff --git a/sdk-tests/pinocchio-light-program-test/src/state.rs b/sdk-tests/pinocchio-light-program-test/src/state.rs index ac696b1eff..4e5ba9f260 100644 --- a/sdk-tests/pinocchio-light-program-test/src/state.rs +++ b/sdk-tests/pinocchio-light-program-test/src/state.rs @@ -24,6 +24,21 @@ pub struct MinimalRecord { pub owner: Pubkey, } +/// A PDA with a 1-byte on-chain type identifier instead of the standard 8-byte +/// LIGHT_DISCRIMINATOR. On-chain layout: `[1 byte DISC][borsh data]`. +/// +/// `LIGHT_DISCRIMINATOR = [1,0,0,0,0,0,0,0]` (8 bytes, for the compressed Merkle leaf). +/// `LIGHT_DISCRIMINATOR_SLICE = &[1u8]` (1 byte, written on-chain). 
+#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [1u8])] +#[repr(C)] +pub struct OneByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + /// A zero-copy account using Pod serialization. /// Used for efficient on-chain zero-copy access. #[derive( diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_one_byte_record.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_one_byte_record.rs new file mode 100644 index 0000000000..3f193f5a49 --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_one_byte_record.rs @@ -0,0 +1,144 @@ +mod shared; + +use light_account::LightDiscriminator; +use light_client::interface::{ + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, +}; +use light_compressible::rent::SLOTS_PER_EPOCH; +use light_program_test::{program_test::TestRpc, Rpc}; +use pinocchio_light_program_test::{ + discriminators, one_byte_pda::accounts::CreateOneByteRecordParams, LightAccountVariant, + OneByteRecord, OneByteRecordSeeds, +}; +use solana_instruction::{AccountMeta, Instruction}; +use solana_keypair::Keypair; +use solana_pubkey::Pubkey; +use solana_signer::Signer; + +#[tokio::test] +async fn test_create_compress_decompress_one_byte_record() { + let env = shared::setup_test_env().await; + let mut rpc = env.rpc; + let payer = env.payer; + let program_id = env.program_id; + + let owner = Keypair::new().pubkey(); + + let (record_pda, _) = + Pubkey::find_program_address(&[b"one_byte_record", owner.as_ref()], &program_id); + + // PHASE 1: Create + let proof_result = get_create_accounts_proof( + &rpc, + &program_id, + vec![CreateAccountsProofInput::pda(record_pda)], + ) + .await + .unwrap(); + + let params = CreateOneByteRecordParams { + create_accounts_proof: proof_result.create_accounts_proof, + owner: owner.to_bytes(), + }; + + let accounts = vec![ + 
AccountMeta::new(payer.pubkey(), true), + AccountMeta::new_readonly(env.config_pda, false), + AccountMeta::new(env.rent_sponsor, false), + AccountMeta::new(record_pda, false), + AccountMeta::new_readonly(solana_sdk::system_program::ID, false), + ]; + + let instruction = Instruction { + program_id, + accounts: [accounts, proof_result.remaining_accounts].concat(), + data: shared::build_instruction_data(&discriminators::CREATE_ONE_BYTE_RECORD, ¶ms), + }; + + rpc.create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await + .expect("CreateOneByteRecord should succeed"); + + // Verify on-chain state after creation + let record_account = rpc + .get_account(record_pda) + .await + .unwrap() + .expect("OneByteRecord PDA should exist on-chain"); + + assert_eq!( + &record_account.data[..OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len()], + OneByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "First byte(s) should match OneByteRecord discriminator" + ); + + let record: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &record_account.data[1..]) + .expect("Failed to deserialize OneByteRecord"); + + assert_eq!( + record.owner, + owner.to_bytes(), + "Owner should match after creation" + ); + + // PHASE 2: Warp to trigger auto-compression + rpc.warp_slot_forward(SLOTS_PER_EPOCH * 30).await.unwrap(); + shared::assert_onchain_closed(&mut rpc, &record_pda, "OneByteRecord").await; + + // PHASE 3: Decompress via create_load_instructions + let account_interface = rpc + .get_account_interface(&record_pda, None) + .await + .expect("failed to get OneByteRecord interface") + .value + .expect("OneByteRecord interface should exist"); + assert!(account_interface.is_cold(), "OneByteRecord should be cold"); + + // The indexer returns: [8-byte LIGHT_DISCRIMINATOR] + [borsh(OneByteRecord)] + let data: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account_interface.account.data[8..]) + .expect("Failed to parse OneByteRecord from interface"); + assert_eq!( + 
data.owner, + owner.to_bytes(), + "Owner should match in compressed state" + ); + + let variant = LightAccountVariant::OneByteRecord { + seeds: OneByteRecordSeeds { + owner: owner.to_bytes(), + }, + data, + }; + + let spec = PdaSpec::new(account_interface, variant, program_id); + let specs: Vec> = vec![AccountSpec::Pda(spec)]; + + let ixs = create_load_instructions(&specs, payer.pubkey(), env.config_pda, &rpc) + .await + .expect("create_load_instructions should succeed"); + + rpc.create_and_send_transaction(&ixs, &payer.pubkey(), &[&payer]) + .await + .expect("Decompression should succeed"); + + // PHASE 4: Verify state preserved after decompression + shared::assert_onchain_exists(&mut rpc, &record_pda, "OneByteRecord").await; + + let account = rpc.get_account(record_pda).await.unwrap().unwrap(); + assert_eq!( + &account.data[..OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len()], + OneByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "First byte(s) should match OneByteRecord discriminator after decompression" + ); + + let decompressed: OneByteRecord = borsh::BorshDeserialize::deserialize(&mut &account.data[1..]) + .expect("Failed to deserialize decompressed OneByteRecord"); + assert_eq!( + decompressed.owner, + owner.to_bytes(), + "Owner should match after decompression" + ); +} From dcc6d69707ba8d5a65701be688c2678d818f42e2 Mon Sep 17 00:00:00 2001 From: ananas Date: Thu, 19 Feb 2026 18:26:22 +0000 Subject: [PATCH 2/7] feat: add 1 byte discriminator account to stress test --- .../src/all/accounts.rs | 68 +++++++++++---- .../src/all/processor.rs | 20 ++++- .../pinocchio-light-program-test/src/lib.rs | 13 +++ .../tests/stress_test.rs | 57 ++++++++++++- .../tests/test_create_all.rs | 85 ++++++++++++++++--- 5 files changed, 212 insertions(+), 31 deletions(-) diff --git a/sdk-tests/pinocchio-light-program-test/src/all/accounts.rs b/sdk-tests/pinocchio-light-program-test/src/all/accounts.rs index 828016092f..8060552c45 100644 --- 
a/sdk-tests/pinocchio-light-program-test/src/all/accounts.rs +++ b/sdk-tests/pinocchio-light-program-test/src/all/accounts.rs @@ -7,7 +7,7 @@ use pinocchio::{ sysvars::Sysvar, }; -use crate::state::{MinimalRecord, ZeroCopyRecord}; +use crate::state::{MinimalRecord, OneByteRecord, ZeroCopyRecord}; #[derive(Clone, BorshSerialize, BorshDeserialize, Debug)] pub struct CreateAllParams { @@ -23,6 +23,7 @@ pub struct CreateAllAccounts<'a> { pub compression_config: &'a AccountInfo, pub borsh_record: &'a AccountInfo, pub zero_copy_record: &'a AccountInfo, + pub one_byte_record: &'a AccountInfo, pub mint_signer: &'a AccountInfo, pub mint: &'a AccountInfo, pub token_vault: &'a AccountInfo, @@ -39,7 +40,7 @@ pub struct CreateAllAccounts<'a> { } impl<'a> CreateAllAccounts<'a> { - pub const FIXED_LEN: usize = 16; + pub const FIXED_LEN: usize = 17; pub fn parse( accounts: &'a [AccountInfo], @@ -50,17 +51,18 @@ impl<'a> CreateAllAccounts<'a> { let compression_config = &accounts[2]; let borsh_record = &accounts[3]; let zero_copy_record = &accounts[4]; - let mint_signer = &accounts[5]; - let mint = &accounts[6]; - let token_vault = &accounts[7]; - let vault_owner = &accounts[8]; - let ata_owner = &accounts[9]; - let user_ata = &accounts[10]; - let compressible_config = &accounts[11]; - let rent_sponsor = &accounts[12]; - let light_token_program = &accounts[13]; - let cpi_authority = &accounts[14]; - let system_program = &accounts[15]; + let one_byte_record = &accounts[5]; + let mint_signer = &accounts[6]; + let mint = &accounts[7]; + let token_vault = &accounts[8]; + let vault_owner = &accounts[9]; + let ata_owner = &accounts[10]; + let user_ata = &accounts[11]; + let compressible_config = &accounts[12]; + let rent_sponsor = &accounts[13]; + let light_token_program = &accounts[14]; + let cpi_authority = &accounts[15]; + let system_program = &accounts[16]; if !payer.is_signer() { return Err(ProgramError::MissingRequiredSignature); @@ -141,6 +143,41 @@ impl<'a> CreateAllAccounts<'a> { 
data[..8].copy_from_slice(&ZeroCopyRecord::LIGHT_DISCRIMINATOR);
         }
 
+        // Create OneByteRecord PDA
+        {
+            use light_account_pinocchio::LightDiscriminator;
+            let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len();
+            let space = disc_len + OneByteRecord::INIT_SPACE;
+            let seeds: &[&[u8]] = &[b"one_byte_record", &params.owner];
+            let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID);
+            if one_byte_record.key() != &expected_pda {
+                return Err(ProgramError::InvalidSeeds);
+            }
+            let rent = pinocchio::sysvars::rent::Rent::get()
+                .map_err(|_| ProgramError::UnsupportedSysvar)?;
+            let lamports = rent.minimum_balance(space);
+            let bump_bytes = [bump];
+            let seed_array = [
+                Seed::from(b"one_byte_record" as &[u8]),
+                Seed::from(params.owner.as_ref()),
+                Seed::from(bump_bytes.as_ref()),
+            ];
+            let signer = Signer::from(&seed_array);
+            pinocchio_system::instructions::CreateAccount {
+                from: payer,
+                to: one_byte_record,
+                lamports,
+                space: space as u64,
+                owner: &crate::ID,
+            }
+            .invoke_signed(&[signer])?;
+
+            let mut data = one_byte_record
+                .try_borrow_mut_data()
+                .map_err(|_| ProgramError::AccountBorrowFailed)?;
+            data[..disc_len].copy_from_slice(OneByteRecord::LIGHT_DISCRIMINATOR_SLICE);
+        }
+
         // Validate mint_signer PDA
         {
             let authority_key = authority.key();
@@ -175,6 +212,7 @@ impl<'a> CreateAllAccounts<'a> {
             compression_config,
             borsh_record,
             zero_copy_record,
+            one_byte_record,
             mint_signer,
             mint,
             token_vault,
@@ -186,8 +224,8 @@ impl<'a> CreateAllAccounts<'a> {
             light_token_program,
             cpi_authority,
             system_program,
-            mint_signers_slice: &accounts[5..6],
-            mints_slice: &accounts[6..7],
+            mint_signers_slice: &accounts[6..7],
+            mints_slice: &accounts[7..8],
         })
     }
 }
diff --git a/sdk-tests/pinocchio-light-program-test/src/all/processor.rs b/sdk-tests/pinocchio-light-program-test/src/all/processor.rs
index 801acdab07..e76448fa14 100644
--- a/sdk-tests/pinocchio-light-program-test/src/all/processor.rs
+++ 
b/sdk-tests/pinocchio-light-program-test/src/all/processor.rs @@ -13,7 +13,7 @@ pub fn process( ) -> Result<(), LightSdkTypesError> { use borsh::BorshDeserialize; - const NUM_LIGHT_PDAS: usize = 2; + const NUM_LIGHT_PDAS: usize = 3; const NUM_LIGHT_MINTS: usize = 1; const NUM_TOKENS: usize = 1; const NUM_ATAS: usize = 1; @@ -36,6 +36,7 @@ pub fn process( let borsh_record = ctx.borsh_record; let zero_copy_record = ctx.zero_copy_record; + let one_byte_record = ctx.one_byte_record; create_accounts::( [ @@ -45,6 +46,9 @@ pub fn process( PdaInitParam { account: ctx.zero_copy_record, }, + PdaInitParam { + account: ctx.one_byte_record, + }, ], |light_config, current_slot| { // Set compression_info on the Borsh record @@ -69,6 +73,20 @@ pub fn process( bytemuck::from_bytes_mut(record_bytes); record.set_decompressed(light_config, current_slot); } + // Set compression_info on the OneByteRecord + { + use light_account_pinocchio::LightDiscriminator; + let disc_len = crate::state::OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = one_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = + crate::state::OneByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } Ok(()) }, Some(CreateMintsInput { diff --git a/sdk-tests/pinocchio-light-program-test/src/lib.rs b/sdk-tests/pinocchio-light-program-test/src/lib.rs index 87e19732ac..4ef17d5b6c 100644 --- a/sdk-tests/pinocchio-light-program-test/src/lib.rs +++ b/sdk-tests/pinocchio-light-program-test/src/lib.rs @@ -277,6 +277,19 @@ fn process_create_all(accounts: &[AccountInfo], data: &[u8]) -> Result<(), Progr let record: &mut state::ZeroCopyRecord = bytemuck::from_bytes_mut(record_bytes); record.owner = 
params.owner;
     }
+    {
+        use light_account_pinocchio::LightDiscriminator;
+        let disc_len = state::OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len();
+        let mut ob_data = ctx
+            .one_byte_record
+            .try_borrow_mut_data()
+            .map_err(|_| ProgramError::AccountBorrowFailed)?;
+        let mut ob_record = state::OneByteRecord::try_from_slice(&ob_data[disc_len..])
+            .map_err(|_| ProgramError::BorshIoError)?;
+        ob_record.owner = params.owner;
+        let serialized = borsh::to_vec(&ob_record).map_err(|_| ProgramError::BorshIoError)?;
+        ob_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized);
+    }
 
     all::processor::process(&ctx, &params, remaining_accounts)
         .map_err(|e| ProgramError::Custom(u32::from(e)))?;
diff --git a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs
index c2bb6a4938..7e2393ad7c 100644
--- a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs
+++ b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs
@@ -24,10 +24,11 @@ use light_program_test::{
 use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID;
 use light_token::instruction::{LIGHT_TOKEN_CONFIG, LIGHT_TOKEN_RENT_SPONSOR};
 use light_token_interface::state::{token::Token, Mint};
+use light_account::LightDiscriminator;
 use pinocchio_light_program_test::{
     all::accounts::CreateAllParams, discriminators, LightAccountVariant, MinimalRecord,
-    MinimalRecordSeeds, VaultSeeds, ZeroCopyRecord, ZeroCopyRecordSeeds, MINT_SIGNER_SEED_A,
-    RECORD_SEED, VAULT_AUTH_SEED, VAULT_SEED,
+    MinimalRecordSeeds, OneByteRecord, OneByteRecordSeeds, VaultSeeds, ZeroCopyRecord,
+    ZeroCopyRecordSeeds, MINT_SIGNER_SEED_A, RECORD_SEED, VAULT_AUTH_SEED, VAULT_SEED,
 };
 use solana_instruction::{AccountMeta, Instruction};
 use solana_keypair::Keypair;
@@ -39,6 +40,7 @@ use solana_signer::Signer;
 struct TestPdas {
     record: Pubkey,
     zc_record: Pubkey,
+    one_byte: Pubkey,
     ata: Pubkey,
     ata_owner: Pubkey,
     vault: Pubkey,
@@ -51,6 +53,7 @@ struct CachedState {
     
record: MinimalRecord, zc_record: ZeroCopyRecord, + ob_record: OneByteRecord, ata_token: Token, vault_token: Token, owner: [u8; 32], @@ -108,6 +111,8 @@ async fn setup() -> (StressTestContext, TestPdas) { Pubkey::find_program_address(&[b"minimal_record", owner.as_ref()], &program_id); let (zc_record_pda, _) = Pubkey::find_program_address(&[RECORD_SEED, owner.as_ref()], &program_id); + let (one_byte_pda, _) = + Pubkey::find_program_address(&[b"one_byte_record", owner.as_ref()], &program_id); // Mint signer PDA let (mint_signer, mint_signer_bump) = Pubkey::find_program_address( @@ -132,6 +137,7 @@ async fn setup() -> (StressTestContext, TestPdas) { vec![ CreateAccountsProofInput::pda(record_pda), CreateAccountsProofInput::pda(zc_record_pda), + CreateAccountsProofInput::pda(one_byte_pda), CreateAccountsProofInput::mint(mint_signer), ], ) @@ -152,6 +158,7 @@ async fn setup() -> (StressTestContext, TestPdas) { AccountMeta::new_readonly(config_pda, false), AccountMeta::new(record_pda, false), AccountMeta::new(zc_record_pda, false), + AccountMeta::new(one_byte_pda, false), AccountMeta::new_readonly(mint_signer, false), AccountMeta::new(mint_pda, false), AccountMeta::new(vault, false), @@ -178,6 +185,7 @@ async fn setup() -> (StressTestContext, TestPdas) { let pdas = TestPdas { record: record_pda, zc_record: zc_record_pda, + one_byte: one_byte_pda, ata, ata_owner, vault, @@ -208,12 +216,18 @@ async fn refresh_cache( let zc_account = rpc.get_account(pdas.zc_record).await.unwrap().unwrap(); let zc_record: ZeroCopyRecord = *bytemuck::from_bytes(&zc_account.data[8..]); + let ob_account = rpc.get_account(pdas.one_byte).await.unwrap().unwrap(); + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let ob_record: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]).unwrap(); + let ata_token = parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data); let vault_token = 
parse_token(&rpc.get_account(pdas.vault).await.unwrap().unwrap().data); CachedState { record, zc_record, + ob_record, ata_token, vault_token, owner, @@ -264,6 +278,27 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C }; let zc_spec = PdaSpec::new(zc_interface, zc_variant, ctx.program_id); + // PDA: OneByteRecord + let ob_interface = ctx + .rpc + .get_account_interface(&pdas.one_byte, None) + .await + .expect("failed to get OneByteRecord interface") + .value + .expect("OneByteRecord interface should exist"); + assert!(ob_interface.is_cold(), "OneByteRecord should be cold"); + + let ob_data: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_interface.account.data[8..]) + .expect("Failed to parse OneByteRecord from interface"); + let ob_variant = LightAccountVariant::OneByteRecord { + seeds: OneByteRecordSeeds { + owner: cached.owner, + }, + data: ob_data, + }; + let ob_spec = PdaSpec::new(ob_interface, ob_variant, ctx.program_id); + // ATA let ata_interface = ctx .rpc @@ -318,6 +353,7 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C let specs: Vec> = vec![ AccountSpec::Pda(record_spec), AccountSpec::Pda(zc_spec), + AccountSpec::Pda(ob_spec), AccountSpec::Mint(mint_ai), AccountSpec::Ata(Box::new(ata_interface)), AccountSpec::Pda(vault_spec), @@ -337,6 +373,7 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C for (pda, name) in [ (&pdas.record, "MinimalRecord"), (&pdas.zc_record, "ZeroCopyRecord"), + (&pdas.one_byte, "OneByteRecord"), (&pdas.ata, "ATA"), (&pdas.vault, "Vault"), (&pdas.mint, "Mint"), @@ -355,6 +392,7 @@ async fn compress_all(ctx: &mut StressTestContext, pdas: &TestPdas) { for (pda, name) in [ (&pdas.record, "MinimalRecord"), (&pdas.zc_record, "ZeroCopyRecord"), + (&pdas.one_byte, "OneByteRecord"), (&pdas.ata, "ATA"), (&pdas.vault, "Vault"), (&pdas.mint, "Mint"), @@ -395,6 +433,20 @@ async fn assert_all_state( "ZeroCopyRecord mismatch at 
iteration {iteration}" ); + // OneByteRecord + let ob_account = rpc.get_account(pdas.one_byte).await.unwrap().unwrap(); + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let actual_ob: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]).unwrap(); + let expected_ob = OneByteRecord { + compression_info: shared::expected_compression_info(&actual_ob.compression_info), + ..cached.ob_record.clone() + }; + assert_eq!( + actual_ob, expected_ob, + "OneByteRecord mismatch at iteration {iteration}" + ); + // ATA let actual_ata = parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data); let expected_ata = Token { @@ -436,6 +488,7 @@ async fn test_stress_20_iterations() { for (pda, name) in [ (&pdas.record, "MinimalRecord"), (&pdas.zc_record, "ZeroCopyRecord"), + (&pdas.one_byte, "OneByteRecord"), (&pdas.ata, "ATA"), (&pdas.vault, "Vault"), (&pdas.mint, "Mint"), diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs index 9c8dd630f8..a461461505 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs @@ -10,10 +10,11 @@ use light_program_test::{program_test::TestRpc, Rpc}; use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID; use light_token::instruction::{LIGHT_TOKEN_CONFIG, LIGHT_TOKEN_RENT_SPONSOR}; use light_token_interface::state::token::{AccountState, Token, ACCOUNT_TYPE_TOKEN_ACCOUNT}; +use light_account::LightDiscriminator; use pinocchio_light_program_test::{ all::accounts::CreateAllParams, discriminators, LightAccountVariant, MinimalRecord, - MinimalRecordSeeds, VaultSeeds, ZeroCopyRecord, ZeroCopyRecordSeeds, MINT_SIGNER_SEED_A, - RECORD_SEED, VAULT_AUTH_SEED, VAULT_SEED, + MinimalRecordSeeds, OneByteRecord, OneByteRecordSeeds, VaultSeeds, ZeroCopyRecord, + ZeroCopyRecordSeeds, MINT_SIGNER_SEED_A, RECORD_SEED, VAULT_AUTH_SEED, 
VAULT_SEED, }; use solana_instruction::{AccountMeta, Instruction}; use solana_keypair::Keypair; @@ -38,6 +39,10 @@ async fn test_create_all_derive() { let (zc_record_pda, _) = Pubkey::find_program_address(&[RECORD_SEED, owner.as_ref()], &program_id); + // PDA: OneByteRecord + let (one_byte_pda, _) = + Pubkey::find_program_address(&[b"one_byte_record", owner.as_ref()], &program_id); + // Mint signer PDA let (mint_signer, mint_signer_bump) = Pubkey::find_program_address( &[MINT_SIGNER_SEED_A, authority.pubkey().as_ref()], @@ -61,6 +66,7 @@ async fn test_create_all_derive() { vec![ CreateAccountsProofInput::pda(record_pda), CreateAccountsProofInput::pda(zc_record_pda), + CreateAccountsProofInput::pda(one_byte_pda), CreateAccountsProofInput::mint(mint_signer), ], ) @@ -80,23 +86,25 @@ async fn test_create_all_derive() { // [2] compression_config // [3] borsh_record (writable) // [4] zero_copy_record (writable) - // [5] mint_signer - // [6] mint (writable) - // [7] token_vault (writable) - // [8] vault_owner - // [9] ata_owner - // [10] user_ata (writable) - // [11] compressible_config (LIGHT_TOKEN_CONFIG) - // [12] rent_sponsor (LIGHT_TOKEN_RENT_SPONSOR, writable) - // [13] light_token_program - // [14] cpi_authority - // [15] system_program + // [5] one_byte_record (writable) + // [6] mint_signer + // [7] mint (writable) + // [8] token_vault (writable) + // [9] vault_owner + // [10] ata_owner + // [11] user_ata (writable) + // [12] compressible_config (LIGHT_TOKEN_CONFIG) + // [13] rent_sponsor (LIGHT_TOKEN_RENT_SPONSOR, writable) + // [14] light_token_program + // [15] cpi_authority + // [16] system_program let accounts = vec![ AccountMeta::new(payer.pubkey(), true), AccountMeta::new_readonly(authority.pubkey(), true), AccountMeta::new_readonly(env.config_pda, false), AccountMeta::new(record_pda, false), AccountMeta::new(zc_record_pda, false), + AccountMeta::new(one_byte_pda, false), AccountMeta::new_readonly(mint_signer, false), AccountMeta::new(mint_pda, false), 
AccountMeta::new(vault, false), @@ -146,6 +154,22 @@ async fn test_create_all_derive() { ); assert_eq!(zc_record.counter, 0, "ZC record counter should be 0"); + let ob_account = rpc + .get_account(one_byte_pda) + .await + .unwrap() + .expect("OneByteRecord PDA should exist"); + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &ob_account.data[..disc_len], + OneByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "OneByteRecord discriminator should match" + ); + let actual_ob: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]) + .expect("Failed to deserialize OneByteRecord"); + assert_eq!(actual_ob.owner, owner.to_bytes(), "OneByteRecord owner should match"); + let ata_account = rpc .get_account(ata) .await @@ -198,6 +222,7 @@ async fn test_create_all_derive() { shared::assert_onchain_closed(&mut rpc, &record_pda, "MinimalRecord").await; shared::assert_onchain_closed(&mut rpc, &zc_record_pda, "ZeroCopyRecord").await; + shared::assert_onchain_closed(&mut rpc, &one_byte_pda, "OneByteRecord").await; shared::assert_onchain_closed(&mut rpc, &ata, "ATA").await; shared::assert_onchain_closed(&mut rpc, &vault, "Vault").await; shared::assert_onchain_closed(&mut rpc, &mint_pda, "Mint").await; @@ -244,6 +269,27 @@ async fn test_create_all_derive() { }; let zc_spec = PdaSpec::new(zc_interface, zc_variant, program_id); + // PDA: OneByteRecord + let ob_interface = rpc + .get_account_interface(&one_byte_pda, None) + .await + .expect("failed to get OneByteRecord interface") + .value + .expect("OneByteRecord interface should exist"); + assert!(ob_interface.is_cold(), "OneByteRecord should be cold"); + + // The indexer returns: [8-byte LIGHT_DISCRIMINATOR] + [borsh(OneByteRecord)] + let ob_data: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_interface.account.data[8..]) + .expect("Failed to parse OneByteRecord from interface"); + let ob_variant = LightAccountVariant::OneByteRecord { + seeds: OneByteRecordSeeds 
{ + owner: owner.to_bytes(), + }, + data: ob_data, + }; + let ob_spec = PdaSpec::new(ob_interface, ob_variant, program_id); + // ATA let ata_interface = rpc .get_associated_token_account_interface(&ata_owner, &mint_pda, None) @@ -295,6 +341,7 @@ async fn test_create_all_derive() { let specs: Vec> = vec![ AccountSpec::Pda(record_spec), AccountSpec::Pda(zc_spec), + AccountSpec::Pda(ob_spec), AccountSpec::Mint(mint_ai), AccountSpec::Ata(Box::new(ata_interface)), AccountSpec::Pda(vault_spec), @@ -311,6 +358,7 @@ async fn test_create_all_derive() { // PHASE 4: Assert state preserved after decompression shared::assert_onchain_exists(&mut rpc, &record_pda, "MinimalRecord").await; shared::assert_onchain_exists(&mut rpc, &zc_record_pda, "ZeroCopyRecord").await; + shared::assert_onchain_exists(&mut rpc, &one_byte_pda, "OneByteRecord").await; shared::assert_onchain_exists(&mut rpc, &ata, "ATA").await; shared::assert_onchain_exists(&mut rpc, &vault, "Vault").await; shared::assert_onchain_exists(&mut rpc, &mint_pda, "Mint").await; @@ -341,6 +389,17 @@ async fn test_create_all_derive() { "ZeroCopyRecord should match after decompression" ); + // OneByteRecord + let ob_account = rpc.get_account(one_byte_pda).await.unwrap().unwrap(); + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let actual_ob: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]).unwrap(); + let expected_ob = OneByteRecord { + compression_info: shared::expected_compression_info(&actual_ob.compression_info), + owner: owner.to_bytes(), + }; + assert_eq!(actual_ob, expected_ob, "OneByteRecord should match after decompression"); + // ATA let actual_ata: Token = shared::parse_token(&rpc.get_account(ata).await.unwrap().unwrap().data); let expected_ata = Token { From e1fde1a177290f3306043f822309f7064697968c Mon Sep 17 00:00:00 2001 From: ananas Date: Thu, 19 Feb 2026 19:18:30 +0000 Subject: [PATCH 3/7] randomize tests and format --- Cargo.lock | 1 + 
.../macros/src/light_pdas/account/derive.rs | 7 +- .../pinocchio-light-program-test/Cargo.toml | 1 + .../src/one_byte_pda/processor.rs | 2 +- .../tests/stress_test.rs | 599 ++++++++++-------- .../tests/test_create_all.rs | 21 +- 6 files changed, 371 insertions(+), 260 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index af5725380a..0b23ea663c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5334,6 +5334,7 @@ dependencies = [ "pinocchio", "pinocchio-pubkey", "pinocchio-system", + "rand 0.8.5", "solana-account", "solana-instruction", "solana-keypair", diff --git a/sdk-libs/macros/src/light_pdas/account/derive.rs b/sdk-libs/macros/src/light_pdas/account/derive.rs index dad34d9ca0..eee7e5a5b9 100644 --- a/sdk-libs/macros/src/light_pdas/account/derive.rs +++ b/sdk-libs/macros/src/light_pdas/account/derive.rs @@ -156,7 +156,7 @@ fn parse_pinocchio_discriminator(attrs: &[syn::Attribute]) -> Result>>()?; if bytes.is_empty() { return Err(syn::Error::new_spanned( - &arr, + arr, "discriminator must have at least one byte", )); } @@ -882,7 +882,10 @@ mod tests { } }; let result = derive_light_pinocchio_account(input); - assert!(result.is_err(), "Empty discriminator array should be rejected"); + assert!( + result.is_err(), + "Empty discriminator array should be rejected" + ); let err = result.unwrap_err().to_string(); assert!( err.contains("at least one byte"), diff --git a/sdk-tests/pinocchio-light-program-test/Cargo.toml b/sdk-tests/pinocchio-light-program-test/Cargo.toml index 8606fe3193..496d4cf552 100644 --- a/sdk-tests/pinocchio-light-program-test/Cargo.toml +++ b/sdk-tests/pinocchio-light-program-test/Cargo.toml @@ -33,6 +33,7 @@ light-token-client = { workspace = true } light-account = { workspace = true } light-batched-merkle-tree = { workspace = true } tokio = { workspace = true } +rand = { workspace = true } solana-sdk = { workspace = true } solana-account = { workspace = true } solana-keypair = { workspace = true } diff --git 
a/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs index 573692b8fd..035ffea9a8 100644 --- a/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs +++ b/sdk-tests/pinocchio-light-program-test/src/one_byte_pda/processor.rs @@ -1,6 +1,6 @@ use borsh::BorshDeserialize; use light_account_pinocchio::{ - prepare_compressed_account_on_init, CompressionInfo, CompressedCpiContext, CpiAccounts, + prepare_compressed_account_on_init, CompressedCpiContext, CompressionInfo, CpiAccounts, CpiAccountsConfig, InstructionDataInvokeCpiWithAccountInfo, InvokeLightSystemProgram, LightConfig, LightDiscriminator, LightSdkTypesError, PackedAddressTreeInfoExt, }; diff --git a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs index 7e2393ad7c..42e41ece33 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs @@ -1,12 +1,10 @@ /// Stress test: 20-iteration compression/decompression cycles for all account types. /// -/// Tests repeated cycles of: -/// 1. Decompress all accounts -/// 2. Assert cached state matches on-chain state -/// 3. Update cache from on-chain state -/// 4. Compress all accounts (warp forward) +/// Each iteration randomly selects a subset of accounts to decompress, leaving the rest +/// compressed. Tests that hot/cold accounts coexist correctly across repeated cycles. 
mod shared; +use light_account::LightDiscriminator; use light_account_pinocchio::token::TokenDataWithSeeds; use light_batched_merkle_tree::{ initialize_address_tree::InitAddressTreeAccountsInstructionData, @@ -24,19 +22,54 @@ use light_program_test::{ use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID; use light_token::instruction::{LIGHT_TOKEN_CONFIG, LIGHT_TOKEN_RENT_SPONSOR}; use light_token_interface::state::{token::Token, Mint}; -use light_account::LightDiscriminator; use pinocchio_light_program_test::{ all::accounts::CreateAllParams, discriminators, LightAccountVariant, MinimalRecord, MinimalRecordSeeds, OneByteRecord, OneByteRecordSeeds, VaultSeeds, ZeroCopyRecord, ZeroCopyRecordSeeds, MINT_SIGNER_SEED_A, RECORD_SEED, VAULT_AUTH_SEED, VAULT_SEED, }; +use rand::{seq::SliceRandom, thread_rng, Rng}; use solana_instruction::{AccountMeta, Instruction}; use solana_keypair::Keypair; use solana_pubkey::Pubkey; use solana_signer::Signer; -/// Stores all derived PDAs -#[allow(dead_code)] +/// Which accounts are hot (decompressed / on-chain) this iteration. +#[derive(Debug, Clone)] +struct HotSet { + record: bool, + zc_record: bool, + one_byte: bool, + /// Mint must be true whenever ata or vault is true. + mint: bool, + ata: bool, + vault: bool, +} + +impl HotSet { + /// Random subset. Ensures Mint is hot when ATA or Vault is, and at least + /// one account is always hot. + fn random(rng: &mut impl Rng) -> Self { + let ata = rng.gen_bool(0.7); + let vault = rng.gen_bool(0.7); + // Mint must precede ATA/Vault, so force it hot when either is selected. + let mint = ata || vault || rng.gen_bool(0.7); + let mut hot = Self { + record: rng.gen_bool(0.7), + zc_record: rng.gen_bool(0.7), + one_byte: rng.gen_bool(0.7), + mint, + ata, + vault, + }; + // Guarantee at least one account is hot. + if !hot.record && !hot.zc_record && !hot.one_byte && !hot.mint { + hot.record = true; + } + hot + } +} + +/// Stores all derived PDAs. 
struct TestPdas { record: Pubkey, zc_record: Pubkey, @@ -44,7 +77,6 @@ struct TestPdas { ata: Pubkey, ata_owner: Pubkey, vault: Pubkey, - vault_owner: Pubkey, mint: Pubkey, } @@ -59,7 +91,7 @@ struct CachedState { owner: [u8; 32], } -/// Test context +/// Test context. struct StressTestContext { rpc: LightProgramTest, payer: Keypair, @@ -71,7 +103,7 @@ fn parse_token(data: &[u8]) -> Token { borsh::BorshDeserialize::deserialize(&mut &data[..]).unwrap() } -/// Setup environment with larger queues for stress test +/// Setup environment with larger queues for stress test. async fn setup() -> (StressTestContext, TestPdas) { let program_id = Pubkey::new_from_array(pinocchio_light_program_test::ID); let mut config = ProgramTestConfig::new_v2( @@ -114,23 +146,19 @@ async fn setup() -> (StressTestContext, TestPdas) { let (one_byte_pda, _) = Pubkey::find_program_address(&[b"one_byte_record", owner.as_ref()], &program_id); - // Mint signer PDA let (mint_signer, mint_signer_bump) = Pubkey::find_program_address( &[MINT_SIGNER_SEED_A, authority.pubkey().as_ref()], &program_id, ); let (mint_pda, _) = light_token::instruction::find_mint_address(&mint_signer); - // Token vault PDA (uses the mint we're creating) let (vault_owner, _) = Pubkey::find_program_address(&[VAULT_AUTH_SEED], &program_id); let (vault, vault_bump) = Pubkey::find_program_address(&[VAULT_SEED, mint_pda.as_ref()], &program_id); - // ATA (uses the mint we're creating) let ata_owner = payer.pubkey(); let ata = light_token::instruction::derive_token_ata(&ata_owner, &mint_pda); - // Create all accounts in one instruction let proof_result = get_create_accounts_proof( &rpc, &program_id, @@ -151,7 +179,6 @@ async fn setup() -> (StressTestContext, TestPdas) { token_vault_bump: vault_bump, }; - // Account order per all/accounts.rs let accounts = vec![ AccountMeta::new(payer.pubkey(), true), AccountMeta::new_readonly(authority.pubkey(), true), @@ -189,7 +216,6 @@ async fn setup() -> (StressTestContext, TestPdas) { ata, 
ata_owner, vault, - vault_owner, mint: mint_pda, }; @@ -203,26 +229,46 @@ async fn setup() -> (StressTestContext, TestPdas) { (ctx, pdas) } -/// Re-read all on-chain accounts into the cache -async fn refresh_cache( +/// Read on-chain state for all accounts in `hot`, keep old values for the rest. +async fn refresh_cache_partial( rpc: &mut LightProgramTest, pdas: &TestPdas, - owner: [u8; 32], + hot: &HotSet, + old: &CachedState, ) -> CachedState { - let record_account = rpc.get_account(pdas.record).await.unwrap().unwrap(); - let record: MinimalRecord = - borsh::BorshDeserialize::deserialize(&mut &record_account.data[8..]).unwrap(); + let record = if hot.record { + let data = rpc.get_account(pdas.record).await.unwrap().unwrap().data; + borsh::BorshDeserialize::deserialize(&mut &data[8..]).unwrap() + } else { + old.record.clone() + }; - let zc_account = rpc.get_account(pdas.zc_record).await.unwrap().unwrap(); - let zc_record: ZeroCopyRecord = *bytemuck::from_bytes(&zc_account.data[8..]); + let zc_record = if hot.zc_record { + let data = rpc.get_account(pdas.zc_record).await.unwrap().unwrap().data; + *bytemuck::from_bytes(&data[8..]) + } else { + old.zc_record + }; - let ob_account = rpc.get_account(pdas.one_byte).await.unwrap().unwrap(); - let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); - let ob_record: OneByteRecord = - borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]).unwrap(); + let ob_record = if hot.one_byte { + let data = rpc.get_account(pdas.one_byte).await.unwrap().unwrap().data; + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + borsh::BorshDeserialize::deserialize(&mut &data[disc_len..]).unwrap() + } else { + old.ob_record.clone() + }; - let ata_token = parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data); - let vault_token = parse_token(&rpc.get_account(pdas.vault).await.unwrap().unwrap().data); + let ata_token = if hot.ata { + 
parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data) + } else { + old.ata_token.clone() + }; + + let vault_token = if hot.vault { + parse_token(&rpc.get_account(pdas.vault).await.unwrap().unwrap().data) + } else { + old.vault_token.clone() + }; CachedState { record, @@ -230,159 +276,187 @@ async fn refresh_cache( ob_record, ata_token, vault_token, - owner, + owner: old.owner, } } -/// Decompress all accounts -async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &CachedState) { - // PDA: MinimalRecord - let record_interface = ctx - .rpc - .get_account_interface(&pdas.record, None) - .await - .expect("failed to get MinimalRecord interface") - .value - .expect("MinimalRecord interface should exist"); - assert!(record_interface.is_cold(), "MinimalRecord should be cold"); - - let record_data: MinimalRecord = - borsh::BorshDeserialize::deserialize(&mut &record_interface.account.data[8..]) - .expect("Failed to parse MinimalRecord"); - let record_variant = LightAccountVariant::MinimalRecord { - seeds: MinimalRecordSeeds { - owner: cached.owner, - }, - data: record_data, - }; - let record_spec = PdaSpec::new(record_interface, record_variant, ctx.program_id); +/// Decompress only the accounts listed in `hot`. Mint is always placed first in the +/// specs vec; everything else is shuffled. 
+async fn decompress_subset( + ctx: &mut StressTestContext, + pdas: &TestPdas, + cached: &CachedState, + hot: &HotSet, +) { + let mut specs: Vec> = Vec::new(); - // PDA: ZeroCopyRecord - let zc_interface = ctx - .rpc - .get_account_interface(&pdas.zc_record, None) - .await - .expect("failed to get ZeroCopyRecord interface") - .value - .expect("ZeroCopyRecord interface should exist"); - assert!(zc_interface.is_cold(), "ZeroCopyRecord should be cold"); - - let zc_data: ZeroCopyRecord = - borsh::BorshDeserialize::deserialize(&mut &zc_interface.account.data[8..]) - .expect("Failed to parse ZeroCopyRecord"); - let zc_variant = LightAccountVariant::ZeroCopyRecord { - seeds: ZeroCopyRecordSeeds { - owner: cached.owner, - }, - data: zc_data, - }; - let zc_spec = PdaSpec::new(zc_interface, zc_variant, ctx.program_id); + // Mint first (ATA and Vault depend on it). + if hot.mint { + let mint_iface = ctx + .rpc + .get_mint_interface(&pdas.mint, None) + .await + .expect("failed to get mint interface") + .value + .expect("mint interface should exist"); + assert!(mint_iface.is_cold(), "Mint should be cold"); + specs.push(AccountSpec::Mint(AccountInterface::from(mint_iface))); + } - // PDA: OneByteRecord - let ob_interface = ctx - .rpc - .get_account_interface(&pdas.one_byte, None) - .await - .expect("failed to get OneByteRecord interface") - .value - .expect("OneByteRecord interface should exist"); - assert!(ob_interface.is_cold(), "OneByteRecord should be cold"); - - let ob_data: OneByteRecord = - borsh::BorshDeserialize::deserialize(&mut &ob_interface.account.data[8..]) - .expect("Failed to parse OneByteRecord from interface"); - let ob_variant = LightAccountVariant::OneByteRecord { - seeds: OneByteRecordSeeds { - owner: cached.owner, - }, - data: ob_data, - }; - let ob_spec = PdaSpec::new(ob_interface, ob_variant, ctx.program_id); + // Remaining specs, shuffled. 
+ let mut rest: Vec> = Vec::new(); - // ATA - let ata_interface = ctx - .rpc - .get_associated_token_account_interface(&pdas.ata_owner, &pdas.mint, None) - .await - .expect("failed to get ATA interface") - .value - .expect("ATA interface should exist"); - assert!(ata_interface.is_cold(), "ATA should be cold"); + if hot.record { + let iface = ctx + .rpc + .get_account_interface(&pdas.record, None) + .await + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); + assert!(iface.is_cold(), "MinimalRecord should be cold"); + let data: MinimalRecord = + borsh::BorshDeserialize::deserialize(&mut &iface.account.data[8..]) + .expect("Failed to parse MinimalRecord"); + let variant = LightAccountVariant::MinimalRecord { + seeds: MinimalRecordSeeds { + owner: cached.owner, + }, + data, + }; + rest.push(AccountSpec::Pda(PdaSpec::new( + iface, + variant, + ctx.program_id, + ))); + } - // Token PDA: Vault - let vault_iface = ctx - .rpc - .get_token_account_interface(&pdas.vault, None) - .await - .expect("failed to get vault interface") - .value - .expect("vault interface should exist"); - assert!(vault_iface.is_cold(), "Vault should be cold"); + if hot.zc_record { + let iface = ctx + .rpc + .get_account_interface(&pdas.zc_record, None) + .await + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); + assert!(iface.is_cold(), "ZeroCopyRecord should be cold"); + let data: ZeroCopyRecord = + borsh::BorshDeserialize::deserialize(&mut &iface.account.data[8..]) + .expect("Failed to parse ZeroCopyRecord"); + let variant = LightAccountVariant::ZeroCopyRecord { + seeds: ZeroCopyRecordSeeds { + owner: cached.owner, + }, + data, + }; + rest.push(AccountSpec::Pda(PdaSpec::new( + iface, + variant, + ctx.program_id, + ))); + } - let vault_token_data: Token = - borsh::BorshDeserialize::deserialize(&mut &vault_iface.account.data[..]) - .expect("Failed to parse vault Token"); - let 
vault_variant = LightAccountVariant::Vault(TokenDataWithSeeds { - seeds: VaultSeeds { - mint: pdas.mint.to_bytes(), - }, - token_data: vault_token_data, - }); - let vault_compressed = vault_iface - .compressed() - .expect("cold vault must have compressed data"); - let vault_interface = AccountInterface { - key: vault_iface.key, - account: vault_iface.account.clone(), - cold: Some(vault_compressed.account.clone()), - }; - let vault_spec = PdaSpec::new(vault_interface, vault_variant, ctx.program_id); + if hot.one_byte { + let iface = ctx + .rpc + .get_account_interface(&pdas.one_byte, None) + .await + .expect("failed to get OneByteRecord interface") + .value + .expect("OneByteRecord interface should exist"); + assert!(iface.is_cold(), "OneByteRecord should be cold"); + let data: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &iface.account.data[8..]) + .expect("Failed to parse OneByteRecord"); + let variant = LightAccountVariant::OneByteRecord { + seeds: OneByteRecordSeeds { + owner: cached.owner, + }, + data, + }; + rest.push(AccountSpec::Pda(PdaSpec::new( + iface, + variant, + ctx.program_id, + ))); + } - // Mint - let mint_iface = ctx - .rpc - .get_mint_interface(&pdas.mint, None) - .await - .expect("failed to get mint interface") - .value - .expect("mint interface should exist"); - assert!(mint_iface.is_cold(), "Mint should be cold"); - let mint_ai = AccountInterface::from(mint_iface); - - // Mint must come before ATA and vault since they depend on mint being decompressed - let specs: Vec> = vec![ - AccountSpec::Pda(record_spec), - AccountSpec::Pda(zc_spec), - AccountSpec::Pda(ob_spec), - AccountSpec::Mint(mint_ai), - AccountSpec::Ata(Box::new(ata_interface)), - AccountSpec::Pda(vault_spec), - ]; + if hot.ata { + let iface = ctx + .rpc + .get_associated_token_account_interface(&pdas.ata_owner, &pdas.mint, None) + .await + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); + assert!(iface.is_cold(), "ATA should be 
cold"); + rest.push(AccountSpec::Ata(Box::new(iface))); + } - let decompress_ixs = - create_load_instructions(&specs, ctx.payer.pubkey(), ctx.config_pda, &ctx.rpc) + if hot.vault { + let iface = ctx + .rpc + .get_token_account_interface(&pdas.vault, None) .await - .expect("create_load_instructions should succeed"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); + assert!(iface.is_cold(), "Vault should be cold"); + let token_data: Token = borsh::BorshDeserialize::deserialize(&mut &iface.account.data[..]) + .expect("Failed to parse vault Token"); + let variant = LightAccountVariant::Vault(TokenDataWithSeeds { + seeds: VaultSeeds { + mint: pdas.mint.to_bytes(), + }, + token_data, + }); + let compressed = iface + .compressed() + .expect("cold vault must have compressed data"); + let vault_interface = AccountInterface { + key: iface.key, + account: iface.account.clone(), + cold: Some(compressed.account.clone()), + }; + rest.push(AccountSpec::Pda(PdaSpec::new( + vault_interface, + variant, + ctx.program_id, + ))); + } + + rest.shuffle(&mut thread_rng()); + specs.extend(rest); + + if specs.is_empty() { + return; + } + + let ixs = create_load_instructions(&specs, ctx.payer.pubkey(), ctx.config_pda, &ctx.rpc) + .await + .expect("create_load_instructions should succeed"); ctx.rpc - .create_and_send_transaction(&decompress_ixs, &ctx.payer.pubkey(), &[&ctx.payer]) + .create_and_send_transaction(&ixs, &ctx.payer.pubkey(), &[&ctx.payer]) .await .expect("Decompression should succeed"); - // Verify all decompressed accounts exist on-chain - for (pda, name) in [ - (&pdas.record, "MinimalRecord"), - (&pdas.zc_record, "ZeroCopyRecord"), - (&pdas.one_byte, "OneByteRecord"), - (&pdas.ata, "ATA"), - (&pdas.vault, "Vault"), - (&pdas.mint, "Mint"), + // Assert hot accounts are now on-chain. 
+ for (flag, pda, name) in [ + (hot.record, &pdas.record, "MinimalRecord"), + (hot.zc_record, &pdas.zc_record, "ZeroCopyRecord"), + (hot.one_byte, &pdas.one_byte, "OneByteRecord"), + (hot.mint, &pdas.mint, "Mint"), + (hot.ata, &pdas.ata, "ATA"), + (hot.vault, &pdas.vault, "Vault"), ] { - shared::assert_onchain_exists(&mut ctx.rpc, pda, name).await; + if flag { + shared::assert_onchain_exists(&mut ctx.rpc, pda, name).await; + } } } -/// Compress all accounts by warping forward epochs +/// Compress all accounts by warping forward. Everything goes cold regardless of what was hot. async fn compress_all(ctx: &mut StressTestContext, pdas: &TestPdas) { ctx.rpc .warp_slot_forward(SLOTS_PER_EPOCH * 100) @@ -401,83 +475,84 @@ async fn compress_all(ctx: &mut StressTestContext, pdas: &TestPdas) { } } -/// Full-struct assertions for all accounts against cached state -async fn assert_all_state( +/// Assert on-chain state only for accounts in `hot`. +async fn assert_hot_state( rpc: &mut LightProgramTest, pdas: &TestPdas, cached: &CachedState, + hot: &HotSet, iteration: usize, ) { - // MinimalRecord - let account = rpc.get_account(pdas.record).await.unwrap().unwrap(); - let actual_record: MinimalRecord = - borsh::BorshDeserialize::deserialize(&mut &account.data[8..]).unwrap(); - let expected_record = MinimalRecord { - compression_info: shared::expected_compression_info(&actual_record.compression_info), - ..cached.record.clone() - }; - assert_eq!( - actual_record, expected_record, - "MinimalRecord mismatch at iteration {iteration}" - ); + if hot.record { + let account = rpc.get_account(pdas.record).await.unwrap().unwrap(); + let actual: MinimalRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[8..]).unwrap(); + let expected = MinimalRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + ..cached.record.clone() + }; + assert_eq!( + actual, expected, + "MinimalRecord mismatch at iteration {iteration}" + ); + } - // ZeroCopyRecord - 
let account = rpc.get_account(pdas.zc_record).await.unwrap().unwrap(); - let actual_zc: &ZeroCopyRecord = bytemuck::from_bytes(&account.data[8..]); - let expected_zc = ZeroCopyRecord { - compression_info: shared::expected_compression_info(&actual_zc.compression_info), - ..cached.zc_record - }; - assert_eq!( - *actual_zc, expected_zc, - "ZeroCopyRecord mismatch at iteration {iteration}" - ); + if hot.zc_record { + let account = rpc.get_account(pdas.zc_record).await.unwrap().unwrap(); + let actual: &ZeroCopyRecord = bytemuck::from_bytes(&account.data[8..]); + let expected = ZeroCopyRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + ..cached.zc_record + }; + assert_eq!( + *actual, expected, + "ZeroCopyRecord mismatch at iteration {iteration}" + ); + } - // OneByteRecord - let ob_account = rpc.get_account(pdas.one_byte).await.unwrap().unwrap(); - let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); - let actual_ob: OneByteRecord = - borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]).unwrap(); - let expected_ob = OneByteRecord { - compression_info: shared::expected_compression_info(&actual_ob.compression_info), - ..cached.ob_record.clone() - }; - assert_eq!( - actual_ob, expected_ob, - "OneByteRecord mismatch at iteration {iteration}" - ); + if hot.one_byte { + let account = rpc.get_account(pdas.one_byte).await.unwrap().unwrap(); + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let actual: OneByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + let expected = OneByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + ..cached.ob_record.clone() + }; + assert_eq!( + actual, expected, + "OneByteRecord mismatch at iteration {iteration}" + ); + } - // ATA - let actual_ata = parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data); - let expected_ata = Token { - extensions: 
actual_ata.extensions.clone(), - ..cached.ata_token.clone() - }; - assert_eq!( - actual_ata, expected_ata, - "ATA mismatch at iteration {iteration}" - ); + if hot.ata { + let actual = parse_token(&rpc.get_account(pdas.ata).await.unwrap().unwrap().data); + let expected = Token { + extensions: actual.extensions.clone(), + ..cached.ata_token.clone() + }; + assert_eq!(actual, expected, "ATA mismatch at iteration {iteration}"); + } - // Vault - let actual_vault = parse_token(&rpc.get_account(pdas.vault).await.unwrap().unwrap().data); - let expected_vault = Token { - extensions: actual_vault.extensions.clone(), - ..cached.vault_token.clone() - }; - assert_eq!( - actual_vault, expected_vault, - "Vault mismatch at iteration {iteration}" - ); + if hot.vault { + let actual = parse_token(&rpc.get_account(pdas.vault).await.unwrap().unwrap().data); + let expected = Token { + extensions: actual.extensions.clone(), + ..cached.vault_token.clone() + }; + assert_eq!(actual, expected, "Vault mismatch at iteration {iteration}"); + } - // Mint - let actual_mint: Mint = borsh::BorshDeserialize::deserialize( - &mut &rpc.get_account(pdas.mint).await.unwrap().unwrap().data[..], - ) - .unwrap(); - assert_eq!( - actual_mint.base.decimals, 9, - "Mint decimals mismatch at iteration {iteration}" - ); + if hot.mint { + let actual: Mint = borsh::BorshDeserialize::deserialize( + &mut &rpc.get_account(pdas.mint).await.unwrap().unwrap().data[..], + ) + .unwrap(); + assert_eq!( + actual.base.decimals, 9, + "Mint decimals mismatch at iteration {iteration}" + ); + } } #[tokio::test] @@ -496,32 +571,58 @@ async fn test_stress_20_iterations() { shared::assert_onchain_exists(&mut ctx.rpc, pda, name).await; } - // Cache initial state - let owner = { - let account = ctx.rpc.get_account(pdas.record).await.unwrap().unwrap(); - let record: MinimalRecord = - borsh::BorshDeserialize::deserialize(&mut &account.data[8..]).unwrap(); - record.owner + // Read initial state — all accounts are on-chain right after 
creation. + let record_data = ctx + .rpc + .get_account(pdas.record) + .await + .unwrap() + .unwrap() + .data; + let owner: [u8; 32] = { + let r: MinimalRecord = + borsh::BorshDeserialize::deserialize(&mut &record_data[8..]).unwrap(); + r.owner + }; + let zc_data = ctx + .rpc + .get_account(pdas.zc_record) + .await + .unwrap() + .unwrap() + .data; + let ob_data = ctx + .rpc + .get_account(pdas.one_byte) + .await + .unwrap() + .unwrap() + .data; + let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut cached = CachedState { + record: borsh::BorshDeserialize::deserialize(&mut &record_data[8..]).unwrap(), + zc_record: *bytemuck::from_bytes(&zc_data[8..]), + ob_record: borsh::BorshDeserialize::deserialize(&mut &ob_data[disc_len..]).unwrap(), + ata_token: parse_token(&ctx.rpc.get_account(pdas.ata).await.unwrap().unwrap().data), + vault_token: parse_token(&ctx.rpc.get_account(pdas.vault).await.unwrap().unwrap().data), + owner, }; - let mut cached = refresh_cache(&mut ctx.rpc, &pdas, owner).await; - // First compression + // First compression — all accounts go cold. compress_all(&mut ctx, &pdas).await; - // Main loop: 20 iterations - for i in 0..20 { - println!("--- Iteration {i} ---"); + let mut rng = thread_rng(); - // Decompress all - decompress_all(&mut ctx, &pdas, &cached).await; + for i in 0..20 { + let hot = HotSet::random(&mut rng); + println!("--- Iteration {i}: hot={hot:?} ---"); - // Assert all cached state - assert_all_state(&mut ctx.rpc, &pdas, &cached, i).await; + decompress_subset(&mut ctx, &pdas, &cached, &hot).await; + assert_hot_state(&mut ctx.rpc, &pdas, &cached, &hot, i).await; - // Update cache after decompression (compression_info changes) - cached = refresh_cache(&mut ctx.rpc, &pdas, owner).await; + // Update cache only for accounts that were decompressed this iteration. 
+ cached = refresh_cache_partial(&mut ctx.rpc, &pdas, &hot, &cached).await; - // Compress all compress_all(&mut ctx, &pdas).await; println!(" iteration {i} complete"); diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs index a461461505..adda67154c 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs @@ -1,5 +1,6 @@ mod shared; +use light_account::LightDiscriminator; use light_account_pinocchio::token::TokenDataWithSeeds; use light_client::interface::{ create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, @@ -10,7 +11,6 @@ use light_program_test::{program_test::TestRpc, Rpc}; use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID; use light_token::instruction::{LIGHT_TOKEN_CONFIG, LIGHT_TOKEN_RENT_SPONSOR}; use light_token_interface::state::token::{AccountState, Token, ACCOUNT_TYPE_TOKEN_ACCOUNT}; -use light_account::LightDiscriminator; use pinocchio_light_program_test::{ all::accounts::CreateAllParams, discriminators, LightAccountVariant, MinimalRecord, MinimalRecordSeeds, OneByteRecord, OneByteRecordSeeds, VaultSeeds, ZeroCopyRecord, @@ -160,15 +160,17 @@ async fn test_create_all_derive() { .unwrap() .expect("OneByteRecord PDA should exist"); let disc_len = OneByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); - assert_eq!( - &ob_account.data[..disc_len], - OneByteRecord::LIGHT_DISCRIMINATOR_SLICE, - "OneByteRecord discriminator should match" - ); let actual_ob: OneByteRecord = borsh::BorshDeserialize::deserialize(&mut &ob_account.data[disc_len..]) .expect("Failed to deserialize OneByteRecord"); - assert_eq!(actual_ob.owner, owner.to_bytes(), "OneByteRecord owner should match"); + let expected_ob = OneByteRecord { + compression_info: shared::expected_compression_info(&actual_ob.compression_info), + owner: owner.to_bytes(), + }; + assert_eq!( + actual_ob, expected_ob, + 
"OneByteRecord should match after creation" + ); let ata_account = rpc .get_account(ata) @@ -398,7 +400,10 @@ async fn test_create_all_derive() { compression_info: shared::expected_compression_info(&actual_ob.compression_info), owner: owner.to_bytes(), }; - assert_eq!(actual_ob, expected_ob, "OneByteRecord should match after decompression"); + assert_eq!( + actual_ob, expected_ob, + "OneByteRecord should match after decompression" + ); // ATA let actual_ata: Token = shared::parse_token(&rpc.get_account(ata).await.unwrap().unwrap().data); From b08ce6994a92815b49b3ece99995ef7ff1213092 Mon Sep 17 00:00:00 2001 From: ananas Date: Fri, 20 Feb 2026 00:52:54 +0000 Subject: [PATCH 4/7] address feedback --- .../src/v3/actions/create-mint-interface.ts | 4 +-- .../src/v3/get-mint-interface.ts | 4 +-- sdk-libs/macros/src/lib.rs | 7 +++++ .../macros/src/light_pdas/account/derive.rs | 27 +++++++++++++++++++ .../macros/src/light_pdas/program/compress.rs | 5 ++-- .../src/interface/program/compression/pda.rs | 2 +- 6 files changed, 40 insertions(+), 9 deletions(-) diff --git a/js/compressed-token/src/v3/actions/create-mint-interface.ts b/js/compressed-token/src/v3/actions/create-mint-interface.ts index 757c38a6b7..f57f7bbd40 100644 --- a/js/compressed-token/src/v3/actions/create-mint-interface.ts +++ b/js/compressed-token/src/v3/actions/create-mint-interface.ts @@ -81,9 +81,7 @@ export async function createMintInterface( // Default: light-token mint creation if (!('secretKey' in mintAuthority)) { - throw new Error( - 'mintAuthority must be a Signer for light-token mints', - ); + throw new Error('mintAuthority must be a Signer for light-token mints'); } if ( addressTreeInfo && diff --git a/js/compressed-token/src/v3/get-mint-interface.ts b/js/compressed-token/src/v3/get-mint-interface.ts index aeffe71611..3d5bf10033 100644 --- a/js/compressed-token/src/v3/get-mint-interface.ts +++ b/js/compressed-token/src/v3/get-mint-interface.ts @@ -103,9 +103,7 @@ export async function 
getMintInterface( ); if (!compressedAccount?.data?.data) { - throw new Error( - `Light mint not found for ${address.toString()}`, - ); + throw new Error(`Light mint not found for ${address.toString()}`); } const compressedData = Buffer.from(compressedAccount.data.data); diff --git a/sdk-libs/macros/src/lib.rs b/sdk-libs/macros/src/lib.rs index 921539bfdc..745f2800bd 100644 --- a/sdk-libs/macros/src/lib.rs +++ b/sdk-libs/macros/src/lib.rs @@ -395,6 +395,13 @@ pub fn light_account_derive(input: TokenStream) -> TokenStream { /// - The `compression_info` field must be first or last field in the struct /// - Struct should be `#[repr(C)]` for predictable memory layout /// - Use `[u8; 32]` instead of `Pubkey` for address fields +/// +/// ## Custom discriminator +/// +/// Use `#[light_pinocchio(discriminator = [1u8])]` to override the default +/// 8-byte SHA256 discriminator with a shorter custom discriminator (1-8 bytes). +/// Variants with short discriminators should be declared last in `ProgramAccounts` +/// enums to avoid prefix-matching conflicts during dispatch. #[proc_macro_derive(LightPinocchioAccount, attributes(compress_as, skip, light_pinocchio))] pub fn light_pinocchio_account_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); diff --git a/sdk-libs/macros/src/light_pdas/account/derive.rs b/sdk-libs/macros/src/light_pdas/account/derive.rs index eee7e5a5b9..7c9a6e0162 100644 --- a/sdk-libs/macros/src/light_pdas/account/derive.rs +++ b/sdk-libs/macros/src/light_pdas/account/derive.rs @@ -160,6 +160,12 @@ fn parse_pinocchio_discriminator(attrs: &[syn::Attribute]) -> Result 8 { + return Err(syn::Error::new_spanned( + arr, + "discriminator must not exceed 8 bytes", + )); + } return Ok(Some(bytes)); } return Err(syn::Error::new_spanned( @@ -893,6 +899,27 @@ mod tests { ); } + #[test] + fn test_light_pinocchio_custom_discriminator_too_long_rejected() { + let input: DeriveInput = parse_quote! 
{ + #[light_pinocchio(discriminator = [1, 2, 3, 4, 5, 6, 7, 8, 9])] + pub struct TooLongDisc { + pub compression_info: CompressionInfo, + pub owner: [u8; 32], + } + }; + let result = derive_light_pinocchio_account(input); + assert!( + result.is_err(), + "Discriminator longer than 8 bytes should be rejected" + ); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("exceed 8 bytes"), + "Error should mention max length, got: {err}" + ); + } + #[test] fn test_light_pinocchio_discriminator_rejected_on_anchor() { let input: DeriveInput = parse_quote! { diff --git a/sdk-libs/macros/src/light_pdas/program/compress.rs b/sdk-libs/macros/src/light_pdas/program/compress.rs index c1aec7369d..231e8ab2c5 100644 --- a/sdk-libs/macros/src/light_pdas/program/compress.rs +++ b/sdk-libs/macros/src/light_pdas/program/compress.rs @@ -344,8 +344,9 @@ impl CompressBuilder { { let __disc_slice = <#name as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; let __disc_len = __disc_slice.len(); - if data.len() >= __disc_len && &data[..__disc_len] == __disc_slice { - let pod_bytes = &data[__disc_len..__disc_len + core::mem::size_of::<#name>()]; + let __expected_len = __disc_len + core::mem::size_of::<#name>(); + if data.len() >= __expected_len && &data[..__disc_len] == __disc_slice { + let pod_bytes = &data[__disc_len..__expected_len]; let mut account_data: #name = *bytemuck::from_bytes(pod_bytes); drop(data); return #account_crate::prepare_account_for_compression( diff --git a/sdk-libs/sdk-types/src/interface/program/compression/pda.rs b/sdk-libs/sdk-types/src/interface/program/compression/pda.rs index a2e9f07e25..c0cfedbf7b 100644 --- a/sdk-libs/sdk-types/src/interface/program/compression/pda.rs +++ b/sdk-libs/sdk-types/src/interface/program/compression/pda.rs @@ -92,7 +92,7 @@ where // Mark as compressed account_data.compression_info_mut()?.set_compressed(); - // Serialize updated account data back (includes 8-byte discriminator) + // Serialize updated 
account data back (includes discriminator prefix) { let mut data = account_info .try_borrow_mut_data() From 4b6ef2962eeea43ebf7c558335844d2799e2858f Mon Sep 17 00:00:00 2001 From: ananas Date: Fri, 20 Feb 2026 14:42:07 +0000 Subject: [PATCH 5/7] test: discriminators with 2-7 bytes --- .../pinocchio-light-program-test/src/lib.rs | 44 ++ .../src/multi_byte_pda/accounts.rs | 257 ++++++++++ .../src/multi_byte_pda/mod.rs | 4 + .../src/multi_byte_pda/processor.rs | 145 ++++++ .../pinocchio-light-program-test/src/state.rs | 66 +++ .../tests/test_create_multi_byte_records.rs | 476 ++++++++++++++++++ 6 files changed, 992 insertions(+) create mode 100644 sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/accounts.rs create mode 100644 sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/mod.rs create mode 100644 sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/processor.rs create mode 100644 sdk-tests/pinocchio-light-program-test/tests/test_create_multi_byte_records.rs diff --git a/sdk-tests/pinocchio-light-program-test/src/lib.rs b/sdk-tests/pinocchio-light-program-test/src/lib.rs index 4ef17d5b6c..1c0f420a5c 100644 --- a/sdk-tests/pinocchio-light-program-test/src/lib.rs +++ b/sdk-tests/pinocchio-light-program-test/src/lib.rs @@ -14,6 +14,7 @@ pub mod account_loader; pub mod all; pub mod ata; pub mod mint; +pub mod multi_byte_pda; pub mod one_byte_pda; pub mod pda; pub mod state; @@ -50,6 +51,24 @@ pub enum ProgramAccounts { #[light_account(pda::seeds = [RECORD_SEED, ctx.owner], pda::zero_copy)] ZeroCopyRecord(ZeroCopyRecord), + #[light_account(pda::seeds = [b"seven_byte_record", ctx.owner])] + SevenByteRecord(SevenByteRecord), + + #[light_account(pda::seeds = [b"six_byte_record", ctx.owner])] + SixByteRecord(SixByteRecord), + + #[light_account(pda::seeds = [b"five_byte_record", ctx.owner])] + FiveByteRecord(FiveByteRecord), + + #[light_account(pda::seeds = [b"four_byte_record", ctx.owner])] + FourByteRecord(FourByteRecord), + + 
#[light_account(pda::seeds = [b"three_byte_record", ctx.owner])] + ThreeByteRecord(ThreeByteRecord), + + #[light_account(pda::seeds = [b"two_byte_record", ctx.owner])] + TwoByteRecord(TwoByteRecord), + #[light_account(pda::seeds = [b"one_byte_record", ctx.owner])] OneByteRecord(OneByteRecord), } @@ -67,6 +86,8 @@ pub mod discriminators { pub const CREATE_ALL: [u8; 8] = [149, 49, 144, 45, 208, 155, 177, 43]; /// Discriminator for CREATE_ONE_BYTE_RECORD instruction. pub const CREATE_ONE_BYTE_RECORD: [u8; 8] = [1, 0, 0, 0, 0, 0, 0, 0]; + /// Discriminator for CREATE_MULTI_BYTE_RECORDS instruction (sha256("global:create_multi_byte_records")[..8]). + pub const CREATE_MULTI_BYTE_RECORDS: [u8; 8] = [184, 194, 128, 69, 116, 76, 186, 170]; } // ============================================================================ @@ -96,6 +117,9 @@ pub fn process_instruction( discriminators::CREATE_TWO_MINTS => process_create_two_mints(accounts, data), discriminators::CREATE_ALL => process_create_all(accounts, data), discriminators::CREATE_ONE_BYTE_RECORD => process_create_one_byte_record(accounts, data), + discriminators::CREATE_MULTI_BYTE_RECORDS => { + process_create_multi_byte_records(accounts, data) + } ProgramAccounts::INITIALIZE_COMPRESSION_CONFIG => { ProgramAccounts::process_initialize_config(accounts, data) } @@ -297,6 +321,26 @@ fn process_create_all(accounts: &[AccountInfo], data: &[u8]) -> Result<(), Progr Ok(()) } +fn process_create_multi_byte_records( + accounts: &[AccountInfo], + data: &[u8], +) -> Result<(), ProgramError> { + use borsh::BorshDeserialize; + use multi_byte_pda::accounts::{CreateMultiByteRecords, CreateMultiByteRecordsParams}; + + let params = CreateMultiByteRecordsParams::deserialize(&mut &data[..]) + .map_err(|_| ProgramError::BorshIoError)?; + + let remaining_start = CreateMultiByteRecords::FIXED_LEN; + let (fixed_accounts, remaining_accounts) = accounts.split_at(remaining_start); + let ctx = CreateMultiByteRecords::parse(fixed_accounts, ¶ms)?; + + 
multi_byte_pda::processor::process(&ctx, ¶ms, remaining_accounts) + .map_err(|e| ProgramError::Custom(u32::from(e)))?; + + Ok(()) +} + fn process_create_one_byte_record( accounts: &[AccountInfo], data: &[u8], diff --git a/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/accounts.rs b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/accounts.rs new file mode 100644 index 0000000000..be869eadb8 --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/accounts.rs @@ -0,0 +1,257 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use light_account_pinocchio::{CreateAccountsProof, LightAccount, LightDiscriminator}; +use pinocchio::{ + account_info::AccountInfo, + instruction::{Seed, Signer}, + program_error::ProgramError, + sysvars::Sysvar, +}; + +use crate::state::{ + FiveByteRecord, FourByteRecord, SevenByteRecord, SixByteRecord, ThreeByteRecord, TwoByteRecord, +}; + +#[derive(BorshSerialize, BorshDeserialize, Clone)] +pub struct CreateMultiByteRecordsParams { + pub create_accounts_proof: CreateAccountsProof, + pub owner: [u8; 32], +} + +pub struct CreateMultiByteRecords<'a> { + pub fee_payer: &'a AccountInfo, + pub compression_config: &'a AccountInfo, + pub pda_rent_sponsor: &'a AccountInfo, + pub two_byte_record: &'a AccountInfo, + pub three_byte_record: &'a AccountInfo, + pub four_byte_record: &'a AccountInfo, + pub five_byte_record: &'a AccountInfo, + pub six_byte_record: &'a AccountInfo, + pub seven_byte_record: &'a AccountInfo, + pub system_program: &'a AccountInfo, +} + +impl<'a> CreateMultiByteRecords<'a> { + pub const FIXED_LEN: usize = 10; + + pub fn parse( + accounts: &'a [AccountInfo], + params: &CreateMultiByteRecordsParams, + ) -> Result { + let fee_payer = &accounts[0]; + let compression_config = &accounts[1]; + let pda_rent_sponsor = &accounts[2]; + let two_byte_record = &accounts[3]; + let three_byte_record = &accounts[4]; + let four_byte_record = &accounts[5]; + let five_byte_record = &accounts[6]; + let 
six_byte_record = &accounts[7]; + let seven_byte_record = &accounts[8]; + let system_program = &accounts[9]; + + if !fee_payer.is_signer() { + return Err(ProgramError::MissingRequiredSignature); + } + + let rent = + pinocchio::sysvars::rent::Rent::get().map_err(|_| ProgramError::UnsupportedSysvar)?; + + // Create TwoByteRecord PDA + { + let disc_len = TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + TwoByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"two_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if two_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"two_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: two_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = two_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + // Create ThreeByteRecord PDA + { + let disc_len = ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + ThreeByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"three_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if three_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"three_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + 
pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: three_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = three_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + // Create FourByteRecord PDA + { + let disc_len = FourByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + FourByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"four_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if four_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"four_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: four_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = four_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(FourByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + // Create FiveByteRecord PDA + { + let disc_len = FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + FiveByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"five_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if five_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"five_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + 
Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: five_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = five_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + // Create SixByteRecord PDA + { + let disc_len = SixByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + SixByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"six_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if six_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + Seed::from(b"six_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: six_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = six_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(SixByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + // Create SevenByteRecord PDA + { + let disc_len = SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let space = disc_len + SevenByteRecord::INIT_SPACE; + let seeds: &[&[u8]] = &[b"seven_byte_record", ¶ms.owner]; + let (expected_pda, bump) = pinocchio::pubkey::find_program_address(seeds, &crate::ID); + if seven_byte_record.key() != &expected_pda { + return Err(ProgramError::InvalidSeeds); + } + let lamports = rent.minimum_balance(space); + let bump_bytes = [bump]; + let seed_array = [ + 
Seed::from(b"seven_byte_record" as &[u8]), + Seed::from(params.owner.as_ref()), + Seed::from(bump_bytes.as_ref()), + ]; + let signer = Signer::from(&seed_array); + pinocchio_system::instructions::CreateAccount { + from: fee_payer, + to: seven_byte_record, + lamports, + space: space as u64, + owner: &crate::ID, + } + .invoke_signed(&[signer])?; + let mut data = seven_byte_record + .try_borrow_mut_data() + .map_err(|_| ProgramError::AccountBorrowFailed)?; + data[0..disc_len].copy_from_slice(SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE); + } + + Ok(Self { + fee_payer, + compression_config, + pda_rent_sponsor, + two_byte_record, + three_byte_record, + four_byte_record, + five_byte_record, + six_byte_record, + seven_byte_record, + system_program, + }) + } +} diff --git a/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/mod.rs b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/mod.rs new file mode 100644 index 0000000000..c33d77f1e1 --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/mod.rs @@ -0,0 +1,4 @@ +pub mod accounts; +pub mod processor; + +pub use accounts::*; diff --git a/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/processor.rs b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/processor.rs new file mode 100644 index 0000000000..f75ebbcf6c --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/src/multi_byte_pda/processor.rs @@ -0,0 +1,145 @@ +use borsh::BorshDeserialize; +use light_account_pinocchio::{ + create_accounts, LightAccount, LightDiscriminator, LightSdkTypesError, PdaInitParam, + SharedAccounts, +}; +use pinocchio::account_info::AccountInfo; + +use super::accounts::{CreateMultiByteRecords, CreateMultiByteRecordsParams}; +use crate::state::{ + FiveByteRecord, FourByteRecord, SevenByteRecord, SixByteRecord, ThreeByteRecord, TwoByteRecord, +}; + +pub fn process( + ctx: &CreateMultiByteRecords<'_>, + params: &CreateMultiByteRecordsParams, + remaining_accounts: &[AccountInfo], +) -> 
Result<(), LightSdkTypesError> { + let two_byte_record = ctx.two_byte_record; + let three_byte_record = ctx.three_byte_record; + let four_byte_record = ctx.four_byte_record; + let five_byte_record = ctx.five_byte_record; + let six_byte_record = ctx.six_byte_record; + let seven_byte_record = ctx.seven_byte_record; + let owner = params.owner; + + create_accounts::( + [ + PdaInitParam { + account: ctx.two_byte_record, + }, + PdaInitParam { + account: ctx.three_byte_record, + }, + PdaInitParam { + account: ctx.four_byte_record, + }, + PdaInitParam { + account: ctx.five_byte_record, + }, + PdaInitParam { + account: ctx.six_byte_record, + }, + PdaInitParam { + account: ctx.seven_byte_record, + }, + ], + |light_config, current_slot| { + // TwoByteRecord + { + let disc_len = TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = two_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = TwoByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + // ThreeByteRecord + { + let disc_len = ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = three_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = ThreeByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + // FourByteRecord + { + let disc_len = FourByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = 
four_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = FourByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + // FiveByteRecord + { + let disc_len = FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = five_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = FiveByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + // SixByteRecord + { + let disc_len = SixByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = six_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = SixByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + // SevenByteRecord + { + let disc_len = SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + let mut account_data = seven_byte_record + .try_borrow_mut_data() + .map_err(|_| LightSdkTypesError::Borsh)?; + let mut record = SevenByteRecord::try_from_slice(&account_data[disc_len..]) + .map_err(|_| LightSdkTypesError::Borsh)?; + record.owner = owner; + record.set_decompressed(light_config, 
current_slot); + let serialized = borsh::to_vec(&record).map_err(|_| LightSdkTypesError::Borsh)?; + account_data[disc_len..disc_len + serialized.len()].copy_from_slice(&serialized); + } + Ok(()) + }, + None, + [], + [], + &SharedAccounts { + fee_payer: ctx.fee_payer, + cpi_signer: crate::LIGHT_CPI_SIGNER, + proof: &params.create_accounts_proof, + program_id: crate::ID, + compression_config: Some(ctx.compression_config), + compressible_config: None, + rent_sponsor: None, + cpi_authority: None, + system_program: None, + }, + remaining_accounts, + )?; + Ok(()) +} diff --git a/sdk-tests/pinocchio-light-program-test/src/state.rs b/sdk-tests/pinocchio-light-program-test/src/state.rs index 4e5ba9f260..366dc736ab 100644 --- a/sdk-tests/pinocchio-light-program-test/src/state.rs +++ b/sdk-tests/pinocchio-light-program-test/src/state.rs @@ -39,6 +39,72 @@ pub struct OneByteRecord { pub owner: Pubkey, } +/// 2-byte on-chain discriminator. LIGHT_DISCRIMINATOR_SLICE = &[2u8, 2u8]. +#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [2u8, 2u8])] +#[repr(C)] +pub struct TwoByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + +/// 3-byte on-chain discriminator. LIGHT_DISCRIMINATOR_SLICE = &[3u8, 3u8, 3u8]. +#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [3u8, 3u8, 3u8])] +#[repr(C)] +pub struct ThreeByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + +/// 4-byte on-chain discriminator. +#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [4u8, 4u8, 4u8, 4u8])] +#[repr(C)] +pub struct FourByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + +/// 5-byte on-chain discriminator. 
+#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [5u8, 5u8, 5u8, 5u8, 5u8])] +#[repr(C)] +pub struct FiveByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + +/// 6-byte on-chain discriminator. +#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [6u8, 6u8, 6u8, 6u8, 6u8, 6u8])] +#[repr(C)] +pub struct SixByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + +/// 7-byte on-chain discriminator. +#[derive( + Default, Debug, Clone, PartialEq, BorshSerialize, BorshDeserialize, LightPinocchioAccount, +)] +#[light_pinocchio(discriminator = [7u8, 7u8, 7u8, 7u8, 7u8, 7u8, 7u8])] +#[repr(C)] +pub struct SevenByteRecord { + pub compression_info: CompressionInfo, + pub owner: Pubkey, +} + /// A zero-copy account using Pod serialization. /// Used for efficient on-chain zero-copy access. 
#[derive( diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_multi_byte_records.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_multi_byte_records.rs new file mode 100644 index 0000000000..890ff516e8 --- /dev/null +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_multi_byte_records.rs @@ -0,0 +1,476 @@ +mod shared; + +use light_account::LightDiscriminator; +use light_client::interface::{ + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, +}; +use light_compressible::rent::SLOTS_PER_EPOCH; +use light_program_test::{program_test::TestRpc, Rpc}; +use pinocchio_light_program_test::{ + discriminators, multi_byte_pda::accounts::CreateMultiByteRecordsParams, FiveByteRecord, + FiveByteRecordSeeds, FourByteRecord, FourByteRecordSeeds, LightAccountVariant, SevenByteRecord, + SevenByteRecordSeeds, SixByteRecord, SixByteRecordSeeds, ThreeByteRecord, ThreeByteRecordSeeds, + TwoByteRecord, TwoByteRecordSeeds, +}; +use solana_instruction::{AccountMeta, Instruction}; +use solana_keypair::Keypair; +use solana_pubkey::Pubkey; +use solana_signer::Signer; + +#[tokio::test] +async fn test_create_compress_decompress_multi_byte_records() { + let env = shared::setup_test_env().await; + let mut rpc = env.rpc; + let payer = env.payer; + let program_id = env.program_id; + + let owner = Keypair::new().pubkey(); + + let (two_byte_pda, _) = + Pubkey::find_program_address(&[b"two_byte_record", owner.as_ref()], &program_id); + let (three_byte_pda, _) = + Pubkey::find_program_address(&[b"three_byte_record", owner.as_ref()], &program_id); + let (four_byte_pda, _) = + Pubkey::find_program_address(&[b"four_byte_record", owner.as_ref()], &program_id); + let (five_byte_pda, _) = + Pubkey::find_program_address(&[b"five_byte_record", owner.as_ref()], &program_id); + let (six_byte_pda, _) = + Pubkey::find_program_address(&[b"six_byte_record", owner.as_ref()], &program_id); + let (seven_byte_pda, _) = + 
Pubkey::find_program_address(&[b"seven_byte_record", owner.as_ref()], &program_id); + + // PHASE 1: Create all 6 accounts + let proof_result = get_create_accounts_proof( + &rpc, + &program_id, + vec![ + CreateAccountsProofInput::pda(two_byte_pda), + CreateAccountsProofInput::pda(three_byte_pda), + CreateAccountsProofInput::pda(four_byte_pda), + CreateAccountsProofInput::pda(five_byte_pda), + CreateAccountsProofInput::pda(six_byte_pda), + CreateAccountsProofInput::pda(seven_byte_pda), + ], + ) + .await + .unwrap(); + + let params = CreateMultiByteRecordsParams { + create_accounts_proof: proof_result.create_accounts_proof, + owner: owner.to_bytes(), + }; + + let accounts = vec![ + AccountMeta::new(payer.pubkey(), true), + AccountMeta::new_readonly(env.config_pda, false), + AccountMeta::new(env.rent_sponsor, false), + AccountMeta::new(two_byte_pda, false), + AccountMeta::new(three_byte_pda, false), + AccountMeta::new(four_byte_pda, false), + AccountMeta::new(five_byte_pda, false), + AccountMeta::new(six_byte_pda, false), + AccountMeta::new(seven_byte_pda, false), + AccountMeta::new_readonly(solana_sdk::system_program::ID, false), + ]; + + let instruction = Instruction { + program_id, + accounts: [accounts, proof_result.remaining_accounts].concat(), + data: shared::build_instruction_data(&discriminators::CREATE_MULTI_BYTE_RECORDS, &params), + }; + + rpc.create_and_send_transaction(&[instruction], &payer.pubkey(), &[&payer]) + .await + .expect("CreateMultiByteRecords should succeed"); + + // Verify all 6 PDAs on-chain after creation + let two_byte_account = rpc + .get_account(two_byte_pda) + .await + .unwrap() + .expect("TwoByteRecord PDA should exist"); + let disc_len = TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &two_byte_account.data[..disc_len], + TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "TwoByteRecord discriminator should match" + ); + let actual_two: TwoByteRecord = + borsh::BorshDeserialize::deserialize(&mut &two_byte_account.data[disc_len..]) 
+ .expect("Failed to deserialize TwoByteRecord"); + assert_eq!( + actual_two, + TwoByteRecord { + compression_info: shared::expected_compression_info(&actual_two.compression_info), + owner: owner.to_bytes(), + }, + "TwoByteRecord should match after creation" + ); + + let three_byte_account = rpc + .get_account(three_byte_pda) + .await + .unwrap() + .expect("ThreeByteRecord PDA should exist"); + let disc_len = ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &three_byte_account.data[..disc_len], + ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "ThreeByteRecord discriminator should match" + ); + let actual_three: ThreeByteRecord = + borsh::BorshDeserialize::deserialize(&mut &three_byte_account.data[disc_len..]) + .expect("Failed to deserialize ThreeByteRecord"); + assert_eq!( + actual_three, + ThreeByteRecord { + compression_info: shared::expected_compression_info(&actual_three.compression_info), + owner: owner.to_bytes(), + }, + "ThreeByteRecord should match after creation" + ); + + let four_byte_account = rpc + .get_account(four_byte_pda) + .await + .unwrap() + .expect("FourByteRecord PDA should exist"); + let disc_len = FourByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &four_byte_account.data[..disc_len], + FourByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "FourByteRecord discriminator should match" + ); + let actual_four: FourByteRecord = + borsh::BorshDeserialize::deserialize(&mut &four_byte_account.data[disc_len..]) + .expect("Failed to deserialize FourByteRecord"); + assert_eq!( + actual_four, + FourByteRecord { + compression_info: shared::expected_compression_info(&actual_four.compression_info), + owner: owner.to_bytes(), + }, + "FourByteRecord should match after creation" + ); + + let five_byte_account = rpc + .get_account(five_byte_pda) + .await + .unwrap() + .expect("FiveByteRecord PDA should exist"); + let disc_len = FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &five_byte_account.data[..disc_len], + 
FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "FiveByteRecord discriminator should match" + ); + let actual_five: FiveByteRecord = + borsh::BorshDeserialize::deserialize(&mut &five_byte_account.data[disc_len..]) + .expect("Failed to deserialize FiveByteRecord"); + assert_eq!( + actual_five, + FiveByteRecord { + compression_info: shared::expected_compression_info(&actual_five.compression_info), + owner: owner.to_bytes(), + }, + "FiveByteRecord should match after creation" + ); + + let six_byte_account = rpc + .get_account(six_byte_pda) + .await + .unwrap() + .expect("SixByteRecord PDA should exist"); + let disc_len = SixByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &six_byte_account.data[..disc_len], + SixByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "SixByteRecord discriminator should match" + ); + let actual_six: SixByteRecord = + borsh::BorshDeserialize::deserialize(&mut &six_byte_account.data[disc_len..]) + .expect("Failed to deserialize SixByteRecord"); + assert_eq!( + actual_six, + SixByteRecord { + compression_info: shared::expected_compression_info(&actual_six.compression_info), + owner: owner.to_bytes(), + }, + "SixByteRecord should match after creation" + ); + + let seven_byte_account = rpc + .get_account(seven_byte_pda) + .await + .unwrap() + .expect("SevenByteRecord PDA should exist"); + let disc_len = SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &seven_byte_account.data[..disc_len], + SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "SevenByteRecord discriminator should match" + ); + let actual_seven: SevenByteRecord = + borsh::BorshDeserialize::deserialize(&mut &seven_byte_account.data[disc_len..]) + .expect("Failed to deserialize SevenByteRecord"); + assert_eq!( + actual_seven, + SevenByteRecord { + compression_info: shared::expected_compression_info(&actual_seven.compression_info), + owner: owner.to_bytes(), + }, + "SevenByteRecord should match after creation" + ); + + // PHASE 2: Warp to trigger auto-compression + 
rpc.warp_slot_forward(SLOTS_PER_EPOCH * 30).await.unwrap(); + shared::assert_onchain_closed(&mut rpc, &two_byte_pda, "TwoByteRecord").await; + shared::assert_onchain_closed(&mut rpc, &three_byte_pda, "ThreeByteRecord").await; + shared::assert_onchain_closed(&mut rpc, &four_byte_pda, "FourByteRecord").await; + shared::assert_onchain_closed(&mut rpc, &five_byte_pda, "FiveByteRecord").await; + shared::assert_onchain_closed(&mut rpc, &six_byte_pda, "SixByteRecord").await; + shared::assert_onchain_closed(&mut rpc, &seven_byte_pda, "SevenByteRecord").await; + + // PHASE 3: Decompress via create_load_instructions + let two_byte_iface = rpc + .get_account_interface(&two_byte_pda, None) + .await + .expect("failed to get TwoByteRecord interface") + .value + .expect("TwoByteRecord interface should exist"); + assert!(two_byte_iface.is_cold(), "TwoByteRecord should be cold"); + let two_byte_data: TwoByteRecord = + borsh::BorshDeserialize::deserialize(&mut &two_byte_iface.account.data[8..]) + .expect("Failed to parse TwoByteRecord from interface"); + let two_byte_variant = LightAccountVariant::TwoByteRecord { + seeds: TwoByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: two_byte_data, + }; + let two_byte_spec = PdaSpec::new(two_byte_iface, two_byte_variant, program_id); + + let three_byte_iface = rpc + .get_account_interface(&three_byte_pda, None) + .await + .expect("failed to get ThreeByteRecord interface") + .value + .expect("ThreeByteRecord interface should exist"); + assert!(three_byte_iface.is_cold(), "ThreeByteRecord should be cold"); + let three_byte_data: ThreeByteRecord = + borsh::BorshDeserialize::deserialize(&mut &three_byte_iface.account.data[8..]) + .expect("Failed to parse ThreeByteRecord from interface"); + let three_byte_variant = LightAccountVariant::ThreeByteRecord { + seeds: ThreeByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: three_byte_data, + }; + let three_byte_spec = PdaSpec::new(three_byte_iface, three_byte_variant, program_id); + + let 
four_byte_iface = rpc + .get_account_interface(&four_byte_pda, None) + .await + .expect("failed to get FourByteRecord interface") + .value + .expect("FourByteRecord interface should exist"); + assert!(four_byte_iface.is_cold(), "FourByteRecord should be cold"); + let four_byte_data: FourByteRecord = + borsh::BorshDeserialize::deserialize(&mut &four_byte_iface.account.data[8..]) + .expect("Failed to parse FourByteRecord from interface"); + let four_byte_variant = LightAccountVariant::FourByteRecord { + seeds: FourByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: four_byte_data, + }; + let four_byte_spec = PdaSpec::new(four_byte_iface, four_byte_variant, program_id); + + let five_byte_iface = rpc + .get_account_interface(&five_byte_pda, None) + .await + .expect("failed to get FiveByteRecord interface") + .value + .expect("FiveByteRecord interface should exist"); + assert!(five_byte_iface.is_cold(), "FiveByteRecord should be cold"); + let five_byte_data: FiveByteRecord = + borsh::BorshDeserialize::deserialize(&mut &five_byte_iface.account.data[8..]) + .expect("Failed to parse FiveByteRecord from interface"); + let five_byte_variant = LightAccountVariant::FiveByteRecord { + seeds: FiveByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: five_byte_data, + }; + let five_byte_spec = PdaSpec::new(five_byte_iface, five_byte_variant, program_id); + + let six_byte_iface = rpc + .get_account_interface(&six_byte_pda, None) + .await + .expect("failed to get SixByteRecord interface") + .value + .expect("SixByteRecord interface should exist"); + assert!(six_byte_iface.is_cold(), "SixByteRecord should be cold"); + let six_byte_data: SixByteRecord = + borsh::BorshDeserialize::deserialize(&mut &six_byte_iface.account.data[8..]) + .expect("Failed to parse SixByteRecord from interface"); + let six_byte_variant = LightAccountVariant::SixByteRecord { + seeds: SixByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: six_byte_data, + }; + let six_byte_spec = 
PdaSpec::new(six_byte_iface, six_byte_variant, program_id); + + let seven_byte_iface = rpc + .get_account_interface(&seven_byte_pda, None) + .await + .expect("failed to get SevenByteRecord interface") + .value + .expect("SevenByteRecord interface should exist"); + assert!(seven_byte_iface.is_cold(), "SevenByteRecord should be cold"); + let seven_byte_data: SevenByteRecord = + borsh::BorshDeserialize::deserialize(&mut &seven_byte_iface.account.data[8..]) + .expect("Failed to parse SevenByteRecord from interface"); + let seven_byte_variant = LightAccountVariant::SevenByteRecord { + seeds: SevenByteRecordSeeds { + owner: owner.to_bytes(), + }, + data: seven_byte_data, + }; + let seven_byte_spec = PdaSpec::new(seven_byte_iface, seven_byte_variant, program_id); + + let specs: Vec> = vec![ + AccountSpec::Pda(two_byte_spec), + AccountSpec::Pda(three_byte_spec), + AccountSpec::Pda(four_byte_spec), + AccountSpec::Pda(five_byte_spec), + AccountSpec::Pda(six_byte_spec), + AccountSpec::Pda(seven_byte_spec), + ]; + + let ixs = create_load_instructions(&specs, payer.pubkey(), env.config_pda, &rpc) + .await + .expect("create_load_instructions should succeed"); + + rpc.create_and_send_transaction(&ixs, &payer.pubkey(), &[&payer]) + .await + .expect("Decompression should succeed"); + + // PHASE 4: Verify state preserved after decompression + shared::assert_onchain_exists(&mut rpc, &two_byte_pda, "TwoByteRecord").await; + shared::assert_onchain_exists(&mut rpc, &three_byte_pda, "ThreeByteRecord").await; + shared::assert_onchain_exists(&mut rpc, &four_byte_pda, "FourByteRecord").await; + shared::assert_onchain_exists(&mut rpc, &five_byte_pda, "FiveByteRecord").await; + shared::assert_onchain_exists(&mut rpc, &six_byte_pda, "SixByteRecord").await; + shared::assert_onchain_exists(&mut rpc, &seven_byte_pda, "SevenByteRecord").await; + + let account = rpc.get_account(two_byte_pda).await.unwrap().unwrap(); + let disc_len = TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + 
&account.data[..disc_len], + TwoByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "TwoByteRecord discriminator should match after decompression" + ); + let actual: TwoByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + TwoByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "TwoByteRecord should match after decompression" + ); + + let account = rpc.get_account(three_byte_pda).await.unwrap().unwrap(); + let disc_len = ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &account.data[..disc_len], + ThreeByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "ThreeByteRecord discriminator should match after decompression" + ); + let actual: ThreeByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + ThreeByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "ThreeByteRecord should match after decompression" + ); + + let account = rpc.get_account(four_byte_pda).await.unwrap().unwrap(); + let disc_len = FourByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &account.data[..disc_len], + FourByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "FourByteRecord discriminator should match after decompression" + ); + let actual: FourByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + FourByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "FourByteRecord should match after decompression" + ); + + let account = rpc.get_account(five_byte_pda).await.unwrap().unwrap(); + let disc_len = FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &account.data[..disc_len], + FiveByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "FiveByteRecord discriminator should match after 
decompression" + ); + let actual: FiveByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + FiveByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "FiveByteRecord should match after decompression" + ); + + let account = rpc.get_account(six_byte_pda).await.unwrap().unwrap(); + let disc_len = SixByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &account.data[..disc_len], + SixByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "SixByteRecord discriminator should match after decompression" + ); + let actual: SixByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + SixByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "SixByteRecord should match after decompression" + ); + + let account = rpc.get_account(seven_byte_pda).await.unwrap().unwrap(); + let disc_len = SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE.len(); + assert_eq!( + &account.data[..disc_len], + SevenByteRecord::LIGHT_DISCRIMINATOR_SLICE, + "SevenByteRecord discriminator should match after decompression" + ); + let actual: SevenByteRecord = + borsh::BorshDeserialize::deserialize(&mut &account.data[disc_len..]).unwrap(); + assert_eq!( + actual, + SevenByteRecord { + compression_info: shared::expected_compression_info(&actual.compression_info), + owner: owner.to_bytes(), + }, + "SevenByteRecord should match after decompression" + ); +} From e38e0fa619aee845a15ed1e910c640a97186b7f3 Mon Sep 17 00:00:00 2001 From: ananas Date: Fri, 20 Feb 2026 17:16:52 +0000 Subject: [PATCH 6/7] feat: add discriminator compile time collision detection --- .../macros/src/light_pdas/program/compress.rs | 70 +++++++++++++++++++ .../src/light_pdas/program/instructions.rs | 3 + 2 files changed, 73 insertions(+) diff --git 
a/sdk-libs/macros/src/light_pdas/program/compress.rs b/sdk-libs/macros/src/light_pdas/program/compress.rs index 231e8ab2c5..abfe166caa 100644 --- a/sdk-libs/macros/src/light_pdas/program/compress.rs +++ b/sdk-libs/macros/src/light_pdas/program/compress.rs @@ -446,6 +446,76 @@ impl CompressBuilder { } } + /// Generate compile-time discriminator collision checks for all pairs of account types. + /// + /// Only emitted for the Pinocchio backend. The Pinocchio compress dispatch uses a sequential + /// `if &data[..disc_len] == disc_slice` chain keyed on `LIGHT_DISCRIMINATOR_SLICE` (variable + /// length). A shorter discriminator that is a prefix of a longer one causes incorrect dispatch, + /// and users can introduce such collisions via `#[light_pinocchio(discriminator = [...])]`. + /// + /// Anchor discriminators are 8-byte SHA256-derived values; we rely on Anchor for collision safety. + /// + /// For each pair (A, B), emits a `const _: () = { ... }` block asserting neither slice is a + /// prefix of the other — catching both ordering violations and exact discriminator collisions. + pub fn generate_discriminator_collision_checks( + &self, + backend: &dyn CodegenBackend, + ) -> Result { + if !backend.is_pinocchio() { + return Ok(quote! {}); + } + let account_crate = backend.account_crate(); + // Deduplicate by qualified type string so that types used in multiple instructions are + // only compared once (and a type is never compared against itself). 
+ let mut seen = std::collections::HashSet::new(); + let unique_accounts: Vec<&CompressibleAccountInfo> = self + .accounts + .iter() + .filter(|info| { + let ty = qualify_type_with_crate(&info.account_type); + seen.insert(quote::quote!(#ty).to_string()) + }) + .collect(); + let mut checks = Vec::new(); + + for i in 0..unique_accounts.len() { + for j in (i + 1)..unique_accounts.len() { + let type_a = qualify_type_with_crate(&unique_accounts[i].account_type); + let type_b = qualify_type_with_crate(&unique_accounts[j].account_type); + + // Compute type name strings at proc-macro time for the error message. + // Replace token-stream spacing (" :: ") with idiomatic Rust path separators ("::"). + let type_a_str = quote::quote!(#type_a).to_string().replace(" :: ", "::"); + let type_b_str = quote::quote!(#type_b).to_string().replace(" :: ", "::"); + let msg = format!( + "Discriminator collision: {} and {} share a prefix. \ + Declare variants with longer discriminators before those with shorter ones in the enum.", + type_a_str, type_b_str + ); + + checks.push(quote! { + const _: () = { + const A: &[u8] = <#type_a as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; + const B: &[u8] = <#type_b as #account_crate::LightDiscriminator>::LIGHT_DISCRIMINATOR_SLICE; + let min_len = if A.len() < B.len() { A.len() } else { B.len() }; + let mut i = 0usize; + let mut is_prefix = true; + while i < min_len { + if A[i] != B[i] { + is_prefix = false; + break; + } + i += 1; + } + assert!(!is_prefix, #msg); + }; + }); + } + } + + Ok(quote! { #(#checks)* }) + } + /// Generate compile-time size validation for compressed accounts using the specified backend. 
pub fn generate_size_validation_with_backend( &self, diff --git a/sdk-libs/macros/src/light_pdas/program/instructions.rs b/sdk-libs/macros/src/light_pdas/program/instructions.rs index d9d64edb83..686ee2c2f7 100644 --- a/sdk-libs/macros/src/light_pdas/program/instructions.rs +++ b/sdk-libs/macros/src/light_pdas/program/instructions.rs @@ -441,6 +441,8 @@ pub(crate) fn generate_light_program_items_with_backend( compress_builder.validate()?; let size_validation_checks = compress_builder.generate_size_validation_with_backend(backend)?; + let discriminator_collision_checks = + compress_builder.generate_discriminator_collision_checks(backend)?; // Error codes are only generated for Anchor let error_codes = if !backend.is_pinocchio() { Some(compress_builder.generate_error_codes()?) @@ -723,6 +725,7 @@ pub(crate) fn generate_light_program_items_with_backend( } items.push(size_validation_checks); + items.push(discriminator_collision_checks); items.push(enum_and_traits); // Anchor-only: accounts structs, trait impls, processor module From 045073ffb0b3342c93a3a3f5f4bb226756d0c562 Mon Sep 17 00:00:00 2001 From: ananas Date: Fri, 20 Feb 2026 17:39:11 +0000 Subject: [PATCH 7/7] fix doc comment --- .../macros/src/light_pdas/program/compress.rs | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/sdk-libs/macros/src/light_pdas/program/compress.rs b/sdk-libs/macros/src/light_pdas/program/compress.rs index abfe166caa..bd4c563d1d 100644 --- a/sdk-libs/macros/src/light_pdas/program/compress.rs +++ b/sdk-libs/macros/src/light_pdas/program/compress.rs @@ -312,17 +312,13 @@ impl CompressBuilder { /// Generate compress dispatch as an associated function on the enum using the specified backend. /// - /// # Discriminator ordering invariant + /// # Discriminator uniqueness invariant /// - /// The dispatch uses a sequential if-chain keyed on `LIGHT_DISCRIMINATOR_SLICE`. 
Because a - /// shorter discriminator is a prefix of any byte sequence, types with shorter discriminators - /// MUST be placed *after* all types with longer discriminators in the `ProgramAccounts` enum. - /// Violating this ordering causes the short discriminator to match prematurely, corrupting - /// dispatch for longer-discriminator types whose on-chain prefix happens to share the same - /// leading bytes. - /// - /// The `LightProgramPinocchio` derive preserves enum declaration order, so the caller must - /// declare non-standard (short) discriminator variants last. + /// The dispatch uses a sequential if-chain keyed on `LIGHT_DISCRIMINATOR_SLICE`. No + /// discriminator may be a prefix of another — including exact duplicates. Violating this + /// causes silent incorrect dispatch. The `LightProgramPinocchio` derive enforces this at + /// compile time via `generate_discriminator_collision_checks`; if the check fires, change + /// the discriminator bytes so that no pair shares a prefix. pub fn generate_enum_dispatch_method_with_backend( &self, enum_name: &syn::Ident, @@ -488,8 +484,8 @@ impl CompressBuilder { let type_a_str = quote::quote!(#type_a).to_string().replace(" :: ", "::"); let type_b_str = quote::quote!(#type_b).to_string().replace(" :: ", "::"); let msg = format!( - "Discriminator collision: {} and {} share a prefix. \ - Declare variants with longer discriminators before those with shorter ones in the enum.", + "Discriminator collision: {} and {} share a prefix (or are identical). \ + Change one of the discriminator byte arrays so no pair shares a prefix.", type_a_str, type_b_str );