diff --git a/.github/workflows/programs.yml b/.github/workflows/programs.yml index c84da7dfe3..9cce8303e0 100644 --- a/.github/workflows/programs.yml +++ b/.github/workflows/programs.yml @@ -7,7 +7,7 @@ on: - "program-tests/**" - "program-libs/**" - "prover/client/**" - - ".github/workflows/light-system-programs-tests.yml" + - ".github/workflows/programs.yml" pull_request: branches: - "*" @@ -16,7 +16,7 @@ on: - "program-tests/**" - "program-libs/**" - "prover/client/**" - - ".github/workflows/light-system-programs-tests.yml" + - ".github/workflows/programs.yml" types: - opened - synchronize @@ -24,6 +24,8 @@ on: - ready_for_review name: programs +permissions: + contents: read concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -31,7 +33,7 @@ concurrency: jobs: system-programs: - name: programs + name: ${{ matrix.test-group }} if: github.event.pull_request.draft == false runs-on: warp-ubuntu-latest-x64-4x timeout-minutes: 90 @@ -52,27 +54,16 @@ jobs: strategy: matrix: - include: - - program: account-compression-and-registry - sub-tests: '["cargo-test-sbf -p account-compression-test", "cargo-test-sbf -p registry-test"]' - - program: light-system-program-address - sub-tests: '["cargo-test-sbf -p system-test -- test_with_address", "cargo-test-sbf -p e2e-test", "cargo-test-sbf -p compressed-token-test --test light_token"]' - - program: light-system-program-compression - sub-tests: '["cargo-test-sbf -p system-test -- test_with_compression", "cargo-test-sbf -p system-test --test test_re_init_cpi_account"]' - - program: compressed-token-and-e2e - sub-tests: '["cargo test -p light-compressed-token", "cargo-test-sbf -p compressed-token-test --test v1", "cargo-test-sbf -p compressed-token-test --test mint"]' - - program: compressed-token-batched-tree - sub-tests: '["cargo-test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree"]' - - program: system-cpi-test - sub-tests: - '["cargo-test-sbf -p system-cpi-test", "cargo test -p 
light-system-program-pinocchio", - "cargo-test-sbf -p system-cpi-v2-test -- --skip functional_ --skip event::parse", "cargo-test-sbf -p system-cpi-v2-test -- event::parse", - "cargo-test-sbf -p compressed-token-test --test transfer2" - ]' - - program: system-cpi-test-v2-functional-read-only - sub-tests: '["cargo-test-sbf -p system-cpi-v2-test -- functional_read_only"]' - - program: system-cpi-test-v2-functional-account-infos - sub-tests: '["cargo-test-sbf -p system-cpi-v2-test -- functional_account_infos"]' + test-group: + - account-compression-and-registry + - system-address + - system-compression + - compressed-token-and-e2e + - compressed-token-batched-tree + - system-cpi + - system-cpi-v2-functional-read-only + - system-cpi-v2-functional-account-infos + steps: - name: Checkout sources uses: actions/checkout@v6 @@ -87,34 +78,7 @@ jobs: run: | just cli build - - name: ${{ matrix.program }} + - name: Run tests + working-directory: program-tests run: | - - IFS=',' read -r -a sub_tests <<< "${{ join(fromJSON(matrix['sub-tests']), ', ') }}" - for subtest in "${sub_tests[@]}" - do - echo "$subtest" - - # Retry logic for flaky batched-tree test - if [[ "$subtest" == *"test_transfer_with_photon_and_batched_tree"* ]]; then - echo "Running flaky test with retry logic (max 3 attempts)..." - attempt=1 - max_attempts=3 - until RUSTFLAGS="-D warnings" eval "$subtest"; do - attempt=$((attempt + 1)) - if [ $attempt -gt $max_attempts ]; then - echo "Test failed after $max_attempts attempts" - exit 1 - fi - echo "Attempt $attempt/$max_attempts failed, retrying..." 
- sleep 5 - done - echo "Test passed on attempt $attempt" - else - RUSTFLAGS="-D warnings" eval "$subtest" - if [ "$subtest" == "cargo-test-sbf -p e2e-test" ]; then - just programs build-compressed-token-small - RUSTFLAGS="-D warnings" eval "$subtest -- --test test_10_all" - fi - fi - done + just ci-${{ matrix.test-group }} diff --git a/.mise.toml b/.mise.toml new file mode 100644 index 0000000000..c5492f4b9e --- /dev/null +++ b/.mise.toml @@ -0,0 +1,4 @@ +# Disable mise's Go management for this project. +# We use our own Go installation via devenv.sh. +[settings] +disable_tools = ["go"] diff --git a/Cargo.lock b/Cargo.lock index 0ca14e0aaa..25d90546f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3858,6 +3858,7 @@ dependencies = [ "borsh 0.10.4", "light-compressed-account", "light-hasher", + "light-token-interface", "light-zero-copy", "rand 0.8.5", "thiserror 2.0.17", diff --git a/Cargo.toml b/Cargo.toml index 7c54409b9d..3dd5881ce7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -147,6 +147,7 @@ pinocchio-pubkey = { version = "0.3.0" } pinocchio-system = { version = "0.3.0" } bs58 = "^0.5.1" sha2 = "0.10" +hex = "0.4" litesvm = "0.7" # Anchor anchor-lang = { version = "0.31.1" } diff --git a/cli/src/commands/test-validator/index.ts b/cli/src/commands/test-validator/index.ts index e61796d0c1..9a33d58c86 100644 --- a/cli/src/commands/test-validator/index.ts +++ b/cli/src/commands/test-validator/index.ts @@ -43,6 +43,22 @@ class SetupCommand extends Command { "Runs a test validator without starting a new prover service.", default: false, }), + forester: Flags.boolean({ + description: + "Start the forester service for auto-compression of compressible accounts.", + default: false, + }), + "forester-port": Flags.integer({ + description: "Port for the forester API server.", + required: false, + default: 8080, + }), + "compressible-pda-program": Flags.string({ + description: + "Compressible PDA programs to track. Format: 'program_id:discriminator_base58'. 
Can be specified multiple times.", + required: false, + multiple: true, + }), "skip-system-accounts": Flags.boolean({ description: "Runs a test validator without initialized light system accounts.", @@ -147,6 +163,13 @@ class SetupCommand extends Command { default: true, allowNo: true, }), + "account-dir": Flags.string({ + description: + "Additional directory containing account JSON files to preload. Can be specified multiple times.", + required: false, + multiple: true, + summary: "Usage: --account-dir ", + }), }; validatePrograms( @@ -216,6 +239,7 @@ class SetupCommand extends Command { await stopTestEnv({ indexer: !flags["skip-indexer"], prover: !flags["skip-prover"], + forester: flags.forester, }); this.log("\nTest validator stopped successfully \x1b[32m✔\x1b[0m"); } else { @@ -268,6 +292,9 @@ class SetupCommand extends Command { indexerPort: flags["indexer-port"], proverPort: flags["prover-port"], prover: !flags["skip-prover"], + forester: flags.forester, + foresterPort: flags["forester-port"], + compressiblePdaPrograms: flags["compressible-pda-program"], skipSystemAccounts: flags["skip-system-accounts"], geyserConfig: flags["geyser-config"], validatorArgs: flags["validator-args"], @@ -279,6 +306,7 @@ class SetupCommand extends Command { verbose: flags.verbose, skipReset: flags["skip-reset"], useSurfpool: flags["use-surfpool"], + additionalAccountDirs: flags["account-dir"], }); this.log("\nSetup tasks completed successfully \x1b[32m✔\x1b[0m"); } diff --git a/cli/src/utils/constants.ts b/cli/src/utils/constants.ts index db2547d45e..7ff7e03730 100644 --- a/cli/src/utils/constants.ts +++ b/cli/src/utils/constants.ts @@ -19,6 +19,7 @@ export const SOLANA_VALIDATOR_PROCESS_NAME = "solana-test-validator"; export const SURFPOOL_PROCESS_NAME = "surfpool"; export const LIGHT_PROVER_PROCESS_NAME = "light-prover"; export const INDEXER_PROCESS_NAME = "photon"; +export const FORESTER_PROCESS_NAME = "forester"; export const SURFPOOL_VERSION = "1.0.1"; export const 
SURFPOOL_RELEASE_TAG = "v1.0.1-light"; diff --git a/cli/src/utils/initTestEnv.ts b/cli/src/utils/initTestEnv.ts index 71722b6cff..7dc57d2a92 100644 --- a/cli/src/utils/initTestEnv.ts +++ b/cli/src/utils/initTestEnv.ts @@ -24,6 +24,11 @@ import { } from "./process"; import { killProver, startProver } from "./processProverServer"; import { killIndexer, startIndexer } from "./processPhotonIndexer"; +import { + killForester, + startForester, + getPayerForForester, +} from "./processForester"; import { Connection, PublicKey } from "@solana/web3.js"; import { execSync } from "child_process"; @@ -101,8 +106,10 @@ async function getProgramOwnedAccounts( export async function stopTestEnv(options: { indexer: boolean; prover: boolean; + forester?: boolean; }) { const processesToKill = [ + { name: "forester", condition: options.forester ?? false, killFunction: killForester }, { name: "photon", condition: options.indexer, killFunction: killIndexer }, { name: "prover", condition: options.prover, killFunction: killProver }, { @@ -135,9 +142,11 @@ export async function initTestEnv({ skipSystemAccounts, indexer = true, prover = true, + forester = false, rpcPort = 8899, indexerPort = 8784, proverPort = 3001, + foresterPort = 8080, gossipHost = "127.0.0.1", checkPhotonVersion = true, photonDatabaseUrl, @@ -148,6 +157,8 @@ export async function initTestEnv({ verbose, skipReset, useSurfpool, + compressiblePdaPrograms, + additionalAccountDirs, }: { additionalPrograms?: { address: string; path: string }[]; upgradeablePrograms?: { @@ -158,9 +169,11 @@ export async function initTestEnv({ skipSystemAccounts?: boolean; indexer: boolean; prover: boolean; + forester?: boolean; rpcPort?: number; indexerPort?: number; proverPort?: number; + foresterPort?: number; gossipHost?: string; checkPhotonVersion?: boolean; photonDatabaseUrl?: string; @@ -171,6 +184,8 @@ export async function initTestEnv({ verbose?: boolean; skipReset?: boolean; useSurfpool?: boolean; + compressiblePdaPrograms?: string[]; 
+ additionalAccountDirs?: string[]; }) { if (useSurfpool) { // For surfpool we can await startTestValidator because spawnBinary returns @@ -189,6 +204,7 @@ verbose, skipReset, useSurfpool, + additionalAccountDirs, }); // Surfpool only supports JSON-RPC POST, not GET /health. await confirmRpcReadiness(`http://127.0.0.1:${rpcPort}`); @@ -207,6 +223,7 @@ verbose, skipReset, useSurfpool, + additionalAccountDirs, }); await waitForServers([{ port: rpcPort, path: "/health" }]); await confirmServerStability(`http://127.0.0.1:${rpcPort}/health`); @@ -250,6 +267,27 @@ startSlot, ); } + + if (forester) { + if (!indexer || !prover) { + throw new Error("Forester requires both indexer and prover to be running"); + } + try { + const payer = getPayerForForester(); + await startForester({ + rpcUrl: `http://127.0.0.1:${rpcPort}`, + wsRpcUrl: `ws://127.0.0.1:${rpcPort + 1}`, + indexerUrl: `http://127.0.0.1:${indexerPort}`, + proverUrl: `http://127.0.0.1:${proverPort}`, + payer, + foresterPort, + compressiblePdaPrograms, + }); + } catch (error) { + console.error("Failed to start forester:", error); + throw error; + } + } } export async function initTestEnvIfNeeded({ @@ -448,6 +507,7 @@ export async function getSurfpoolArgs({ rpcPort, gossipHost, downloadBinaries = true, + additionalAccountDirs, }: { additionalPrograms?: { address: string; path: string
}[]; upgradeablePrograms?: { @@ -459,6 +519,7 @@ export async function getSurfpoolArgs({ rpcPort?: number; gossipHost?: string; downloadBinaries?: boolean; + additionalAccountDirs?: string[]; }): Promise> { const dirPath = programsDirPath(); @@ -508,6 +569,13 @@ export async function getSurfpoolArgs({ args.push("--account-dir", accountsPath); } + // Load additional account directories + if (additionalAccountDirs) { + for (const accountDir of additionalAccountDirs) { + args.push("--account-dir", path.resolve(accountDir)); + } + } + return args; } @@ -603,6 +671,7 @@ export async function startTestValidator({ verbose, skipReset, useSurfpool, + additionalAccountDirs, }: { additionalPrograms?: { address: string; path: string }[]; upgradeablePrograms?: { @@ -620,6 +689,7 @@ export async function startTestValidator({ verbose?: boolean; skipReset?: boolean; useSurfpool?: boolean; + additionalAccountDirs?: string[]; }) { if (useSurfpool) { const command = await ensureSurfpoolBinary(); @@ -629,6 +699,7 @@ export async function startTestValidator({ skipSystemAccounts, rpcPort, gossipHost, + additionalAccountDirs, }); await killTestValidator(rpcPort); diff --git a/cli/src/utils/processForester.ts b/cli/src/utils/processForester.ts new file mode 100644 index 0000000000..9bbc255e72 --- /dev/null +++ b/cli/src/utils/processForester.ts @@ -0,0 +1,107 @@ +import which from "which"; +import { killProcess, spawnBinary, waitForServers } from "./process"; +import { FORESTER_PROCESS_NAME } from "./constants"; +import { exec } from "node:child_process"; +import * as util from "node:util"; +import { exit } from "node:process"; +import * as fs from "fs"; +import * as path from "path"; + +const execAsync = util.promisify(exec); + +async function isForesterInstalled(): Promise { + try { + const resolvedOrNull = which.sync("forester", { nothrow: true }); + return resolvedOrNull !== null; + } catch (error) { + return false; + } +} + +function getForesterInstallMessage(): string { + return 
`\nForester not found. Please install it by running: "cargo install --git https://github.com/Lightprotocol/light-protocol forester --locked --force"`; +} + +export interface ForesterConfig { + rpcUrl: string; + wsRpcUrl: string; + indexerUrl: string; + proverUrl: string; + payer: string; + foresterPort: number; + compressiblePdaPrograms?: string[]; +} + +/** + * Starts the forester service for auto-compression of compressible accounts. + * + * @param config - Forester configuration + */ +export async function startForester(config: ForesterConfig) { + await killForester(); + + if (!(await isForesterInstalled())) { + console.log(getForesterInstallMessage()); + return exit(1); + } + + console.log("Starting forester..."); + + const args: string[] = [ + "start", + "--rpc-url", + config.rpcUrl, + "--ws-rpc-url", + config.wsRpcUrl, + "--indexer-url", + config.indexerUrl, + "--prover-url", + config.proverUrl, + "--payer", + config.payer, + "--api-server-port", + config.foresterPort.toString(), + "--enable-compressible", + ]; + + // Add compressible PDA programs if specified + if (config.compressiblePdaPrograms && config.compressiblePdaPrograms.length > 0) { + for (const program of config.compressiblePdaPrograms) { + args.push("--compressible-pda-program", program); + } + } + + spawnBinary(FORESTER_PROCESS_NAME, args); + await waitForServers([{ port: config.foresterPort, path: "/health" }]); + console.log("Forester started successfully!"); +} + +export async function killForester() { + await killProcess(FORESTER_PROCESS_NAME); +} + +/** + * Gets the payer keypair as a JSON array string for forester. + * Reads from ~/.config/solana/id.json or SOLANA_PAYER environment variable. 
+ * + * @returns JSON array string of the keypair bytes + */ +export function getPayerForForester(): string { + // Check for SOLANA_PAYER environment variable first + if (process.env.SOLANA_PAYER) { + return process.env.SOLANA_PAYER; + } + + // Default to standard Solana keypair location + const homeDir = process.env.HOME || process.env.USERPROFILE || ""; + const keypairPath = path.join(homeDir, ".config", "solana", "id.json"); + + if (fs.existsSync(keypairPath)) { + const keypairData = fs.readFileSync(keypairPath, "utf-8"); + return keypairData.trim(); + } + + throw new Error( + "No payer keypair found. Set SOLANA_PAYER environment variable or create ~/.config/solana/id.json", + ); +} diff --git a/forester/Cargo.toml b/forester/Cargo.toml index 3b0cc6aef2..21902f6436 100644 --- a/forester/Cargo.toml +++ b/forester/Cargo.toml @@ -45,7 +45,7 @@ futures = { workspace = true } thiserror = { workspace = true } borsh = { workspace = true } bs58 = { workspace = true } -hex = "0.4" +hex = { workspace = true } env_logger = { workspace = true } async-trait = { workspace = true } tracing = { workspace = true } diff --git a/forester/justfile b/forester/justfile index 430267c08f..ad7798ecae 100644 --- a/forester/justfile +++ b/forester/justfile @@ -35,3 +35,8 @@ test-compressible-mint: build-compressible-test-deps test-compressible-ctoken: build-compressible-test-deps RUST_LOG=forester=debug,light_client=debug \ cargo test --package forester --test test_compressible_ctoken -- --nocapture + +# Test for indexer interface scenarios (creates test data for photon) +test-indexer-interface: build-test-deps + RUST_LOG=forester=debug,light_client=debug \ + cargo test --package forester --test test_indexer_interface -- --nocapture diff --git a/forester/src/compressible/bootstrap_helpers.rs b/forester/src/compressible/bootstrap_helpers.rs index c358bacbfc..8ad43ec638 100644 --- a/forester/src/compressible/bootstrap_helpers.rs +++ b/forester/src/compressible/bootstrap_helpers.rs @@ -5,12 
+5,18 @@ //! - Account field extraction from JSON responses //! - Standard and V2 API patterns -use std::time::Duration; +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; use serde_json::json; use solana_sdk::pubkey::Pubkey; -use tokio::time::timeout; -use tracing::debug; +use tokio::{sync::oneshot, time::timeout}; +use tracing::{debug, info}; use super::config::{DEFAULT_PAGE_SIZE, DEFAULT_PAGINATION_DELAY_MS}; use crate::Result; @@ -344,3 +350,127 @@ where Ok((page_count, total_fetched, total_inserted)) } + +/// Result of a bootstrap operation +#[derive(Debug, Clone)] +pub struct BootstrapResult { + /// Number of pages fetched (1 for standard API) + pub pages: usize, + /// Total number of accounts fetched from RPC + pub fetched: usize, + /// Number of accounts successfully inserted/processed + pub inserted: usize, +} + +/// High-level bootstrap runner that handles common scaffolding. +/// +/// This helper encapsulates: +/// - Shutdown flag setup and listener spawning +/// - HTTP client creation +/// - Automatic selection between standard and V2 APIs based on localhost detection +/// - Consistent logging with the provided label +/// +/// # Arguments +/// * `rpc_url` - The RPC endpoint URL +/// * `program_id` - The program ID to fetch accounts from +/// * `filters` - Optional memcmp/dataSize filters for the query +/// * `shutdown_rx` - Optional shutdown receiver for graceful cancellation +/// * `process_fn` - Closure called for each fetched account; returns true if successfully processed +/// * `label` - Label for log messages (e.g., "Mint", "CToken", "PDA") +/// +/// # Returns +/// A `BootstrapResult` containing page count, fetched count, and inserted count. 
+pub async fn run_bootstrap<F>( + rpc_url: &str, + program_id: &Pubkey, + filters: Option<Vec<serde_json::Value>>, + shutdown_rx: Option<oneshot::Receiver<()>>, + process_fn: F, + label: &str, +) -> Result<BootstrapResult> +where + F: FnMut(RawAccountData) -> bool, +{ + info!("Starting bootstrap of {} accounts", label); + + // Set up shutdown flag and listener task + let shutdown_flag = Arc::new(AtomicBool::new(false)); + + // Spawn shutdown listener and keep handle for cleanup + let shutdown_listener_handle = shutdown_rx.map(|rx| { + let shutdown_flag_clone = shutdown_flag.clone(); + tokio::spawn(async move { + let _ = rx.await; + shutdown_flag_clone.store(true, Ordering::SeqCst); + }) + }); + + let client = reqwest::Client::new(); + + info!( + "Bootstrapping {} accounts from program {}", + label, program_id + ); + + let result = if is_localhost(rpc_url) { + debug!("Detected localhost, using standard getProgramAccounts"); + let api_result = bootstrap_standard_api( + &client, + rpc_url, + program_id, + filters, + Some(&shutdown_flag), + process_fn, + ) + .await; + + // Abort shutdown listener before returning (success or error) + if let Some(handle) = shutdown_listener_handle { + handle.abort(); + } + + let (fetched, inserted) = api_result?; + + info!( + "{} bootstrap complete: {} fetched, {} inserted", + label, fetched, inserted + ); + + BootstrapResult { + pages: 1, + fetched, + inserted, + } + } else { + debug!("Using getProgramAccountsV2 with pagination"); + let api_result = bootstrap_v2_api( + &client, + rpc_url, + program_id, + filters, + Some(&shutdown_flag), + process_fn, + ) + .await; + + // Abort shutdown listener before returning (success or error) + if let Some(handle) = shutdown_listener_handle { + handle.abort(); + } + + let (pages, fetched, inserted) = api_result?; + + info!( + "{} bootstrap complete: {} pages, {} fetched, {} inserted", + label, pages, fetched, inserted + ); + + BootstrapResult { + pages, + fetched, + inserted, + } + }; + + Ok(result) +} diff --git a/forester/src/compressible/config.rs
b/forester/src/compressible/config.rs index 46b65e35b9..14668317a2 100644 --- a/forester/src/compressible/config.rs +++ b/forester/src/compressible/config.rs @@ -34,7 +34,7 @@ pub const DEFAULT_PAGINATION_DELAY_MS: u64 = 100; /// Configuration for a compressible PDA program. /// -/// Can be specified via CLI (using `program_id:discriminator_base58` format) +/// Can be specified via CLI `--compressible-pda-program` (using `program_id:discriminator_base58` format) /// or via config file using the serialized struct format. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PdaProgramConfig { @@ -146,7 +146,7 @@ pub struct CompressibleConfig { #[serde(default = "default_max_concurrent_batches")] pub max_concurrent_batches: usize, /// Compressible PDA programs to track and compress. - /// Can be specified in config file or via CLI `--pda-program` flags. + /// Can be specified in config file or via CLI `--compressible-pda-program` flags. /// CLI values are merged with config file values. #[serde(default)] pub pda_programs: Vec, diff --git a/forester/src/compressible/ctoken/state.rs b/forester/src/compressible/ctoken/state.rs index eaf0272fac..5dbc5b9961 100644 --- a/forester/src/compressible/ctoken/state.rs +++ b/forester/src/compressible/ctoken/state.rs @@ -76,6 +76,7 @@ impl CTokenAccountTracker { /// Update tracker with an already-deserialized Token. /// Use this to avoid double deserialization when the Token is already available. + /// Skips mint accounts (only tracks actual token accounts). 
pub fn update_from_token( &self, pubkey: Pubkey, @@ -83,6 +84,12 @@ impl CTokenAccountTracker { lamports: u64, account_size: usize, ) -> Result<()> { + // Skip mint accounts - only track actual token accounts + if !ctoken.is_token_account() { + debug!("Skipping non-token account {}", pubkey); + return Ok(()); + } + let compressible_slot = match calculate_compressible_slot(&ctoken, lamports, account_size) { Ok(slot) => slot, Err(e) => { diff --git a/forester/src/compressible/mint/bootstrap.rs b/forester/src/compressible/mint/bootstrap.rs index b20aebfe9d..104c8dd00c 100644 --- a/forester/src/compressible/mint/bootstrap.rs +++ b/forester/src/compressible/mint/bootstrap.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use tokio::sync::oneshot; use tracing::{debug, info}; @@ -6,9 +6,7 @@ use tracing::{debug, info}; use super::state::MintAccountTracker; use crate::{ compressible::{ - bootstrap_helpers::{ - bootstrap_standard_api, bootstrap_v2_api, is_localhost, RawAccountData, - }, + bootstrap_helpers::{run_bootstrap, RawAccountData}, config::{ACCOUNT_TYPE_OFFSET, MINT_ACCOUNT_TYPE_FILTER}, traits::CompressibleTracker, }, @@ -21,31 +19,18 @@ pub async fn bootstrap_mint_accounts( tracker: Arc, shutdown_rx: Option>, ) -> Result<()> { - info!("Starting bootstrap of decompressed Mint accounts"); - - // Set up shutdown flag - let shutdown_flag = Arc::new(std::sync::atomic::AtomicBool::new(false)); - - if let Some(rx) = shutdown_rx { - let shutdown_flag_clone = shutdown_flag.clone(); - tokio::spawn(async move { - let _ = rx.await; - shutdown_flag_clone.store(true, std::sync::atomic::Ordering::SeqCst); - }); - } - - let client = reqwest::Client::builder() - .timeout(Duration::from_secs(30)) - .build()?; - // Light Token Program ID let program_id = solana_sdk::pubkey::Pubkey::new_from_array(light_token_interface::LIGHT_TOKEN_PROGRAM_ID); - info!( - "Bootstrapping decompressed Mint accounts from program {}", - program_id - ); + // Filter for 
decompressed Mint accounts (account_type = 1) + let filters = Some(vec![serde_json::json!({ + "memcmp": { + "offset": ACCOUNT_TYPE_OFFSET, + "bytes": MINT_ACCOUNT_TYPE_FILTER, + "encoding": "base58" + } + })]); // Process function that updates tracker let process_account = |raw_data: RawAccountData| -> bool { @@ -58,50 +43,22 @@ pub async fn bootstrap_mint_accounts( true }; - // Filter for decompressed Mint accounts (account_type = 1) - let filters = Some(vec![serde_json::json!({ - "memcmp": { - "offset": ACCOUNT_TYPE_OFFSET, - "bytes": MINT_ACCOUNT_TYPE_FILTER, - "encoding": "base58" - } - })]); - - if is_localhost(&rpc_url) { - let (total_fetched, total_inserted) = bootstrap_standard_api( - &client, - &rpc_url, - &program_id, - filters, - Some(&shutdown_flag), - process_account, - ) - .await?; - - info!( - "Mint bootstrap complete: {} fetched, {} decompressed mints tracked", - total_fetched, total_inserted - ); - } else { - let (page_count, total_fetched, total_inserted) = bootstrap_v2_api( - &client, - &rpc_url, - &program_id, - filters, - Some(&shutdown_flag), - process_account, - ) - .await?; - - info!( - "Mint bootstrap finished: {} pages, {} fetched, {} decompressed mints tracked", - page_count, total_fetched, total_inserted - ); - } + let result = run_bootstrap( + &rpc_url, + &program_id, + filters, + shutdown_rx, + process_account, + "Mint", + ) + .await?; info!( - "Mint bootstrap finished: {} total mints tracked", - tracker.len() + "Mint bootstrap finished: {} total mints tracked (fetched: {}, inserted: {}, pages: {})", + tracker.len(), + result.fetched, + result.inserted, + result.pages ); Ok(()) diff --git a/forester/src/compressible/mint/state.rs b/forester/src/compressible/mint/state.rs index db19e6dd10..4ddebb4847 100644 --- a/forester/src/compressible/mint/state.rs +++ b/forester/src/compressible/mint/state.rs @@ -19,12 +19,13 @@ fn calculate_compressible_slot(mint: &Mint, lamports: u64, account_size: usize) let rent_exemption = 
get_rent_exemption_lamports(account_size as u64) .map_err(|e| anyhow::anyhow!("Failed to get rent exemption: {:?}", e))?; let compression_info = &mint.compression; + let config = &compression_info.rent_config; let last_funded_epoch = get_last_funded_epoch( account_size as u64, lamports, compression_info.last_claimed_slot, - &compression_info.rent_config, + config, rent_exemption, ); diff --git a/forester/src/compressible/pda/compressor.rs b/forester/src/compressible/pda/compressor.rs index 45c64c7fae..3b5664f857 100644 --- a/forester/src/compressible/pda/compressor.rs +++ b/forester/src/compressible/pda/compressor.rs @@ -310,14 +310,17 @@ impl PdaCompressor { "Batched compress_accounts_idempotent tx confirmed: {}", signature ); + Ok(signature) } else { tracing::warn!( "compress_accounts_idempotent tx not confirmed: {} - accounts kept in tracker for retry", signature ); + Err(anyhow::anyhow!( + "Batch transaction not confirmed: {}", + signature + )) } - - Ok(signature) } /// Compress a single PDA account using cached config diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs index c4db026a4d..98ca8763c4 100644 --- a/forester/tests/e2e_test.rs +++ b/forester/tests/e2e_test.rs @@ -277,6 +277,7 @@ async fn e2e_test() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; spawn_prover().await; diff --git a/forester/tests/legacy/address_v2_test.rs b/forester/tests/legacy/address_v2_test.rs index 71ee957010..c7ec9db781 100644 --- a/forester/tests/legacy/address_v2_test.rs +++ b/forester/tests/legacy/address_v2_test.rs @@ -63,6 +63,7 @@ async fn test_create_v2_address() { upgradeable_programs: vec![], limit_ledger_size: Some(500000), use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/legacy/batched_address_test.rs b/forester/tests/legacy/batched_address_test.rs index fc6c0af838..aa71314226 100644 --- a/forester/tests/legacy/batched_address_test.rs +++ 
b/forester/tests/legacy/batched_address_test.rs @@ -44,6 +44,7 @@ async fn test_address_batched() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; let tree_params = InitAddressTreeAccountsInstructionData::test_default(); diff --git a/forester/tests/legacy/batched_state_async_indexer_test.rs b/forester/tests/legacy/batched_state_async_indexer_test.rs index 9e94fd8079..ac719ea9aa 100644 --- a/forester/tests/legacy/batched_state_async_indexer_test.rs +++ b/forester/tests/legacy/batched_state_async_indexer_test.rs @@ -84,6 +84,7 @@ async fn test_state_indexer_async_batched() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; spawn_prover().await; diff --git a/forester/tests/legacy/batched_state_indexer_test.rs b/forester/tests/legacy/batched_state_indexer_test.rs index 4eb6a5b02d..1bea68ba1e 100644 --- a/forester/tests/legacy/batched_state_indexer_test.rs +++ b/forester/tests/legacy/batched_state_indexer_test.rs @@ -45,6 +45,7 @@ async fn test_state_indexer_batched() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/legacy/batched_state_test.rs b/forester/tests/legacy/batched_state_test.rs index 134ecc67ec..5cb0cbbb3e 100644 --- a/forester/tests/legacy/batched_state_test.rs +++ b/forester/tests/legacy/batched_state_test.rs @@ -49,6 +49,7 @@ async fn test_state_batched() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/legacy/e2e_test.rs b/forester/tests/legacy/e2e_test.rs index b413894361..80734c0483 100644 --- a/forester/tests/legacy/e2e_test.rs +++ b/forester/tests/legacy/e2e_test.rs @@ -41,6 +41,7 @@ async fn test_epoch_monitor_with_2_foresters() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) 
.await; let forester_keypair1 = Keypair::new(); @@ -389,6 +390,7 @@ async fn test_epoch_double_registration() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/legacy/e2e_v1_test.rs b/forester/tests/legacy/e2e_v1_test.rs index 4687dc33f6..88b88af86c 100644 --- a/forester/tests/legacy/e2e_v1_test.rs +++ b/forester/tests/legacy/e2e_v1_test.rs @@ -42,6 +42,7 @@ async fn test_e2e_v1() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; let forester_keypair1 = Keypair::new(); @@ -386,6 +387,7 @@ async fn test_epoch_double_registration() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/test_batch_append_spent.rs b/forester/tests/test_batch_append_spent.rs index bc5a71b94b..547acf0193 100644 --- a/forester/tests/test_batch_append_spent.rs +++ b/forester/tests/test_batch_append_spent.rs @@ -52,6 +52,7 @@ async fn test_batch_sequence() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], })) .await; diff --git a/forester/tests/test_compressible_ctoken.rs b/forester/tests/test_compressible_ctoken.rs index dd2bea5b91..bde04205de 100644 --- a/forester/tests/test_compressible_ctoken.rs +++ b/forester/tests/test_compressible_ctoken.rs @@ -90,12 +90,15 @@ async fn register_forester( // Calculate epoch info let current_slot = rpc.get_slot().await?; let current_epoch = protocol_config.get_current_epoch(current_slot); - println!("current_epoch {:?}", current_epoch); let phases = get_epoch_phases(&protocol_config, current_epoch); + + println!( + "Current slot: {}, current_epoch: {}, phases: {:?}", + current_slot, current_epoch, phases + ); + let register_phase_start = phases.registration.start; let active_phase_start = phases.active.start; - println!("phases {:?}", phases); - 
println!("current_slot {}", current_slot); // Warp to registration phase if rpc.get_slot().await? < register_phase_start { @@ -104,10 +107,12 @@ async fn register_forester( .expect("warp_to_slot to registration phase"); } - // Register for epoch 0 - let epoch = 0u64; - let register_epoch_ix = - create_register_forester_epoch_pda_instruction(&forester_pubkey, &forester_pubkey, epoch); + // Register for the current epoch + let register_epoch_ix = create_register_forester_epoch_pda_instruction( + &forester_pubkey, + &forester_pubkey, + current_epoch, + ); let (blockhash, _) = rpc.get_latest_blockhash().await?; let tx = Transaction::new_signed_with_payer( @@ -118,12 +123,7 @@ async fn register_forester( ); rpc.process_transaction(tx).await?; - println!("Registered for epoch {}", epoch); - - println!( - "Waiting for active phase (current slot: {}, active phase starts at: {})...", - current_slot, active_phase_start - ); + println!("Registered for epoch {}", current_epoch); // Warp to active phase if rpc.get_slot().await? 
< active_phase_start {
@@ -132,11 +132,11 @@ async fn register_forester(
             .expect("warp_to_slot to active phase");
     }
 
-    println!("Active phase reached");
+    println!("Active phase reached for epoch {}", current_epoch);
 
     // Finalize registration
     let finalize_ix =
-        create_finalize_registration_instruction(&forester_pubkey, &forester_pubkey, epoch);
+        create_finalize_registration_instruction(&forester_pubkey, &forester_pubkey, current_epoch);
 
     let (blockhash, _) = rpc.get_latest_blockhash().await?;
     let tx = Transaction::new_signed_with_payer(
@@ -164,7 +164,7 @@ async fn register_forester(
     use light_registry::protocol_config::state::EpochState;
 
     let epoch_struct = Epoch {
-        epoch,
+        epoch: current_epoch,
         epoch_pda: solana_sdk::pubkey::Pubkey::default(),
         forester_epoch_pda: solana_sdk::pubkey::Pubkey::default(),
         phases,
@@ -199,6 +199,7 @@ async fn test_compressible_ctoken_compression() {
         upgradeable_programs: vec![],
         limit_ledger_size: None,
         use_surfpool: true,
+        validator_args: vec![],
     })
     .await;
     let mut rpc = LightClient::new(LightClientConfig::local())
@@ -371,6 +372,7 @@ async fn test_compressible_ctoken_bootstrap() {
         upgradeable_programs: vec![],
         limit_ledger_size: None,
         use_surfpool: true,
+        validator_args: vec![],
     })
     .await;
 
@@ -386,6 +388,22 @@ async fn test_compressible_ctoken_bootstrap() {
     .await
     .expect("Failed to airdrop lamports");
 
+    // Count pre-existing compressible token accounts
+    let program_id = Pubkey::new_from_array(light_token_interface::LIGHT_TOKEN_PROGRAM_ID);
+    let pre_existing = rpc
+        .get_program_accounts(&program_id)
+        .await
+        .expect("Failed to get program accounts")
+        .into_iter()
+        .filter(|(_, account)| {
+            <light_token_interface::state::Token as borsh::BorshDeserialize>::try_from_slice(
+                &account.data,
+            )
+            .map(|t| t.is_token_account())
+            .unwrap_or(false)
+        })
+        .count();
+
     // Create mint
     let mint_seed = Keypair::new();
     let address_tree = rpc.get_address_tree_v2().tree;
@@ -428,7 +446,7 @@ async fn test_compressible_ctoken_bootstrap() {
     // Run bootstrap test with localhost
     run_bootstrap_test(
"http://localhost:8899".to_string(), - 3, + pre_existing + 3, Some((created_pubkeys, mint)), ) .await; @@ -473,7 +491,7 @@ async fn run_bootstrap_test( }); if expected_count > 0 { - // Wait for bootstrap to find expected number of accounts (with timeout) + // Wait for bootstrap to find at least expected number of accounts (with timeout) let start = tokio::time::Instant::now(); let timeout = Duration::from_secs(60); @@ -485,12 +503,12 @@ async fn run_bootstrap_test( sleep(Duration::from_millis(500)).await; } - // Assert bootstrap picked up all accounts assert_eq!( tracker.len(), expected_count, - "Bootstrap should have found all {} accounts", - expected_count + "Bootstrap should have found exactly {} accounts, found {}", + expected_count, + tracker.len() ); } else { // Mainnet test: wait a bit for bootstrap to run @@ -504,14 +522,13 @@ async fn run_bootstrap_test( if let Some((expected_pubkeys, expected_mint)) = expected_data { // Verify specific accounts (localhost test) - // Verify all created accounts are in tracker + // Verify all created accounts are in tracker and have correct data for pubkey in &expected_pubkeys { - let found = accounts.iter().any(|acc| acc.pubkey == *pubkey); - assert!(found, "Bootstrap should have found account {}", pubkey); - } + let account_state = accounts + .iter() + .find(|acc| acc.pubkey == *pubkey) + .unwrap_or_else(|| panic!("Bootstrap should have found account {}", pubkey)); - // Verify account data is correct - for account_state in &accounts { println!( "Verifying account {}: mint={:?}, lamports={}", account_state.pubkey, account_state.account.mint, account_state.lamports diff --git a/forester/tests/test_compressible_mint.rs b/forester/tests/test_compressible_mint.rs index cb16391cc1..0e36226fca 100644 --- a/forester/tests/test_compressible_mint.rs +++ b/forester/tests/test_compressible_mint.rs @@ -6,7 +6,7 @@ use forester::compressible::{ traits::CompressibleTracker, AccountSubscriber, SubscriptionConfig, }; -use 
forester_utils::{rpc_pool::SolanaRpcPoolBuilder, utils::wait_for_indexer}; +use forester_utils::rpc_pool::SolanaRpcPoolBuilder; use light_client::{ indexer::{AddressWithTree, Indexer}, local_test_validator::{spawn_validator, LightValidatorConfig}, @@ -23,13 +23,48 @@ use tokio::{ time::sleep, }; -/// Helper to create a compressed mint with decompression +/// Build an expected Mint for assertion comparison. +/// +/// Takes known values from test setup plus runtime values extracted from the on-chain account. +fn build_expected_mint( + mint_authority: &Pubkey, + decimals: u8, + mint_pda: &Pubkey, + mint_signer: &[u8; 32], + bump: u8, + version: u8, + compression: light_compressible::compression_info::CompressionInfo, +) -> Mint { + Mint { + base: BaseMint { + mint_authority: Some((*mint_authority).into()), + supply: 0, + decimals, + is_initialized: true, + freeze_authority: None, + }, + metadata: MintMetadata { + version, + mint_decompressed: true, + mint: (*mint_pda).into(), + mint_signer: *mint_signer, + bump, + }, + reserved: [0u8; 16], + account_type: ACCOUNT_TYPE_MINT, + compression, + extensions: None, + } +} + +/// Helper to create a compressed mint with decompression. +/// Returns (mint_pda, compression_address, mint_seed, bump). 
async fn create_decompressed_mint( rpc: &mut (impl Rpc + Indexer), payer: &Keypair, mint_authority: Pubkey, decimals: u8, -) -> (Pubkey, [u8; 32], Keypair) { +) -> (Pubkey, [u8; 32], Keypair, u8) { let mint_seed = Keypair::new(); let address_tree = rpc.get_address_tree_v2(); let output_queue = rpc.get_random_state_tree_info().unwrap().queue; @@ -84,7 +119,7 @@ async fn create_decompressed_mint( .await .expect("CreateMint should succeed"); - (mint_pda, compression_address, mint_seed) + (mint_pda, compression_address, mint_seed, bump) } /// Test that Mint bootstrap discovers decompressed mints @@ -107,6 +142,7 @@ async fn test_compressible_mint_bootstrap() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -124,19 +160,20 @@ async fn test_compressible_mint_bootstrap() { .await .expect("Failed to airdrop lamports"); - // Wait for indexer to be ready before making validity proof requests - wait_for_indexer(&rpc) + // Advance slot so the indexer is ready for validity proof requests + let current_slot = rpc.get_slot().await.unwrap(); + rpc.warp_to_slot(current_slot + 1) .await - .expect("Failed to wait for indexer"); + .expect("warp_to_slot"); // Create a decompressed mint - let (mint_pda, compression_address, mint_seed) = + let (mint_pda, compression_address, mint_seed, bump) = create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await; println!("Created decompressed mint at: {}", mint_pda); println!("Compression address: {:?}", compression_address); - // Verify mint exists on-chain + // Verify mint exists on-chain and matches expected structure let mint_account = rpc.get_account(mint_pda).await.unwrap(); assert!(mint_account.is_some(), "Mint should exist after creation"); @@ -144,41 +181,24 @@ async fn test_compressible_mint_bootstrap() { let mint_data = mint_account.unwrap(); let mint = Mint::deserialize(&mut &mint_data.data[..]).expect("Failed to deserialize Mint"); - // Extract 
runtime-specific values from deserialized mint - let compression = mint.compression; - let metadata_version = mint.metadata.version; - - // Derive the bump from mint_seed - let (_, bump) = find_mint_address(&mint_seed.pubkey()); - - // Build expected Mint - let expected_mint = Mint { - base: BaseMint { - mint_authority: Some(payer.pubkey().to_bytes().into()), - supply: 0, - decimals: 9, - is_initialized: true, - freeze_authority: None, - }, - metadata: MintMetadata { - version: metadata_version, - mint_decompressed: true, - mint: mint_pda.to_bytes().into(), - mint_signer: mint_seed.pubkey().to_bytes(), - bump, - }, - reserved: [0u8; 16], - account_type: ACCOUNT_TYPE_MINT, - compression, - extensions: None, - }; + // Build expected mint using known values plus runtime compression info + let expected_mint = build_expected_mint( + &payer.pubkey(), + 9, + &mint_pda, + &mint_seed.pubkey().to_bytes(), + bump, + mint.metadata.version, + mint.compression, + ); - assert_eq!(mint, expected_mint, "Mint should match expected state"); + assert_eq!(mint, expected_mint, "Mint should match expected structure"); - // Wait for indexer - wait_for_indexer(&rpc) + // Advance slot so the indexer processes the mint creation + let current_slot = rpc.get_slot().await.unwrap(); + rpc.warp_to_slot(current_slot + 1) .await - .expect("Failed to wait for indexer"); + .expect("warp_to_slot"); // Create tracker and run bootstrap let tracker = Arc::new(MintAccountTracker::new()); @@ -263,6 +283,7 @@ async fn test_compressible_mint_compression() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -280,13 +301,14 @@ async fn test_compressible_mint_compression() { .await .expect("Failed to airdrop lamports"); - // Wait for indexer to be ready before making validity proof requests - wait_for_indexer(&rpc) + // Advance slot so the indexer is ready for validity proof requests + let current_slot = rpc.get_slot().await.unwrap(); + 
rpc.warp_to_slot(current_slot + 1) .await - .expect("Failed to wait for indexer"); + .expect("warp_to_slot"); // Create a decompressed mint - let (mint_pda, compression_address, mint_seed) = + let (mint_pda, compression_address, mint_seed, bump) = create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await; println!("Created decompressed mint at: {}", mint_pda); @@ -303,9 +325,6 @@ async fn test_compressible_mint_compression() { let compression = mint.compression; let metadata_version = mint.metadata.version; - // Derive the bump from mint_seed - let (_, bump) = find_mint_address(&mint_seed.pubkey()); - // Build expected Mint let expected_mint = Mint { base: BaseMint { @@ -330,10 +349,11 @@ async fn test_compressible_mint_compression() { assert_eq!(mint, expected_mint, "Mint should match expected state"); - // Wait for indexer after mint creation - wait_for_indexer(&rpc) + // Advance slot so the indexer processes the mint creation + let current_slot = rpc.get_slot().await.unwrap(); + rpc.warp_to_slot(current_slot + 1) .await - .expect("Failed to wait for indexer"); + .expect("warp_to_slot"); // Create tracker and add the mint manually let tracker = Arc::new(MintAccountTracker::new()); @@ -366,55 +386,61 @@ async fn test_compressible_mint_compression() { let ready_accounts = tracker.get_ready_to_compress(current_slot); println!("Ready to compress: {} mints", ready_accounts.len()); - if !ready_accounts.is_empty() { - // Create compressor and compress - let compressor = - MintCompressor::new(rpc_pool.clone(), tracker.clone(), payer.insecure_clone()); - - println!("Compressing Mint..."); - let compress_result = compressor.compress_batch(&ready_accounts).await; - - let signature = compress_result.expect("Compression should succeed"); - println!("Compression transaction sent: {}", signature); - - // Wait for account to be closed - let start = tokio::time::Instant::now(); - let timeout = Duration::from_secs(30); - let mut account_closed = false; - - while 
start.elapsed() < timeout { - let mint_after = rpc - .get_account(mint_pda) - .await - .expect("Failed to query mint account"); - if mint_after.is_none() { - account_closed = true; - println!("Mint account closed successfully!"); - break; - } - sleep(Duration::from_millis(500)).await; - } + assert!( + !ready_accounts.is_empty(), + "Mint should be ready to compress with rent_payment=0" + ); + + // Create compressor and compress + let compressor = MintCompressor::new(rpc_pool.clone(), tracker.clone(), payer.insecure_clone()); + + println!("Compressing Mint..."); + let compress_result = compressor.compress_batch(&ready_accounts).await; + + let signature = compress_result.expect("Compression should succeed"); + println!("Compression transaction sent: {}", signature); - assert!( - account_closed, - "Mint account should be closed after compression" - ); + // Wait for account to be closed + let start = tokio::time::Instant::now(); + let timeout = Duration::from_secs(30); + let mut account_closed = false; - // Verify compressed mint still exists in the merkle tree - let compressed_after = rpc - .get_compressed_account(compression_address, None) + while start.elapsed() < timeout { + let mint_after = rpc + .get_account(mint_pda) .await - .unwrap() - .value; - assert!( - compressed_after.is_some(), - "Compressed mint should still exist after compression" - ); - - println!("Mint compression test completed successfully!"); - } else { - panic!("Mint should be ready to compress with rent_payment=0"); + .expect("Failed to query mint account"); + if mint_after.is_none() || mint_after.as_ref().map(|a| a.lamports) == Some(0) { + account_closed = true; + println!("Mint account closed successfully!"); + break; + } + sleep(Duration::from_millis(500)).await; } + + assert!( + account_closed, + "Mint account should be closed after compression" + ); + + // Advance slot so the indexer processes the compression transaction + let current_slot = rpc.get_slot().await.unwrap(); + 
rpc.warp_to_slot(current_slot + 1) + .await + .expect("warp_to_slot"); + + // Verify compressed mint still exists in the merkle tree + let compressed_after = rpc + .get_compressed_account(compression_address, None) + .await + .unwrap() + .value; + assert!( + compressed_after.is_some(), + "Compressed mint should still exist after compression" + ); + + println!("Mint compression test completed successfully!"); } /// Test AccountSubscriber for Mint accounts @@ -439,6 +465,7 @@ async fn test_compressible_mint_subscription() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -456,10 +483,11 @@ async fn test_compressible_mint_subscription() { .await .expect("Failed to airdrop lamports"); - // Wait for indexer to be ready - wait_for_indexer(&rpc) + // Advance slot so the indexer is ready + let current_slot = rpc.get_slot().await.unwrap(); + rpc.warp_to_slot(current_slot + 1) .await - .expect("Failed to wait for indexer"); + .expect("warp_to_slot"); // Setup tracker and subscribers let tracker = Arc::new(MintAccountTracker::new()); @@ -484,7 +512,7 @@ async fn test_compressible_mint_subscription() { sleep(Duration::from_secs(2)).await; // Create first decompressed mint (immediately compressible with rent_payment=0) - let (mint_pda_1, compression_address_1, _mint_seed_1) = + let (mint_pda_1, compression_address_1, _mint_seed_1, _bump_1) = create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await; println!("Created first decompressed mint at: {}", mint_pda_1); @@ -511,7 +539,7 @@ async fn test_compressible_mint_subscription() { println!("Tracker detected first mint via subscription"); // Create second decompressed mint - let (mint_pda_2, _compression_address_2, _mint_seed_2) = + let (mint_pda_2, _compression_address_2, _mint_seed_2, _bump_2) = create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 6).await; println!("Created second decompressed mint at: {}", mint_pda_2); @@ -625,6 +653,12 @@ 
async fn test_compressible_mint_subscription() { "Compressed mint should still exist after compression" ); + // Advance slot so the indexer processes the compression transaction + let current_slot = rpc.get_slot().await.unwrap(); + rpc.warp_to_slot(current_slot + 1) + .await + .expect("warp_to_slot"); + // Shutdown subscribers shutdown_tx .send(()) diff --git a/forester/tests/test_compressible_pda.rs b/forester/tests/test_compressible_pda.rs index 8faa38b820..e04f630432 100644 --- a/forester/tests/test_compressible_pda.rs +++ b/forester/tests/test_compressible_pda.rs @@ -16,7 +16,7 @@ use forester_utils::{ use light_client::{ indexer::Indexer, interface::{get_create_accounts_proof, CreateAccountsProofInput, InitializeRentFreeConfig}, - local_test_validator::{spawn_validator, LightValidatorConfig}, + local_test_validator::{spawn_validator, LightValidatorConfig, UpgradeableProgramConfig}, rpc::{LightClient, LightClientConfig, Rpc}, }; use light_compressed_account::address::derive_address; @@ -265,13 +265,14 @@ async fn test_compressible_pda_bootstrap() { enable_prover: true, wait_time: 60, sbf_programs: vec![], - upgradeable_programs: vec![( + upgradeable_programs: vec![UpgradeableProgramConfig::new( CSDK_TEST_PROGRAM_ID.to_string(), "../target/deploy/csdk_anchor_full_derived_test.so".to_string(), payer_pubkey_string(), )], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -457,13 +458,14 @@ async fn test_compressible_pda_compression() { enable_prover: true, wait_time: 60, sbf_programs: vec![], - upgradeable_programs: vec![( + upgradeable_programs: vec![UpgradeableProgramConfig::new( CSDK_TEST_PROGRAM_ID.to_string(), "../target/deploy/csdk_anchor_full_derived_test.so".to_string(), payer_pubkey_string(), )], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -693,13 +695,14 @@ async fn test_compressible_pda_subscription() { enable_prover: true, wait_time: 60, sbf_programs: vec![], - 
upgradeable_programs: vec![( + upgradeable_programs: vec![UpgradeableProgramConfig::new( CSDK_TEST_PROGRAM_ID.to_string(), "../target/deploy/csdk_anchor_full_derived_test.so".to_string(), payer_pubkey_string(), )], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; diff --git a/forester/tests/test_indexer_interface.rs b/forester/tests/test_indexer_interface.rs new file mode 100644 index 0000000000..6918ae1fd1 --- /dev/null +++ b/forester/tests/test_indexer_interface.rs @@ -0,0 +1,618 @@ +/// Test scenarios for indexer interface endpoints. +/// +/// This test creates various account types for testing the indexer's interface racing logic. +/// After running, use `cargo xtask export-photon-test-data --test-name indexer_interface` +/// to export transactions to the indexer's test snapshot directory. +/// +/// Scenarios covered: +/// 1. Light Token Mint - mint for token operations +/// 2. Token accounts (via light-token-client MintTo) - for getTokenAccountInterface +/// 3. Registered v2 address in batched address tree - for address tree verification +/// 4. 
Compressible token accounts - on-chain accounts that can be compressed +use std::collections::HashMap; + +use anchor_lang::Discriminator; +use borsh::BorshSerialize; +use create_address_test_program::create_invoke_cpi_instruction; +use light_client::{ + indexer::{photon_indexer::PhotonIndexer, AddressWithTree, Indexer}, + local_test_validator::{spawn_validator, LightValidatorConfig}, + rpc::{LightClient, LightClientConfig, Rpc}, +}; +use light_compressed_account::{ + address::derive_address, + instruction_data::{ + data::NewAddressParamsAssigned, with_readonly::InstructionDataInvokeCpiWithReadOnly, + }, +}; +use light_compressed_token::process_transfer::transfer_sdk::to_account_metas; +use light_test_utils::{ + actions::legacy::{ + create_compressible_token_account, + instructions::mint_action::{ + create_mint_action_instruction, MintActionParams, MintActionType, + }, + CreateCompressibleTokenAccountInputs, + }, + pack::pack_new_address_params_assigned, +}; +use light_token::instruction::{ + derive_mint_compressed_address, find_mint_address, CreateMint as CreateMintInstruction, + CreateMintParams, +}; +use light_token_client::{CreateAta, CreateMint, MintTo}; +use light_token_interface::state::TokenDataVersion; +use serial_test::serial; +use solana_sdk::{pubkey::Pubkey, signature::Keypair, signer::Signer, transaction::Transaction}; +/// Test that creates scenarios for Photon interface testing +/// +/// Run with: cargo test -p forester --test test_indexer_interface -- --nocapture +/// Then export: cargo xtask export-photon-test-data --test-name indexer_interface +#[tokio::test(flavor = "multi_thread", worker_threads = 4)] +#[serial] +async fn test_indexer_interface_scenarios() { + // Start validator with indexer, prover, and create_address_test_program + spawn_validator(LightValidatorConfig { + enable_indexer: true, + enable_prover: true, + wait_time: 0, + sbf_programs: vec![( + "FNt7byTHev1k5x2cXZLBr8TdWiC3zoP5vcnZR4P682Uy".to_string(), + 
"../target/deploy/create_address_test_program.so".to_string(), + )], + upgradeable_programs: vec![], + limit_ledger_size: None, + validator_args: vec![], + use_surfpool: true, + }) + .await; + + let mut rpc = LightClient::new(LightClientConfig::local()) + .await + .expect("Failed to create LightClient"); + rpc.get_latest_active_state_trees() + .await + .expect("Failed to get state trees"); + + let payer = rpc.get_payer().insecure_clone(); + rpc.airdrop_lamports(&payer.pubkey(), 100_000_000_000) + .await + .expect("Failed to airdrop to payer"); + + println!("\n========== PHOTON INTERFACE TEST ==========\n"); + println!("Payer: {}", payer.pubkey()); + + // ============ Scenario 1: Create Light Token Mint ============ + println!("\n=== Creating Light Token mint ==="); + + let (create_mint_sig, mint_pubkey) = CreateMint { + decimals: 9, + ..Default::default() + } + .execute(&mut rpc, &payer, &payer) + .await + .expect("Failed to create Light Token mint"); + println!( + "Light Token Mint: {} (sig: {})", + mint_pubkey, create_mint_sig + ); + + // ============ Scenario 2: Mint tokens to Bob and Charlie ============ + println!("\n=== Minting tokens via light-token-client ==="); + + let bob = Keypair::new(); + let charlie = Keypair::new(); + + // Create ATAs for Bob and Charlie + let (_, bob_ata) = CreateAta { + mint: mint_pubkey, + owner: bob.pubkey(), + idempotent: false, + } + .execute(&mut rpc, &payer) + .await + .expect("Failed to create Bob's ATA"); + + let (_, charlie_ata) = CreateAta { + mint: mint_pubkey, + owner: charlie.pubkey(), + idempotent: false, + } + .execute(&mut rpc, &payer) + .await + .expect("Failed to create Charlie's ATA"); + + // Mint tokens + let bob_mint_sig = MintTo { + mint: mint_pubkey, + destination: bob_ata, + amount: 1_000_000_000, + } + .execute(&mut rpc, &payer, &payer) + .await + .expect("Failed to mint to Bob"); + + let charlie_mint_sig = MintTo { + mint: mint_pubkey, + destination: charlie_ata, + amount: 500_000_000, + } + .execute(&mut 
rpc, &payer, &payer)
+    .await
+    .expect("Failed to mint to Charlie");
+
+    println!("Minted to Bob: {} (sig: {})", bob.pubkey(), bob_mint_sig);
+    println!(
+        "Minted to Charlie: {} (sig: {})",
+        charlie.pubkey(),
+        charlie_mint_sig
+    );
+
+    // ============ Scenario 3: Register v2 Address (using create_address_test_program) ============
+    println!("\n=== Registering v2 address in batched address tree ===");
+
+    // Use v2 (batched) address tree
+    let address_tree = rpc.get_address_tree_v2();
+
+    // Create a deterministic seed for the address
+    let address_seed: [u8; 32] = [42u8; 32];
+
+    // Derive address using v2 method (includes program ID)
+    let derived_address = derive_address(
+        &address_seed,
+        &address_tree.tree.to_bytes(),
+        &create_address_test_program::ID.to_bytes(),
+    );
+
+    println!("Derived v2 address: {:?}", derived_address);
+
+    // Get validity proof for the new address
+    let proof_result = rpc
+        .indexer()
+        .unwrap()
+        .get_validity_proof(
+            vec![],
+            vec![AddressWithTree {
+                address: derived_address,
+                tree: address_tree.tree,
+            }],
+            None,
+        )
+        .await
+        .unwrap();
+
+    // Build new address params
+    let new_address_params = vec![NewAddressParamsAssigned {
+        seed: address_seed,
+        address_queue_pubkey: address_tree.tree.into(), // For batched trees, queue = tree
+        address_merkle_tree_pubkey: address_tree.tree.into(),
+        address_merkle_tree_root_index: proof_result.value.get_address_root_indices()[0],
+        assigned_account_index: None,
+    }];
+
+    // Pack the address params for the instruction
+    let mut remaining_accounts = HashMap::<Pubkey, usize>::new();
+    let packed_new_address_params =
+        pack_new_address_params_assigned(&new_address_params, &mut remaining_accounts);
+
+    // Build instruction data for create_address_test_program
+    let ix_data = InstructionDataInvokeCpiWithReadOnly::new(
+        create_address_test_program::ID.into(),
+        255,
+        proof_result.value.proof.0,
+    )
+    .mode_v1()
+    .with_with_transaction_hash(true)
+    .with_new_addresses(&packed_new_address_params);
+
let remaining_accounts_metas = to_account_metas(remaining_accounts); + + // Create the instruction using the test program + let instruction = create_invoke_cpi_instruction( + payer.pubkey(), + [ + light_system_program::instruction::InvokeCpiWithReadOnly::DISCRIMINATOR.to_vec(), + ix_data.try_to_vec().unwrap(), + ] + .concat(), + remaining_accounts_metas, + None, + ); + + let instructions = vec![ + solana_sdk::compute_budget::ComputeBudgetInstruction::set_compute_unit_limit(1_000_000), + instruction, + ]; + let address_sig = rpc + .create_and_send_transaction(&instructions, &payer.pubkey(), &[&payer]) + .await + .unwrap(); + println!( + "Registered v2 address: {} (sig: {})", + hex::encode(derived_address), + address_sig + ); + + // ============ Scenario 4: Decompressed Mint (CreateMint with rent_payment=0) ============ + // This creates a compressed mint that is immediately decompressed to an on-chain CMint account. + // The compressed account only contains the 32-byte mint_pda reference (DECOMPRESSED_PDA_DISCRIMINATOR). + // Full mint data is on-chain in the CMint account owned by LIGHT_TOKEN_PROGRAM_ID. 
+ println!("\n=== Creating decompressed mint (on-chain CMint) ==="); + + let decompressed_mint_seed = Keypair::new(); + let output_queue = rpc.get_random_state_tree_info().unwrap().queue; + + // Use v2 address tree for compressed mints + let mint_address_tree = rpc.get_address_tree_v2(); + + // Derive compression address for decompressed mint + let decompressed_mint_compression_address = + derive_mint_compressed_address(&decompressed_mint_seed.pubkey(), &mint_address_tree.tree); + + let (decompressed_mint_pda, decompressed_mint_bump) = + find_mint_address(&decompressed_mint_seed.pubkey()); + + // Get validity proof for the address + let rpc_result = rpc + .get_validity_proof( + vec![], + vec![AddressWithTree { + address: decompressed_mint_compression_address, + tree: mint_address_tree.tree, + }], + None, + ) + .await + .unwrap() + .value; + + // Create decompressed mint (CreateMint always creates both compressed + on-chain CMint) + let decompressed_mint_params = CreateMintParams { + decimals: 6, + address_merkle_tree_root_index: rpc_result.addresses[0].root_index, + mint_authority: payer.pubkey(), + proof: rpc_result.proof.0.unwrap(), + compression_address: decompressed_mint_compression_address, + mint: decompressed_mint_pda, + bump: decompressed_mint_bump, + freeze_authority: None, + extensions: None, + rent_payment: 0, // Immediately compressible + write_top_up: 0, + }; + + let create_decompressed_mint_builder = CreateMintInstruction::new( + decompressed_mint_params, + decompressed_mint_seed.pubkey(), + payer.pubkey(), + mint_address_tree.tree, + output_queue, + ); + let ix = create_decompressed_mint_builder.instruction().unwrap(); + + let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap(); + let tx = Transaction::new_signed_with_payer( + &[ix], + Some(&payer.pubkey()), + &[&payer, &decompressed_mint_seed], + blockhash, + ); + let decompressed_mint_sig = rpc.process_transaction(tx).await.unwrap(); + println!( + "Created decompressed mint (CMint on-chain): 
{} (sig: {})", + decompressed_mint_pda, decompressed_mint_sig + ); + + // ============ Scenario 5: Fully Compressed Mint (CreateMint + CompressAndCloseMint) ============ + // This creates a compressed mint and then compresses it, so full mint data is in the compressed DB. + // This is for testing getMintInterface cold path (no on-chain data needed). + println!("\n=== Creating fully compressed mint ==="); + + let compressed_mint_seed = Keypair::new(); + + // Derive compression address for fully compressed mint + let compressed_mint_compression_address = + derive_mint_compressed_address(&compressed_mint_seed.pubkey(), &mint_address_tree.tree); + + let (compressed_mint_pda, compressed_mint_bump) = + find_mint_address(&compressed_mint_seed.pubkey()); + + // Get validity proof for the new address + let rpc_result = rpc + .get_validity_proof( + vec![], + vec![AddressWithTree { + address: compressed_mint_compression_address, + tree: mint_address_tree.tree, + }], + None, + ) + .await + .unwrap() + .value; + + // Create compressed mint (will be decompressed initially) + let compressed_mint_params = CreateMintParams { + decimals: 9, + address_merkle_tree_root_index: rpc_result.addresses[0].root_index, + mint_authority: payer.pubkey(), + proof: rpc_result.proof.0.unwrap(), + compression_address: compressed_mint_compression_address, + mint: compressed_mint_pda, + bump: compressed_mint_bump, + freeze_authority: Some(payer.pubkey()), // Add freeze authority for variety + extensions: None, + rent_payment: 0, // Immediately compressible + write_top_up: 0, + }; + + let create_compressed_mint_builder = CreateMintInstruction::new( + compressed_mint_params, + compressed_mint_seed.pubkey(), + payer.pubkey(), + mint_address_tree.tree, + output_queue, + ); + let ix = create_compressed_mint_builder.instruction().unwrap(); + + let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap(); + let tx = Transaction::new_signed_with_payer( + &[ix], + Some(&payer.pubkey()), + &[&payer, 
&compressed_mint_seed], + blockhash, + ); + let create_mint_sig = rpc.process_transaction(tx).await.unwrap(); + println!( + "Created mint (step 1/2): {} (sig: {})", + compressed_mint_pda, create_mint_sig + ); + + // Now compress and close the mint to make it fully compressed + println!("Compressing mint via CompressAndCloseMint..."); + + let compress_params = MintActionParams { + compressed_mint_address: compressed_mint_compression_address, + mint_seed: compressed_mint_seed.pubkey(), + authority: payer.pubkey(), + payer: payer.pubkey(), + actions: vec![MintActionType::CompressAndCloseMint { idempotent: false }], + new_mint: None, + }; + + let compress_ix = create_mint_action_instruction(&mut rpc, compress_params) + .await + .expect("Failed to create CompressAndCloseMint instruction"); + + let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap(); + let tx = Transaction::new_signed_with_payer( + &[compress_ix], + Some(&payer.pubkey()), + &[&payer], + blockhash, + ); + let compress_mint_sig = rpc.process_transaction(tx).await.unwrap(); + println!( + "Compressed mint (step 2/2): {} (sig: {})", + compressed_mint_pda, compress_mint_sig + ); + + // ============ Scenario 6: Compressible Token Account ============ + println!("\n=== Creating compressible token account ==="); + + let compressible_owner = Keypair::new(); + rpc.airdrop_lamports(&compressible_owner.pubkey(), 1_000_000_000) + .await + .expect("Failed to airdrop to compressible owner"); + + let compressible_token_account = create_compressible_token_account( + &mut rpc, + CreateCompressibleTokenAccountInputs { + owner: compressible_owner.pubkey(), + mint: decompressed_mint_pda, + num_prepaid_epochs: 2, + payer: &payer, + token_account_keypair: None, + lamports_per_write: Some(100), + token_account_version: TokenDataVersion::ShaFlat, + }, + ) + .await + .expect("Failed to create compressible token account"); + println!( + "Created compressible token account: {}", + compressible_token_account + ); + 
println!("Compressible owner: {}", compressible_owner.pubkey()); + + // ============ Summary ============ + println!("\n========== ADDRESSES SUMMARY ==========\n"); + println!("Light Token Mint: {}", mint_pubkey); + println!("Registered v2 Address: {}", hex::encode(derived_address)); + println!( + "Decompressed Mint PDA (on-chain CMint): {}", + decompressed_mint_pda + ); + println!( + "Decompressed Mint Address: {:?}", + decompressed_mint_compression_address + ); + println!( + "Fully Compressed Mint PDA (in compressed DB): {}", + compressed_mint_pda + ); + println!( + "Fully Compressed Mint Address: {:?}", + compressed_mint_compression_address + ); + println!("Bob (compressed token holder): {}", bob.pubkey()); + println!("Charlie (compressed token holder): {}", charlie.pubkey()); + println!("Compressible owner: {}", compressible_owner.pubkey()); + println!("Compressible token account: {}", compressible_token_account); + + // ============ Test Interface Endpoints ============ + println!("\n========== TESTING INTERFACE ENDPOINTS ==========\n"); + + // Create PhotonIndexer to test the interface endpoints + let photon_indexer = PhotonIndexer::new("http://localhost:8784".to_string(), None); + + // ============ Test 1: getAccountInterface with compressible token account (on-chain) ============ + println!("Test 1: getAccountInterface with compressible token account (on-chain)..."); + let compressible_account_interface = photon_indexer + .get_account_interface(&compressible_token_account, None) + .await + .expect("getAccountInterface should not error for compressible account") + .value + .expect("Compressible token account should be found"); + + assert!( + compressible_account_interface.is_hot(), + "Compressible account should be hot (on-chain)" + ); + assert!( + compressible_account_interface.cold.is_none(), + "On-chain account should not have cold context" + ); + assert_eq!( + compressible_account_interface.key, compressible_token_account, + "Key should match the queried 
address" + ); + assert!( + compressible_account_interface.account.lamports > 0, + "On-chain account should have lamports > 0" + ); + println!(" PASSED: Compressible account resolved from on-chain"); + + // ============ Test 2: getTokenAccountInterface with compressible token account (on-chain) ============ + println!("\nTest 2: getTokenAccountInterface with compressible token account (on-chain)..."); + let compressible_token_interface = photon_indexer + .get_token_account_interface(&compressible_token_account, None) + .await + .expect("getTokenAccountInterface should not error") + .value + .expect("Compressible token account should be found via token interface"); + + assert!( + compressible_token_interface.account.is_hot(), + "Token account should be hot (on-chain)" + ); + assert!( + compressible_token_interface.account.cold.is_none(), + "On-chain token account should not have cold context" + ); + assert_eq!( + compressible_token_interface.account.key, compressible_token_account, + "Token account key should match" + ); + assert_eq!( + compressible_token_interface.token.mint, decompressed_mint_pda, + "Token mint should match decompressed mint" + ); + assert_eq!( + compressible_token_interface.token.owner, + compressible_owner.pubkey(), + "Token owner should match compressible owner" + ); + println!(" PASSED: Token account interface resolved with correct token data"); + + // ============ Test 3: getMultipleAccountInterfaces batch lookup ============ + println!("\nTest 3: getMultipleAccountInterfaces batch lookup..."); + let batch_addresses = vec![&decompressed_mint_pda, &compressible_token_account]; + + let batch_response = photon_indexer + .get_multiple_account_interfaces(batch_addresses.clone(), None) + .await + .expect("getMultipleAccountInterfaces should not error"); + + assert_eq!( + batch_response.value.len(), + 2, + "Batch response should have exactly 2 results" + ); + + // First result: decompressed mint + let batch_mint = batch_response.value[0] + .as_ref() 
+ .expect("Decompressed mint should be found in batch"); + assert!(batch_mint.is_hot(), "Batch mint should be hot (on-chain)"); + assert_eq!( + batch_mint.key, decompressed_mint_pda, + "Batch mint key should match" + ); + assert!( + batch_mint.account.lamports > 0, + "Batch mint should have lamports > 0" + ); + + // Second result: compressible token account + let batch_token = batch_response.value[1] + .as_ref() + .expect("Compressible account should be found in batch"); + assert!( + batch_token.is_hot(), + "Batch token account should be hot (on-chain)" + ); + assert_eq!( + batch_token.key, compressible_token_account, + "Batch token account key should match" + ); + assert!( + batch_token.account.lamports > 0, + "Batch token account should have lamports > 0" + ); + println!(" PASSED: Batch lookup returned correct results"); + + // ============ Test 4: Verify fully compressed mint via getAccountInterface returns None ============ + // Fully compressed mints (after CompressAndCloseMint) have full mint data in the compressed DB. + // Their address column contains the compression_address, not the mint_pda. + // Since they don't have the [255; 8] discriminator, onchain_pubkey is not set. + // Therefore getAccountInterface by mint_pda should return None. + println!("\nTest 4: getAccountInterface with fully compressed mint PDA..."); + let compressed_via_account = photon_indexer + .get_account_interface(&compressed_mint_pda, None) + .await + .expect("getAccountInterface should not error"); + + assert!( + compressed_via_account.value.is_none(), + "Fully compressed mint should NOT be found via getAccountInterface" + ); + println!(" PASSED: Fully compressed mint correctly returns None via getAccountInterface"); + + // ============ Test 5: Verify decompressed mint found via getAccountInterface (generic linking) ============ + // Decompressed mints have discriminator [255; 8] + 32-byte mint_pda in data. 
+ // The generic linking feature extracts this as onchain_pubkey during ingestion. + // Therefore getAccountInterface(mint_pda) should find it via onchain_pubkey column. + println!("\nTest 5: getAccountInterface with decompressed mint PDA (generic linking)..."); + let decompressed_via_account = photon_indexer + .get_account_interface(&decompressed_mint_pda, None) + .await + .expect("getAccountInterface should not error"); + + let decompressed_account = decompressed_via_account + .value + .expect("Decompressed mint should be found via getAccountInterface (generic linking)"); + + // The decompressed mint should be found from on-chain (CMint account exists) + assert!( + decompressed_account.is_hot(), + "Decompressed mint via getAccountInterface should be hot (on-chain)" + ); + assert!( + decompressed_account.cold.is_none(), + "Decompressed mint via getAccountInterface should not have cold context" + ); + assert_eq!( + decompressed_account.key, decompressed_mint_pda, + "Key should match the queried mint PDA" + ); + assert!( + decompressed_account.account.lamports > 0, + "Decompressed mint should have lamports > 0" + ); + println!(" PASSED: Decompressed mint found via getAccountInterface with generic linking"); + + println!("\n========== ALL TESTS PASSED =========="); + println!("\nTo export transactions, run:"); + println!("cargo xtask export-photon-test-data --test-name indexer_interface"); +} diff --git a/js/stateless.js/tests/unit/version.test.ts b/js/stateless.js/tests/unit/version.test.ts index 97db06c8be..a0cc7100aa 100644 --- a/js/stateless.js/tests/unit/version.test.ts +++ b/js/stateless.js/tests/unit/version.test.ts @@ -20,8 +20,9 @@ describe('Version System', () => { }); it('should respect LIGHT_PROTOCOL_VERSION environment variable', () => { + // Default is V2 when no env var is set (see constants.ts line 31) const expectedVersion = - process.env.LIGHT_PROTOCOL_VERSION || VERSION.V1; + process.env.LIGHT_PROTOCOL_VERSION || VERSION.V2; 
expect(featureFlags.version).toBe(expectedVersion); }); diff --git a/justfile b/justfile index cb686dc191..dbbb007cdf 100644 --- a/justfile +++ b/justfile @@ -32,12 +32,26 @@ build: programs::build js::build cli::build test: program-tests::test sdk-tests::test js::test # === Lint & Format === -lint: lint-rust js::lint +lint: lint-rust lint-readmes js::lint lint-rust: cargo +nightly fmt --all -- --check cargo clippy --workspace --all-features --all-targets -- -D warnings +# Check READMEs are up-to-date with cargo-rdme +lint-readmes: + #!/usr/bin/env bash + set -e + echo "Checking READMEs are up-to-date..." + if ! command -v cargo-rdme &> /dev/null; then + cargo install cargo-rdme + fi + for toml in $(find program-libs sdk-libs -name '.cargo-rdme.toml' -type f); do + crate_dir=$(dirname "$toml") + echo "Checking README in $crate_dir..." + (cd "$crate_dir" && cargo rdme --check --no-fail-on-warnings) + done + format: cargo +nightly fmt --all just js format diff --git a/program-libs/CLAUDE.md b/program-libs/CLAUDE.md index 81359b5151..3803b58e81 100644 --- a/program-libs/CLAUDE.md +++ b/program-libs/CLAUDE.md @@ -63,6 +63,7 @@ Some crates depend on external Light Protocol crates not in program-libs: ## Testing Unit tests run with `cargo test`: + ```bash cargo test -p light-hasher --all-features cargo test -p light-compressed-account --all-features diff --git a/program-libs/compressed-account/src/constants.rs b/program-libs/compressed-account/src/constants.rs index adea8113e9..ea14221e09 100644 --- a/program-libs/compressed-account/src/constants.rs +++ b/program-libs/compressed-account/src/constants.rs @@ -6,6 +6,9 @@ pub const ACCOUNT_COMPRESSION_PROGRAM_ID: [u8; 32] = /// ID of the light-system program. pub const LIGHT_SYSTEM_PROGRAM_ID: [u8; 32] = pubkey_array!("SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7"); +/// ID of the light-registry program. 
+pub const LIGHT_REGISTRY_PROGRAM_ID: [u8; 32] = + pubkey_array!("Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX"); #[deprecated(since = "0.9.0", note = "Use LIGHT_SYSTEM_PROGRAM_ID instead")] pub const SYSTEM_PROGRAM_ID: [u8; 32] = LIGHT_SYSTEM_PROGRAM_ID; pub const REGISTERED_PROGRAM_PDA: [u8; 32] = diff --git a/program-tests/compressed-token-test/tests/v1.rs b/program-tests/compressed-token-test/tests/v1.rs index 5b9072b0c2..81c01c82fd 100644 --- a/program-tests/compressed-token-test/tests/v1.rs +++ b/program-tests/compressed-token-test/tests/v1.rs @@ -4893,6 +4893,7 @@ async fn test_transfer_with_photon_and_batched_tree() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; diff --git a/program-tests/justfile b/program-tests/justfile index 18454e4823..b63c5fcdaa 100644 --- a/program-tests/justfile +++ b/program-tests/justfile @@ -6,10 +6,126 @@ default: build: cd create-address-test-program && cargo build-sbf -test: build +# === Full test suite (mirrors CI) === + +test: build test-account-compression test-registry test-system test-system-cpi test-system-cpi-v2 test-compressed-token test-e2e + +# === Individual test packages === + +test-account-compression: RUSTFLAGS="-D warnings" cargo test-sbf -p account-compression-test + +test-registry: RUSTFLAGS="-D warnings" cargo test-sbf -p registry-test - RUSTFLAGS="-D warnings" cargo test-sbf -p system-test + +# System program tests +test-system: test-system-address test-system-compression test-system-re-init + +test-system-address: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-test -- test_with_address + +test-system-compression: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-test -- test_with_compression + +test-system-re-init: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-test --test test_re_init_cpi_account + +# System CPI tests (v1) +test-system-cpi: RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-test - RUSTFLAGS="-D warnings" cargo 
test-sbf -p compressed-token-test + +# System CPI tests (v2) +test-system-cpi-v2: test-system-cpi-v2-main test-system-cpi-v2-event-parse test-system-cpi-v2-functional + +test-system-cpi-v2-main: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- --skip functional_ --skip event::parse + +test-system-cpi-v2-event-parse: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- event::parse + +test-system-cpi-v2-functional: test-system-cpi-v2-functional-read-only test-system-cpi-v2-functional-account-infos + +test-system-cpi-v2-functional-read-only: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- functional_read_only + +test-system-cpi-v2-functional-account-infos: + RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- functional_account_infos + +# Compressed token tests +test-compressed-token: test-compressed-token-unit test-compressed-token-v1 test-compressed-token-mint test-compressed-token-light-token test-compressed-token-transfer2 + +test-compressed-token-unit: + RUSTFLAGS="-D warnings" cargo test -p light-compressed-token + +test-compressed-token-v1: + RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test v1 + +test-compressed-token-mint: + RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test mint + +test-compressed-token-light-token: + RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test light_token + +test-compressed-token-transfer2: + RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test transfer2 + +# Compressed token batched tree test (flaky, may need retries) +test-compressed-token-batched-tree: + RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree + +# E2E tests +test-e2e: RUSTFLAGS="-D warnings" cargo test-sbf -p e2e-test + +# E2E extended tests (requires building compressed-token-small first) +test-e2e-extended: build-compressed-token-small + RUSTFLAGS="-D warnings" cargo 
test-sbf -p e2e-test -- --test test_10_all + +# Pinocchio unit tests +test-pinocchio: + RUSTFLAGS="-D warnings" cargo test -p light-system-program-pinocchio + +# === Build targets === + +build-compressed-token-small: + pnpm --filter @lightprotocol/programs run build-compressed-token-small + +# === CI-equivalent grouped tests === + +# Matches CI: account-compression-and-registry +ci-account-compression-and-registry: test-account-compression test-registry + +# Matches CI: system-address +ci-system-address: test-system-address test-e2e test-e2e-extended test-compressed-token-light-token + +# Matches CI: system-compression +ci-system-compression: test-system-compression test-system-re-init + +# Matches CI: compressed-token-and-e2e +ci-compressed-token-and-e2e: test-compressed-token-unit test-compressed-token-v1 test-compressed-token-mint + +# Matches CI: compressed-token-batched-tree (with retry for flaky test) +ci-compressed-token-batched-tree: + #!/usr/bin/env bash + set -euo pipefail + attempt=1 + max_attempts=3 + until RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree; do + attempt=$((attempt + 1)) + if [ $attempt -gt $max_attempts ]; then + echo "Test failed after $max_attempts attempts" + exit 1 + fi + echo "Attempt $attempt/$max_attempts failed, retrying in 5s..." 
+ sleep 5 + done + echo "Test passed on attempt $attempt" + +# Matches CI: system-cpi +ci-system-cpi: test-system-cpi test-pinocchio test-system-cpi-v2-main test-system-cpi-v2-event-parse test-compressed-token-transfer2 + +# Matches CI: system-cpi-v2-functional-read-only +ci-system-cpi-v2-functional-read-only: test-system-cpi-v2-functional-read-only + +# Matches CI: system-cpi-v2-functional-account-infos +ci-system-cpi-v2-functional-account-infos: test-system-cpi-v2-functional-account-infos diff --git a/program-tests/system-cpi-v2-test/tests/event.rs b/program-tests/system-cpi-v2-test/tests/event.rs index d25554354c..9425d72144 100644 --- a/program-tests/system-cpi-v2-test/tests/event.rs +++ b/program-tests/system-cpi-v2-test/tests/event.rs @@ -101,6 +101,7 @@ async fn parse_batched_event_functional() { is_compress: false, compress_or_decompress_lamports: None, pubkey_array: vec![env.v2_state_trees[0].output_queue.into()], + ata_owners: vec![], }, address_sequence_numbers: Vec::new(), input_sequence_numbers: Vec::new(), @@ -227,6 +228,7 @@ async fn parse_batched_event_functional() { }) .collect::>(), output_compressed_accounts: output_accounts.to_vec(), + ata_owners: vec![], sequence_numbers: vec![MerkleTreeSequenceNumberV1 { tree_pubkey: env.v2_state_trees[0].merkle_tree.into(), // queue_pubkey: env.v2_state_trees[0].output_queue, @@ -411,6 +413,7 @@ async fn parse_batched_event_functional() { env.v2_state_trees[0].merkle_tree.into(), env.v2_state_trees[0].output_queue.into(), ], + ata_owners: vec![], }, address_sequence_numbers: vec![MerkleTreeSequenceNumber { tree_pubkey: env.v2_address_trees[0].into(), @@ -496,6 +499,7 @@ async fn parse_multiple_batched_events_functional() { is_compress: false, compress_or_decompress_lamports: None, pubkey_array: vec![env.v2_state_trees[0].output_queue.into()], + ata_owners: vec![], }, address_sequence_numbers: Vec::new(), input_sequence_numbers: Vec::new(), @@ -540,6 +544,7 @@ async fn 
generate_photon_test_data_multiple_events() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }) .await; @@ -589,6 +594,7 @@ async fn generate_photon_test_data_multiple_events() { is_compress: false, compress_or_decompress_lamports: None, pubkey_array: vec![env.v2_state_trees[0].output_queue.into()], + ata_owners: vec![], }, address_sequence_numbers: Vec::new(), input_sequence_numbers: Vec::new(), diff --git a/scripts/devenv.sh b/scripts/devenv.sh index 605bce12b7..a656a8de25 100755 --- a/scripts/devenv.sh +++ b/scripts/devenv.sh @@ -74,8 +74,11 @@ if [ -z "${CI:-}" ]; then alias light="${LIGHT_PROTOCOL_TOPLEVEL}/cli/test_bin/run" fi -# Define GOROOT for Go. export GOROOT="${LIGHT_PROTOCOL_TOPLEVEL}/.local/go" +export GOTOOLCHAIN=local +unset GOBIN +# Disable mise entirely to prevent its hooks from overriding our paths. +export MISE_DISABLED=1 # Ensure Rust binaries are in PATH PATH="${CARGO_HOME}/bin:${PATH}" diff --git a/scripts/devenv/versions.sh b/scripts/devenv/versions.sh index c578cacb05..710ee85831 100755 --- a/scripts/devenv/versions.sh +++ b/scripts/devenv/versions.sh @@ -13,7 +13,7 @@ export SOLANA_VERSION="2.2.15" export ANCHOR_VERSION="0.31.1" export JQ_VERSION="1.8.0" export PHOTON_VERSION="0.51.2" -export PHOTON_COMMIT="83b46c9aef58a134edef2eb8e506c1bc6604e876" +export PHOTON_COMMIT="9c8ce2d9a4116b643ec0cd2cfcf695339f8e1a3f" export REDIS_VERSION="8.0.1" export ANCHOR_TAG="anchor-v${ANCHOR_VERSION}" diff --git a/sdk-libs/client/README.md b/sdk-libs/client/README.md index 8c46cbc68c..42363ecba5 100644 --- a/sdk-libs/client/README.md +++ b/sdk-libs/client/README.md @@ -45,6 +45,7 @@ async fn main() -> Result<(), Box> { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }; spawn_validator(config).await; diff --git a/sdk-libs/client/src/indexer/base58.rs b/sdk-libs/client/src/indexer/base58.rs index a2b66a123f..46b3953aa3 100644 --- 
a/sdk-libs/client/src/indexer/base58.rs +++ b/sdk-libs/client/src/indexer/base58.rs @@ -38,10 +38,13 @@ pub fn decode_base58_to_fixed_array(input: &str) -> Result<[u8; let mut buffer = [0u8; N]; let decoded_len = bs58::decode(input) .onto(&mut buffer) - .map_err(|_| IndexerError::InvalidResponseData)?; + .map_err(|e| IndexerError::base58_decode_error("base58", e))?; if decoded_len != N { - return Err(IndexerError::InvalidResponseData); + return Err(IndexerError::base58_decode_error( + "base58", + format!("expected {} bytes, got {}", N, decoded_len), + )); } Ok(buffer) diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs index fa03606dfe..cc3167459c 100644 --- a/sdk-libs/client/src/indexer/mod.rs +++ b/sdk-libs/client/src/indexer/mod.rs @@ -14,12 +14,13 @@ pub use error::IndexerError; pub use indexer_trait::Indexer; pub use response::{Context, Items, ItemsWithCursor, Response}; pub use types::{ - AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs, AddressQueueData, - AddressWithTree, CompressedAccount, CompressedTokenAccount, Hash, InputQueueData, MerkleProof, + AccountInterface, AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs, + AddressQueueData, AddressWithTree, ColdContext, ColdData, CompressedAccount, + CompressedTokenAccount, Hash, InputQueueData, InterfaceTreeInfo, MerkleProof, MerkleProofWithContext, NewAddressProofWithContext, NextTreeInfo, OutputQueueData, OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, RootIndex, - SignatureWithMetadata, StateMerkleTreeAccounts, StateQueueData, TokenBalance, TreeInfo, - ValidityProofWithContext, + SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData, + TokenAccountInterface, TokenBalance, TreeInfo, ValidityProofWithContext, }; mod options; pub use options::*; diff --git a/sdk-libs/client/src/indexer/options.rs b/sdk-libs/client/src/indexer/options.rs index dbbf699fb5..87fc8f4e8a 100644 
--- a/sdk-libs/client/src/indexer/options.rs +++ b/sdk-libs/client/src/indexer/options.rs @@ -2,7 +2,7 @@ use photon_api::models::{FilterSelector, Memcmp}; use solana_account_decoder_client_types::UiDataSliceConfig; use solana_pubkey::Pubkey; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct GetCompressedTokenAccountsByOwnerOrDelegateOptions { pub mint: Option, pub cursor: Option, diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs index a220c16554..c0ba68a686 100644 --- a/sdk-libs/client/src/indexer/photon_indexer.rs +++ b/sdk-libs/client/src/indexer/photon_indexer.rs @@ -2,6 +2,7 @@ use std::{fmt::Debug, time::Duration}; use async_trait::async_trait; use bs58; +use light_sdk_types::constants::STATE_MERKLE_TREE_CANOPY_DEPTH; use photon_api::{ apis::configuration::{ApiKey, Configuration}, models::GetCompressedAccountsByOwnerPostRequestParams, @@ -10,7 +11,8 @@ use solana_pubkey::Pubkey; use tracing::{error, trace, warn}; use super::types::{ - CompressedAccount, CompressedTokenAccount, OwnerBalance, SignatureWithMetadata, TokenBalance, + AccountInterface, CompressedAccount, CompressedTokenAccount, OwnerBalance, + SignatureWithMetadata, TokenAccountInterface, TokenBalance, }; use crate::indexer::{ base58::Base58Conversions, @@ -895,8 +897,8 @@ impl Indexer for PhotonIndexer { .value .items .iter() - .map(SignatureWithMetadata::try_from) - .collect::, IndexerError>>()?; + .map(SignatureWithMetadata::from) + .collect::>(); Ok(Response { context: Context { @@ -947,8 +949,8 @@ impl Indexer for PhotonIndexer { .value .items .iter() - .map(SignatureWithMetadata::try_from) - .collect::, IndexerError>>()?; + .map(SignatureWithMetadata::from) + .collect::>(); let cursor = api_response.value.cursor; @@ -1003,8 +1005,8 @@ impl Indexer for PhotonIndexer { .value .items .iter() - .map(SignatureWithMetadata::try_from) - .collect::, IndexerError>>()?; + .map(SignatureWithMetadata::from) + .collect::>(); let 
cursor = api_response.value.cursor; @@ -1060,8 +1062,8 @@ impl Indexer for PhotonIndexer { .value .items .iter() - .map(SignatureWithMetadata::try_from) - .collect::, IndexerError>>()?; + .map(SignatureWithMetadata::from) + .collect::>(); let cursor = api_response.value.cursor; @@ -1173,7 +1175,14 @@ impl Indexer for PhotonIndexer { .iter() .map(|x| { let mut proof_vec = x.proof.clone(); - proof_vec.truncate(proof_vec.len() - 10); // Remove canopy + if proof_vec.len() < STATE_MERKLE_TREE_CANOPY_DEPTH { + return Err(IndexerError::InvalidParameters(format!( + "Merkle proof length ({}) is less than canopy depth ({})", + proof_vec.len(), + STATE_MERKLE_TREE_CANOPY_DEPTH, + ))); + } + proof_vec.truncate(proof_vec.len() - STATE_MERKLE_TREE_CANOPY_DEPTH); let proof = proof_vec .iter() @@ -1330,7 +1339,15 @@ impl Indexer for PhotonIndexer { .map(|x: &String| Hash::from_base58(x)) .collect::, IndexerError>>()?; - proof_vec.truncate(proof_vec.len() - 10); // Remove canopy + const ADDRESS_TREE_CANOPY_DEPTH: usize = 10; + if proof_vec.len() < ADDRESS_TREE_CANOPY_DEPTH { + return Err(IndexerError::InvalidParameters(format!( + "Address proof length ({}) is less than canopy depth ({})", + proof_vec.len(), + ADDRESS_TREE_CANOPY_DEPTH, + ))); + } + proof_vec.truncate(proof_vec.len() - ADDRESS_TREE_CANOPY_DEPTH); let mut proof_arr = [[0u8; 32]; 16]; proof_arr.copy_from_slice(&proof_vec); @@ -1778,3 +1795,198 @@ impl Indexer for PhotonIndexer { } } } + +// ============ Interface Methods ============ +// These methods use the Interface endpoints that race hot (on-chain) and cold (compressed) lookups + +impl PhotonIndexer { + /// Get account data from either on-chain or compressed sources. + /// Races both lookups and returns the result with the higher slot. 
+ pub async fn get_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let request = photon_api::models::GetAccountInterfacePostRequest::new( + photon_api::models::GetAccountInterfacePostRequestParams::new(address.to_string()), + ); + + let result = photon_api::apis::default_api::get_account_interface_post( + &self.configuration, + request, + ) + .await?; + + let api_response = Self::extract_result_with_error_check( + "get_account_interface", + result.error, + result.result.map(|r| *r), + )?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let account = match api_response.value { + Some(boxed) => Some(AccountInterface::try_from(boxed.as_ref())?), + None => None, + }; + + Ok(Response { + context: Context { + slot: api_response.context.slot, + }, + value: account, + }) + }) + .await + } + + /// Get token account data from either on-chain or compressed sources. + /// Races both lookups and returns the result with the higher slot. 
+ pub async fn get_token_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let request = photon_api::models::GetTokenAccountInterfacePostRequest::new( + photon_api::models::GetTokenAccountInterfacePostRequestParams::new( + address.to_string(), + ), + ); + + let result = photon_api::apis::default_api::get_token_account_interface_post( + &self.configuration, + request, + ) + .await?; + + let api_response = Self::extract_result_with_error_check( + "get_token_account_interface", + result.error, + result.result.map(|r| *r), + )?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let account = match api_response.value { + Some(boxed) => Some(TokenAccountInterface::try_from(boxed.as_ref())?), + None => None, + }; + + Ok(Response { + context: Context { + slot: api_response.context.slot, + }, + value: account, + }) + }) + .await + } + + /// Get Associated Token Account data from either on-chain or compressed sources. + /// Derives the Light Protocol ATA address from owner+mint, then races hot/cold lookups. 
+ pub async fn get_associated_token_account_interface( + &self, + owner: &Pubkey, + mint: &Pubkey, + config: Option, + ) -> Result>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let request = photon_api::models::GetAtaInterfacePostRequest::new( + photon_api::models::GetAtaInterfacePostRequestParams::new( + owner.to_string(), + mint.to_string(), + ), + ); + + let result = + photon_api::apis::default_api::get_ata_interface_post(&self.configuration, request) + .await?; + + let api_response = Self::extract_result_with_error_check( + "get_associated_token_account_interface", + result.error, + result.result.map(|r| *r), + )?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let account = match api_response.value { + Some(boxed) => Some(TokenAccountInterface::try_from(boxed.as_ref())?), + None => None, + }; + + Ok(Response { + context: Context { + slot: api_response.context.slot, + }, + value: account, + }) + }) + .await + } + + /// Get multiple account interfaces in a batch. + /// Returns a vector where each element corresponds to an input address. 
+ pub async fn get_multiple_account_interfaces( + &self, + addresses: Vec<&Pubkey>, + config: Option, + ) -> Result>>, IndexerError> { + let config = config.unwrap_or_default(); + self.retry(config.retry_config, || async { + let address_strings: Vec = + addresses.iter().map(|addr| addr.to_string()).collect(); + + let request = photon_api::models::GetMultipleAccountInterfacesPostRequest::new( + photon_api::models::GetMultipleAccountInterfacesPostRequestParams::new( + address_strings, + ), + ); + + let result = photon_api::apis::default_api::get_multiple_account_interfaces_post( + &self.configuration, + request, + ) + .await?; + + let api_response = Self::extract_result_with_error_check( + "get_multiple_account_interfaces", + result.error, + result.result.map(|r| *r), + )?; + + if api_response.context.slot < config.slot { + return Err(IndexerError::IndexerNotSyncedToSlot); + } + + let accounts: Result>, IndexerError> = api_response + .value + .into_iter() + .map(|maybe_acc| { + maybe_acc + .map(|ai| AccountInterface::try_from(&ai)) + .transpose() + }) + .collect(); + + Ok(Response { + context: Context { + slot: api_response.context.slot, + }, + value: accounts?, + }) + }) + .await + } +} diff --git a/sdk-libs/client/src/indexer/types.rs b/sdk-libs/client/src/indexer/types.rs deleted file mode 100644 index 2cd0f6c8d8..0000000000 --- a/sdk-libs/client/src/indexer/types.rs +++ /dev/null @@ -1,1038 +0,0 @@ -use borsh::BorshDeserialize; -use light_account::PackedAccounts; -use light_compressed_account::{ - compressed_account::{ - CompressedAccount as ProgramCompressedAccount, CompressedAccountData, - CompressedAccountWithMerkleContext, - }, - instruction_data::compressed_proof::CompressedProof, - TreeType, -}; -use light_indexed_merkle_tree::array::IndexedElement; -use light_sdk::instruction::{PackedAddressTreeInfo, PackedStateTreeInfo, ValidityProof}; -use light_token::compat::{AccountState, TokenData}; -use light_token_interface::state::ExtensionStruct; -use 
num_bigint::BigUint; -use solana_pubkey::Pubkey; -use tracing::warn; - -use super::{ - base58::{decode_base58_option_to_pubkey, decode_base58_to_fixed_array}, - tree_info::QUEUE_TREE_MAPPING, - IndexerError, -}; - -pub struct ProofOfLeaf { - pub leaf: [u8; 32], - pub proof: Vec<[u8; 32]>, -} - -pub type Address = [u8; 32]; -pub type Hash = [u8; 32]; - -#[derive(Debug, Clone, PartialEq)] -pub struct QueueInfo { - pub tree: Pubkey, - pub queue: Pubkey, - pub queue_type: u8, - pub queue_size: u64, -} - -#[derive(Debug, Clone, PartialEq, Default)] -pub struct QueueInfoResult { - pub queues: Vec, - pub slot: u64, -} - -#[derive(Debug, Clone, PartialEq, Default)] -pub struct OutputQueueData { - pub leaf_indices: Vec, - pub account_hashes: Vec<[u8; 32]>, - pub old_leaves: Vec<[u8; 32]>, - pub first_queue_index: u64, - /// The tree's next_index - where new leaves will be appended - pub next_index: u64, - /// Pre-computed hash chains per ZKP batch (from on-chain) - pub leaves_hash_chains: Vec<[u8; 32]>, -} - -/// V2 Input Queue Data -#[derive(Debug, Clone, PartialEq, Default)] -pub struct InputQueueData { - pub leaf_indices: Vec, - pub account_hashes: Vec<[u8; 32]>, - pub current_leaves: Vec<[u8; 32]>, - pub tx_hashes: Vec<[u8; 32]>, - /// Pre-computed nullifiers from indexer - pub nullifiers: Vec<[u8; 32]>, - pub first_queue_index: u64, - /// Pre-computed hash chains per ZKP batch (from on-chain) - pub leaves_hash_chains: Vec<[u8; 32]>, -} - -/// State queue data with shared tree nodes for output and input queues -#[derive(Debug, Clone, PartialEq, Default)] -pub struct StateQueueData { - /// Shared deduplicated tree nodes for state queues (output + input) - /// node_index encoding: (level << 56) | position - pub nodes: Vec, - pub node_hashes: Vec<[u8; 32]>, - /// Initial root for the state tree (shared by output and input queues) - pub initial_root: [u8; 32], - /// Sequence number of the root - pub root_seq: u64, - /// Output queue data (if requested) - pub output_queue: 
Option, - /// Input queue data (if requested) - pub input_queue: Option, -} - -/// V2 Address Queue Data with deduplicated nodes -/// Proofs are reconstructed from `nodes`/`node_hashes` using `low_element_indices` -#[derive(Debug, Clone, PartialEq, Default)] -pub struct AddressQueueData { - pub addresses: Vec<[u8; 32]>, - pub low_element_values: Vec<[u8; 32]>, - pub low_element_next_values: Vec<[u8; 32]>, - pub low_element_indices: Vec, - pub low_element_next_indices: Vec, - /// Deduplicated node indices - encoding: (level << 56) | position - pub nodes: Vec, - /// Hashes corresponding to each node index - pub node_hashes: Vec<[u8; 32]>, - pub initial_root: [u8; 32], - pub leaves_hash_chains: Vec<[u8; 32]>, - pub subtrees: Vec<[u8; 32]>, - pub start_index: u64, - pub root_seq: u64, -} - -impl AddressQueueData { - /// Reconstruct a merkle proof for a given low_element_index from the deduplicated nodes. - /// The tree_height is needed to know how many levels to traverse. - pub fn reconstruct_proof( - &self, - address_idx: usize, - tree_height: u8, - ) -> Result, IndexerError> { - let leaf_index = self.low_element_indices[address_idx]; - let mut proof = Vec::with_capacity(tree_height as usize); - let mut pos = leaf_index; - - for level in 0..tree_height { - let sibling_pos = if pos.is_multiple_of(2) { - pos + 1 - } else { - pos - 1 - }; - let sibling_idx = Self::encode_node_index(level, sibling_pos); - - if let Some(hash_idx) = self.nodes.iter().position(|&n| n == sibling_idx) { - proof.push(self.node_hashes[hash_idx]); - } else { - return Err(IndexerError::MissingResult { - context: "reconstruct_proof".to_string(), - message: format!( - "Missing proof node at level {} position {} (encoded: {})", - level, sibling_pos, sibling_idx - ), - }); - } - pos /= 2; - } - - Ok(proof) - } - - /// Reconstruct all proofs for all addresses - pub fn reconstruct_all_proofs( - &self, - tree_height: u8, - ) -> Result>, IndexerError> { - (0..self.addresses.len()) - .map(|i| 
self.reconstruct_proof(i, tree_height)) - .collect() - } - - /// Encode node index: (level << 56) | position - #[inline] - fn encode_node_index(level: u8, position: u64) -> u64 { - ((level as u64) << 56) | position - } -} - -/// V2 Queue Elements Result with deduplicated node data -#[derive(Debug, Clone, PartialEq, Default)] -pub struct QueueElementsResult { - pub state_queue: Option, - pub address_queue: Option, -} - -#[derive(Debug, Clone, PartialEq, Default)] -pub struct MerkleProofWithContext { - pub proof: Vec<[u8; 32]>, - pub root: [u8; 32], - pub leaf_index: u64, - pub leaf: [u8; 32], - pub merkle_tree: [u8; 32], - pub root_seq: u64, - pub tx_hash: Option<[u8; 32]>, - pub account_hash: [u8; 32], -} - -#[derive(Debug, Clone, PartialEq, Default)] -pub struct MerkleProof { - pub hash: [u8; 32], - pub leaf_index: u64, - pub merkle_tree: Pubkey, - pub proof: Vec<[u8; 32]>, - pub root_seq: u64, - pub root: [u8; 32], -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct AddressWithTree { - pub address: Address, - pub tree: Pubkey, -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct NewAddressProofWithContext { - pub merkle_tree: Pubkey, - pub root: [u8; 32], - pub root_seq: u64, - pub low_address_index: u64, - pub low_address_value: [u8; 32], - pub low_address_next_index: u64, - pub low_address_next_value: [u8; 32], - pub low_address_proof: Vec<[u8; 32]>, - pub new_low_element: Option>, - pub new_element: Option>, - pub new_element_next_value: Option, -} - -#[derive(Debug, Default, Clone, PartialEq)] -pub struct ValidityProofWithContext { - pub proof: ValidityProof, - pub accounts: Vec, - pub addresses: Vec, -} - -// TODO: add get_public_inputs -// -> to make it easier to use light-verifier with get_validity_proof() -impl ValidityProofWithContext { - pub fn get_root_indices(&self) -> Vec> { - self.accounts - .iter() - .map(|account| account.root_index.root_index()) - .collect() - } - - pub fn get_address_root_indices(&self) -> Vec { - self.addresses 
- .iter() - .map(|address| address.root_index) - .collect() - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct AccountProofInputs { - pub hash: [u8; 32], - pub root: [u8; 32], - pub root_index: RootIndex, - pub leaf_index: u64, - pub tree_info: TreeInfo, -} - -#[derive(Clone, Default, Copy, Debug, PartialEq)] -pub struct RootIndex { - proof_by_index: bool, - root_index: u16, -} - -impl RootIndex { - pub fn new_none() -> Self { - Self { - proof_by_index: true, - root_index: 0, - } - } - - pub fn new_some(root_index: u16) -> Self { - Self { - proof_by_index: false, - root_index, - } - } - - pub fn proof_by_index(&self) -> bool { - self.proof_by_index - } - - pub fn root_index(&self) -> Option { - if !self.proof_by_index { - Some(self.root_index) - } else { - None - } - } -} - -impl AccountProofInputs { - pub fn from_api_model( - value: &photon_api::models::AccountProofInputs, - ) -> Result { - let root_index = { - if value.root_index.prove_by_index { - RootIndex::new_none() - } else { - RootIndex::new_some(value.root_index.root_index) - } - }; - Ok(Self { - hash: decode_base58_to_fixed_array(&value.hash)?, - root: decode_base58_to_fixed_array(&value.root)?, - root_index, - leaf_index: value.leaf_index, - tree_info: TreeInfo::from_api_model(&value.merkle_context)?, - }) - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct AddressProofInputs { - pub address: [u8; 32], - pub root: [u8; 32], - pub root_index: u16, - pub tree_info: TreeInfo, -} - -impl AddressProofInputs { - pub fn from_api_model( - value: &photon_api::models::AddressProofInputs, - ) -> Result { - Ok(Self { - address: decode_base58_to_fixed_array(&value.address)?, - root: decode_base58_to_fixed_array(&value.root)?, - root_index: value.root_index, - tree_info: TreeInfo::from_api_model(&value.merkle_context)?, - }) - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct PackedStateTreeInfos { - pub packed_tree_infos: Vec, - pub output_tree_index: u8, -} - 
-#[derive(Clone, Default, Debug, PartialEq)] -pub struct PackedTreeInfos { - pub state_trees: Option, - pub address_trees: Vec, -} - -impl ValidityProofWithContext { - pub fn pack_tree_infos(&self, packed_accounts: &mut PackedAccounts) -> PackedTreeInfos { - let mut packed_tree_infos = Vec::new(); - let mut address_trees = Vec::new(); - let mut output_tree_index = None; - for account in self.accounts.iter() { - // Pack TreeInfo - let merkle_tree_pubkey_index = packed_accounts.insert_or_get(account.tree_info.tree); - let queue_pubkey_index = packed_accounts.insert_or_get(account.tree_info.queue); - let tree_info_packed = PackedStateTreeInfo { - root_index: account.root_index.root_index, - merkle_tree_pubkey_index, - queue_pubkey_index, - leaf_index: account.leaf_index as u32, - prove_by_index: account.root_index.proof_by_index(), - }; - packed_tree_infos.push(tree_info_packed); - - // If a next Merkle tree exists the Merkle tree is full -> use the next Merkle tree for new state. - // Else use the current Merkle tree for new state. - if let Some(next) = account.tree_info.next_tree_info { - // SAFETY: account will always have a state Merkle tree context. - // pack_output_tree_index only panics on an address Merkle tree context. - let index = next.pack_output_tree_index(packed_accounts).unwrap(); - if output_tree_index.is_none() { - output_tree_index = Some(index); - } - } else { - // SAFETY: account will always have a state Merkle tree context. - // pack_output_tree_index only panics on an address Merkle tree context. 
- let index = account - .tree_info - .pack_output_tree_index(packed_accounts) - .unwrap(); - if output_tree_index.is_none() { - output_tree_index = Some(index); - } - } - } - - for address in self.addresses.iter() { - // Pack AddressTreeInfo - let address_merkle_tree_pubkey_index = - packed_accounts.insert_or_get(address.tree_info.tree); - let address_queue_pubkey_index = packed_accounts.insert_or_get(address.tree_info.queue); - address_trees.push(PackedAddressTreeInfo { - address_merkle_tree_pubkey_index, - address_queue_pubkey_index, - root_index: address.root_index, - }); - } - let packed_tree_infos = if packed_tree_infos.is_empty() { - None - } else { - Some(PackedStateTreeInfos { - packed_tree_infos, - output_tree_index: output_tree_index.unwrap(), - }) - }; - PackedTreeInfos { - state_trees: packed_tree_infos, - address_trees, - } - } - - pub fn from_api_model( - value: photon_api::models::CompressedProofWithContext, - num_hashes: usize, - ) -> Result { - let proof = ValidityProof::new(Some(CompressedProof { - a: value - .compressed_proof - .a - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - b: value - .compressed_proof - .b - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - c: value - .compressed_proof - .c - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - })); - - // Convert account data from V1 flat arrays to V2 structured format - let accounts = (0..num_hashes) - .map(|i| { - let tree_pubkey = - Pubkey::new_from_array(decode_base58_to_fixed_array(&value.merkle_trees[i])?); - let tree_info = super::tree_info::QUEUE_TREE_MAPPING - .get(&value.merkle_trees[i]) - .ok_or(IndexerError::InvalidResponseData)?; - - Ok(AccountProofInputs { - hash: decode_base58_to_fixed_array(&value.leaves[i])?, - root: decode_base58_to_fixed_array(&value.roots[i])?, - root_index: RootIndex::new_some(value.root_indices[i] as u16), - leaf_index: value.leaf_indices[i] as u64, - tree_info: TreeInfo { - tree_type: 
tree_info.tree_type, - tree: tree_pubkey, - queue: tree_info.queue, - cpi_context: tree_info.cpi_context, - next_tree_info: None, - }, - }) - }) - .collect::, IndexerError>>()?; - - // Convert address data from remaining indices (if any) - let addresses = if value.root_indices.len() > num_hashes { - (num_hashes..value.root_indices.len()) - .map(|i| { - let tree_pubkey = Pubkey::new_from_array(decode_base58_to_fixed_array( - &value.merkle_trees[i], - )?); - let tree_info = super::tree_info::QUEUE_TREE_MAPPING - .get(&value.merkle_trees[i]) - .ok_or(IndexerError::InvalidResponseData)?; - - Ok(AddressProofInputs { - address: decode_base58_to_fixed_array(&value.leaves[i])?, // Address is in leaves - root: decode_base58_to_fixed_array(&value.roots[i])?, - root_index: value.root_indices[i] as u16, - tree_info: TreeInfo { - tree_type: tree_info.tree_type, - tree: tree_pubkey, - queue: tree_info.queue, - cpi_context: tree_info.cpi_context, - next_tree_info: None, - }, - }) - }) - .collect::, IndexerError>>()? 
- } else { - Vec::new() - }; - - Ok(Self { - proof, - accounts, - addresses, - }) - } - - pub fn from_api_model_v2( - value: photon_api::models::CompressedProofWithContextV2, - ) -> Result { - let proof = if let Some(proof) = value.compressed_proof { - ValidityProof::new(Some(CompressedProof { - a: proof - .a - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - b: proof - .b - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - c: proof - .c - .try_into() - .map_err(|_| IndexerError::InvalidResponseData)?, - })) - } else { - ValidityProof::new(None) - }; - - let accounts = value - .accounts - .iter() - .map(AccountProofInputs::from_api_model) - .collect::, IndexerError>>()?; - - let addresses = value - .addresses - .iter() - .map(AddressProofInputs::from_api_model) - .collect::, IndexerError>>()?; - - Ok(Self { - proof, - accounts, - addresses, - }) - } -} - -#[derive(Clone, Copy, Default, Debug, PartialEq)] -pub struct NextTreeInfo { - pub cpi_context: Option, - pub queue: Pubkey, - pub tree: Pubkey, - pub tree_type: TreeType, -} - -impl NextTreeInfo { - /// Get the index of the output tree in the packed accounts. - /// For StateV1, it returns the index of the tree account. - /// For StateV2, it returns the index of the queue account. - /// (For V2 trees new state is inserted into the output queue. - /// The forester updates the tree from the queue asynchronously.) 
- pub fn pack_output_tree_index( - &self, - packed_accounts: &mut PackedAccounts, - ) -> Result { - match self.tree_type { - TreeType::StateV1 => Ok(packed_accounts.insert_or_get(self.tree)), - TreeType::StateV2 => Ok(packed_accounts.insert_or_get(self.queue)), - _ => Err(IndexerError::InvalidPackTreeType), - } - } - pub fn from_api_model( - value: &photon_api::models::TreeContextInfo, - ) -> Result { - Ok(Self { - tree_type: TreeType::from(value.tree_type as u64), - tree: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.tree)?), - queue: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.queue)?), - cpi_context: decode_base58_option_to_pubkey(&value.cpi_context)?, - }) - } -} - -impl TryFrom<&photon_api::models::TreeContextInfo> for NextTreeInfo { - type Error = IndexerError; - - fn try_from(value: &photon_api::models::TreeContextInfo) -> Result { - Ok(Self { - tree_type: TreeType::from(value.tree_type as u64), - tree: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.tree)?), - queue: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.queue)?), - cpi_context: decode_base58_option_to_pubkey(&value.cpi_context)?, - }) - } -} - -#[derive(Clone, Copy, Default, Debug, PartialEq)] -pub struct TreeInfo { - pub cpi_context: Option, - pub next_tree_info: Option, - pub queue: Pubkey, - pub tree: Pubkey, - pub tree_type: TreeType, -} - -impl TreeInfo { - /// Get the index of the output tree in the packed accounts. - /// For StateV1, it returns the index of the tree account. - /// For StateV2, it returns the index of the queue account. - /// (For V2 trees new state is inserted into the output queue. - /// The forester updates the tree from the queue asynchronously.) 
- pub fn pack_output_tree_index( - &self, - packed_accounts: &mut PackedAccounts, - ) -> Result { - match self.tree_type { - TreeType::StateV1 => Ok(packed_accounts.insert_or_get(self.tree)), - TreeType::StateV2 => Ok(packed_accounts.insert_or_get(self.queue)), - _ => Err(IndexerError::InvalidPackTreeType), - } - } - - pub fn get_output_pubkey(&self) -> Result { - match self.tree_type { - TreeType::StateV1 => Ok(self.tree), - TreeType::StateV2 => Ok(self.queue), - _ => Err(IndexerError::InvalidPackTreeType), - } - } - - pub fn from_api_model( - value: &photon_api::models::MerkleContextV2, - ) -> Result { - Ok(Self { - tree_type: TreeType::from(value.tree_type as u64), - tree: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.tree)?), - queue: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.queue)?), - cpi_context: decode_base58_option_to_pubkey(&value.cpi_context)?, - next_tree_info: value - .next_tree_context - .as_ref() - .map(|tree_info| NextTreeInfo::from_api_model(tree_info.as_ref())) - .transpose()?, - }) - } - - pub fn to_light_merkle_context( - &self, - leaf_index: u32, - prove_by_index: bool, - ) -> light_compressed_account::compressed_account::MerkleContext { - use light_compressed_account::Pubkey; - light_compressed_account::compressed_account::MerkleContext { - merkle_tree_pubkey: Pubkey::new_from_array(self.tree.to_bytes()), - queue_pubkey: Pubkey::new_from_array(self.queue.to_bytes()), - leaf_index, - tree_type: self.tree_type, - prove_by_index, - } - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct CompressedAccount { - pub address: Option<[u8; 32]>, - pub data: Option, - pub hash: [u8; 32], - pub lamports: u64, - pub leaf_index: u32, - pub owner: Pubkey, - pub prove_by_index: bool, - pub seq: Option, - pub slot_created: u64, - pub tree_info: TreeInfo, -} - -impl TryFrom for CompressedAccount { - type Error = IndexerError; - - fn try_from(account: CompressedAccountWithMerkleContext) -> Result { - let hash = account 
- .hash() - .map_err(|_| IndexerError::InvalidResponseData)?; - // Breaks light-program-test - let tree_info = QUEUE_TREE_MAPPING.get( - &Pubkey::new_from_array(account.merkle_context.merkle_tree_pubkey.to_bytes()) - .to_string(), - ); - let cpi_context = if let Some(tree_info) = tree_info { - tree_info.cpi_context - } else { - warn!("Cpi context not found in queue tree mapping"); - None - }; - Ok(CompressedAccount { - address: account.compressed_account.address, - data: account.compressed_account.data, - hash, - lamports: account.compressed_account.lamports, - leaf_index: account.merkle_context.leaf_index, - tree_info: TreeInfo { - tree: Pubkey::new_from_array(account.merkle_context.merkle_tree_pubkey.to_bytes()), - queue: Pubkey::new_from_array(account.merkle_context.queue_pubkey.to_bytes()), - tree_type: account.merkle_context.tree_type, - cpi_context, - next_tree_info: None, - }, - owner: Pubkey::new_from_array(account.compressed_account.owner.to_bytes()), - prove_by_index: account.merkle_context.prove_by_index, - seq: None, - slot_created: u64::MAX, - }) - } -} - -impl From for CompressedAccountWithMerkleContext { - fn from(account: CompressedAccount) -> Self { - use light_compressed_account::Pubkey; - let compressed_account = ProgramCompressedAccount { - owner: Pubkey::new_from_array(account.owner.to_bytes()), - lamports: account.lamports, - address: account.address, - data: account.data, - }; - - let merkle_context = account - .tree_info - .to_light_merkle_context(account.leaf_index, account.prove_by_index); - - CompressedAccountWithMerkleContext { - compressed_account, - merkle_context, - } - } -} - -impl TryFrom<&photon_api::models::AccountV2> for CompressedAccount { - type Error = IndexerError; - - fn try_from(account: &photon_api::models::AccountV2) -> Result { - let data = if let Some(data) = &account.data { - Ok::, IndexerError>(Some(CompressedAccountData { - discriminator: data.discriminator.to_le_bytes(), - data: base64::decode_config(&data.data, 
base64::STANDARD_NO_PAD) - .map_err(|_| IndexerError::InvalidResponseData)?, - data_hash: decode_base58_to_fixed_array(&data.data_hash)?, - })) - } else { - Ok::, IndexerError>(None) - }?; - - let owner = Pubkey::new_from_array(decode_base58_to_fixed_array(&account.owner)?); - let address = account - .address - .as_ref() - .map(|address| decode_base58_to_fixed_array(address)) - .transpose()?; - let hash = decode_base58_to_fixed_array(&account.hash)?; - - let tree_info = TreeInfo { - tree: Pubkey::new_from_array(decode_base58_to_fixed_array( - &account.merkle_context.tree, - )?), - queue: Pubkey::new_from_array(decode_base58_to_fixed_array( - &account.merkle_context.queue, - )?), - tree_type: TreeType::from(account.merkle_context.tree_type as u64), - cpi_context: decode_base58_option_to_pubkey(&account.merkle_context.cpi_context)?, - next_tree_info: account - .merkle_context - .next_tree_context - .as_ref() - .map(|ctx| NextTreeInfo::try_from(ctx.as_ref())) - .transpose()?, - }; - - Ok(CompressedAccount { - owner, - address, - data, - hash, - lamports: account.lamports, - leaf_index: account.leaf_index, - seq: account.seq, - slot_created: account.slot_created, - tree_info, - prove_by_index: account.prove_by_index, - }) - } -} - -impl TryFrom<&photon_api::models::Account> for CompressedAccount { - type Error = IndexerError; - - fn try_from(account: &photon_api::models::Account) -> Result { - let data = if let Some(data) = &account.data { - Ok::, IndexerError>(Some(CompressedAccountData { - discriminator: data.discriminator.to_le_bytes(), - data: base64::decode_config(&data.data, base64::STANDARD_NO_PAD) - .map_err(|_| IndexerError::InvalidResponseData)?, - data_hash: decode_base58_to_fixed_array(&data.data_hash)?, - })) - } else { - Ok::, IndexerError>(None) - }?; - let owner = Pubkey::new_from_array(decode_base58_to_fixed_array(&account.owner)?); - let address = account - .address - .as_ref() - .map(|address| decode_base58_to_fixed_array(address)) - .transpose()?; - 
let hash = decode_base58_to_fixed_array(&account.hash)?; - let seq = account.seq; - let slot_created = account.slot_created; - let lamports = account.lamports; - let leaf_index = account.leaf_index; - - let tree_info = QUEUE_TREE_MAPPING - .get(&account.tree) - .ok_or(IndexerError::InvalidResponseData)?; - - let tree_info = TreeInfo { - cpi_context: tree_info.cpi_context, - queue: tree_info.queue, - tree_type: tree_info.tree_type, - next_tree_info: None, - tree: tree_info.tree, - }; - - Ok(CompressedAccount { - owner, - address, - data, - hash, - lamports, - leaf_index, - seq, - slot_created, - tree_info, - prove_by_index: false, - }) - } -} - -#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] -pub struct StateMerkleTreeAccounts { - pub merkle_tree: Pubkey, - pub nullifier_queue: Pubkey, - pub cpi_context: Pubkey, - pub tree_type: TreeType, -} - -#[allow(clippy::from_over_into)] -impl Into for StateMerkleTreeAccounts { - fn into(self) -> TreeInfo { - TreeInfo { - tree: self.merkle_tree, - queue: self.nullifier_queue, - cpi_context: Some(self.cpi_context), - tree_type: self.tree_type, - next_tree_info: None, - } - } -} - -#[derive(Debug, Clone, Copy)] -pub struct AddressMerkleTreeAccounts { - pub merkle_tree: Pubkey, - pub queue: Pubkey, -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct CompressedTokenAccount { - /// Token-specific data (mint, owner, amount, delegate, state, tlv) - pub token: TokenData, - /// General account information (address, hash, lamports, merkle context, etc.) 
- pub account: CompressedAccount, -} - -impl TryFrom<&photon_api::models::TokenAccount> for CompressedTokenAccount { - type Error = IndexerError; - - fn try_from(token_account: &photon_api::models::TokenAccount) -> Result { - let account = CompressedAccount::try_from(token_account.account.as_ref())?; - - let token = TokenData { - mint: Pubkey::new_from_array(decode_base58_to_fixed_array( - &token_account.token_data.mint, - )?), - owner: Pubkey::new_from_array(decode_base58_to_fixed_array( - &token_account.token_data.owner, - )?), - amount: token_account.token_data.amount, - delegate: token_account - .token_data - .delegate - .as_ref() - .map(|d| decode_base58_to_fixed_array(d).map(Pubkey::new_from_array)) - .transpose()?, - state: match token_account.token_data.state { - photon_api::models::AccountState::Initialized => AccountState::Initialized, - photon_api::models::AccountState::Frozen => AccountState::Frozen, - }, - tlv: token_account - .token_data - .tlv - .as_ref() - .map(|tlv| { - let bytes = base64::decode_config(tlv, base64::STANDARD_NO_PAD) - .map_err(|_| IndexerError::InvalidResponseData)?; - Vec::::deserialize(&mut bytes.as_slice()) - .map_err(|_| IndexerError::InvalidResponseData) - }) - .transpose()?, - }; - - Ok(CompressedTokenAccount { token, account }) - } -} - -impl TryFrom<&photon_api::models::TokenAccountV2> for CompressedTokenAccount { - type Error = IndexerError; - - fn try_from(token_account: &photon_api::models::TokenAccountV2) -> Result { - let account = CompressedAccount::try_from(token_account.account.as_ref())?; - - let token = TokenData { - mint: Pubkey::new_from_array(decode_base58_to_fixed_array( - &token_account.token_data.mint, - )?), - owner: Pubkey::new_from_array(decode_base58_to_fixed_array( - &token_account.token_data.owner, - )?), - amount: token_account.token_data.amount, - delegate: token_account - .token_data - .delegate - .as_ref() - .map(|d| decode_base58_to_fixed_array(d).map(Pubkey::new_from_array)) - .transpose()?, - 
state: match token_account.token_data.state { - photon_api::models::AccountState::Initialized => AccountState::Initialized, - photon_api::models::AccountState::Frozen => AccountState::Frozen, - }, - tlv: token_account - .token_data - .tlv - .as_ref() - .map(|tlv| { - let bytes = base64::decode_config(tlv, base64::STANDARD_NO_PAD) - .map_err(|_| IndexerError::InvalidResponseData)?; - Vec::::deserialize(&mut bytes.as_slice()) - .map_err(|_| IndexerError::InvalidResponseData) - }) - .transpose()?, - }; - - Ok(CompressedTokenAccount { token, account }) - } -} - -#[allow(clippy::from_over_into)] -impl Into for CompressedTokenAccount { - fn into(self) -> light_token::compat::TokenDataWithMerkleContext { - let compressed_account = CompressedAccountWithMerkleContext::from(self.account); - - light_token::compat::TokenDataWithMerkleContext { - token_data: self.token, - compressed_account, - } - } -} - -#[allow(clippy::from_over_into)] -impl Into> - for super::response::Response> -{ - fn into(self) -> Vec { - self.value - .items - .into_iter() - .map( - |token_account| light_token::compat::TokenDataWithMerkleContext { - token_data: token_account.token, - compressed_account: CompressedAccountWithMerkleContext::from( - token_account.account.clone(), - ), - }, - ) - .collect::>() - } -} - -impl TryFrom for CompressedTokenAccount { - type Error = IndexerError; - - fn try_from( - token_data_with_context: light_token::compat::TokenDataWithMerkleContext, - ) -> Result { - let account = CompressedAccount::try_from(token_data_with_context.compressed_account)?; - - Ok(CompressedTokenAccount { - token: token_data_with_context.token_data, - account, - }) - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct TokenBalance { - pub balance: u64, - pub mint: Pubkey, -} - -impl TryFrom<&photon_api::models::TokenBalance> for TokenBalance { - type Error = IndexerError; - - fn try_from(token_balance: &photon_api::models::TokenBalance) -> Result { - Ok(TokenBalance { - balance: 
token_balance.balance, - mint: Pubkey::new_from_array(decode_base58_to_fixed_array(&token_balance.mint)?), - }) - } -} - -#[derive(Debug, Clone, PartialEq, Default)] -pub struct SignatureWithMetadata { - pub block_time: u64, - pub signature: String, - pub slot: u64, -} - -impl TryFrom<&photon_api::models::SignatureInfo> for SignatureWithMetadata { - type Error = IndexerError; - - fn try_from(sig_info: &photon_api::models::SignatureInfo) -> Result { - Ok(SignatureWithMetadata { - block_time: sig_info.block_time, - signature: sig_info.signature.clone(), - slot: sig_info.slot, - }) - } -} - -#[derive(Clone, Default, Debug, PartialEq)] -pub struct OwnerBalance { - pub balance: u64, - pub owner: Pubkey, -} - -impl TryFrom<&photon_api::models::OwnerBalance> for OwnerBalance { - type Error = IndexerError; - - fn try_from(owner_balance: &photon_api::models::OwnerBalance) -> Result { - Ok(OwnerBalance { - balance: owner_balance.balance, - owner: Pubkey::new_from_array(decode_base58_to_fixed_array(&owner_balance.owner)?), - }) - } -} diff --git a/sdk-libs/client/src/indexer/types/account.rs b/sdk-libs/client/src/indexer/types/account.rs new file mode 100644 index 0000000000..9976bbdc24 --- /dev/null +++ b/sdk-libs/client/src/indexer/types/account.rs @@ -0,0 +1,202 @@ +use light_compressed_account::{ + compressed_account::{ + CompressedAccount as ProgramCompressedAccount, CompressedAccountData, + CompressedAccountWithMerkleContext, + }, + TreeType, +}; +use solana_pubkey::Pubkey; +use tracing::warn; + +use super::{ + super::{base58::decode_base58_to_fixed_array, tree_info::QUEUE_TREE_MAPPING, IndexerError}, + tree::{NextTreeInfo, TreeInfo}, +}; + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct CompressedAccount { + pub address: Option<[u8; 32]>, + pub data: Option, + pub hash: [u8; 32], + pub lamports: u64, + pub leaf_index: u32, + pub owner: Pubkey, + pub prove_by_index: bool, + pub seq: Option, + pub slot_created: u64, + pub tree_info: TreeInfo, +} + +impl TryFrom 
for CompressedAccount { + type Error = IndexerError; + + fn try_from(account: CompressedAccountWithMerkleContext) -> Result { + let hash = account + .hash() + .map_err(|e| IndexerError::decode_error("data", e))?; + // Breaks light-program-test + let tree_info = QUEUE_TREE_MAPPING.get( + &Pubkey::new_from_array(account.merkle_context.merkle_tree_pubkey.to_bytes()) + .to_string(), + ); + let cpi_context = if let Some(tree_info) = tree_info { + tree_info.cpi_context + } else { + warn!("Cpi context not found in queue tree mapping"); + None + }; + Ok(CompressedAccount { + address: account.compressed_account.address, + data: account.compressed_account.data, + hash, + lamports: account.compressed_account.lamports, + leaf_index: account.merkle_context.leaf_index, + tree_info: TreeInfo { + tree: Pubkey::new_from_array(account.merkle_context.merkle_tree_pubkey.to_bytes()), + queue: Pubkey::new_from_array(account.merkle_context.queue_pubkey.to_bytes()), + tree_type: account.merkle_context.tree_type, + cpi_context, + next_tree_info: None, + }, + owner: Pubkey::new_from_array(account.compressed_account.owner.to_bytes()), + prove_by_index: account.merkle_context.prove_by_index, + seq: None, + slot_created: u64::MAX, + }) + } +} + +impl From for CompressedAccountWithMerkleContext { + fn from(account: CompressedAccount) -> Self { + use light_compressed_account::Pubkey; + let compressed_account = ProgramCompressedAccount { + owner: Pubkey::new_from_array(account.owner.to_bytes()), + lamports: account.lamports, + address: account.address, + data: account.data, + }; + + let merkle_context = account + .tree_info + .to_light_merkle_context(account.leaf_index, account.prove_by_index); + + CompressedAccountWithMerkleContext { + compressed_account, + merkle_context, + } + } +} + +impl TryFrom<&photon_api::models::AccountV2> for CompressedAccount { + type Error = IndexerError; + + fn try_from(account: &photon_api::models::AccountV2) -> Result { + let data = if let Some(data) = 
&account.data { + Ok::, IndexerError>(Some(CompressedAccountData { + discriminator: data.discriminator.to_le_bytes(), + data: base64::decode_config(&data.data, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("data", e))?, + data_hash: decode_base58_to_fixed_array(&data.data_hash)?, + })) + } else { + Ok::, IndexerError>(None) + }?; + + let owner = Pubkey::new_from_array(decode_base58_to_fixed_array(&account.owner)?); + let address = account + .address + .as_ref() + .map(|address| decode_base58_to_fixed_array(address)) + .transpose()?; + let hash = decode_base58_to_fixed_array(&account.hash)?; + + let tree_info = TreeInfo { + tree: Pubkey::new_from_array(decode_base58_to_fixed_array( + &account.merkle_context.tree, + )?), + queue: Pubkey::new_from_array(decode_base58_to_fixed_array( + &account.merkle_context.queue, + )?), + tree_type: TreeType::from(account.merkle_context.tree_type as u64), + cpi_context: super::super::base58::decode_base58_option_to_pubkey( + &account.merkle_context.cpi_context, + )?, + next_tree_info: account + .merkle_context + .next_tree_context + .as_ref() + .map(|ctx| NextTreeInfo::try_from(ctx.as_ref())) + .transpose()?, + }; + + Ok(CompressedAccount { + owner, + address, + data, + hash, + lamports: account.lamports, + leaf_index: account.leaf_index, + seq: account.seq, + slot_created: account.slot_created, + tree_info, + prove_by_index: account.prove_by_index, + }) + } +} + +impl TryFrom<&photon_api::models::Account> for CompressedAccount { + type Error = IndexerError; + + fn try_from(account: &photon_api::models::Account) -> Result { + let data = if let Some(data) = &account.data { + Ok::, IndexerError>(Some(CompressedAccountData { + discriminator: data.discriminator.to_le_bytes(), + data: base64::decode_config(&data.data, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("data", e))?, + data_hash: decode_base58_to_fixed_array(&data.data_hash)?, + })) + } else { + Ok::, IndexerError>(None) + }?; + let 
owner = Pubkey::new_from_array(decode_base58_to_fixed_array(&account.owner)?); + let address = account + .address + .as_ref() + .map(|address| decode_base58_to_fixed_array(address)) + .transpose()?; + let hash = decode_base58_to_fixed_array(&account.hash)?; + let seq = account.seq; + let slot_created = account.slot_created; + let lamports = account.lamports; + let leaf_index = account.leaf_index; + + let tree_info = + QUEUE_TREE_MAPPING + .get(&account.tree) + .ok_or(IndexerError::MissingResult { + context: "conversion".into(), + message: "expected value was None".into(), + })?; + + let tree_info = TreeInfo { + cpi_context: tree_info.cpi_context, + queue: tree_info.queue, + tree_type: tree_info.tree_type, + next_tree_info: None, + tree: tree_info.tree, + }; + + Ok(CompressedAccount { + owner, + address, + data, + hash, + lamports, + leaf_index, + seq, + slot_created, + tree_info, + prove_by_index: false, + }) + } +} diff --git a/sdk-libs/client/src/indexer/types/interface.rs b/sdk-libs/client/src/indexer/types/interface.rs new file mode 100644 index 0000000000..1a13df1959 --- /dev/null +++ b/sdk-libs/client/src/indexer/types/interface.rs @@ -0,0 +1,217 @@ +use borsh::BorshDeserialize; +use light_compressed_account::TreeType; +use light_token::compat::{AccountState, TokenData}; +use light_token_interface::state::ExtensionStruct; +use solana_account::Account; +use solana_pubkey::Pubkey; + +use super::super::{base58::decode_base58_to_fixed_array, IndexerError}; + +/// Re-export solana Account for interface types. 
+pub type SolanaAccountData = Account; + +/// Merkle tree info for compressed accounts +#[derive(Clone, Copy, Debug, PartialEq)] +pub struct InterfaceTreeInfo { + pub tree: Pubkey, + pub queue: Pubkey, + pub tree_type: TreeType, + pub seq: Option, + /// Slot when the account was created/compressed + pub slot_created: u64, +} + +/// Structured compressed account data (discriminator separated) +#[derive(Clone, Debug, PartialEq)] +pub struct ColdData { + pub discriminator: [u8; 8], + pub data: Vec, +} + +/// Compressed account context — present when account is in compressed state +#[derive(Clone, Debug, PartialEq)] +pub enum ColdContext { + Account { + hash: [u8; 32], + leaf_index: u64, + tree_info: InterfaceTreeInfo, + data: ColdData, + }, + Token { + hash: [u8; 32], + leaf_index: u64, + tree_info: InterfaceTreeInfo, + data: ColdData, + }, +} + +/// Decode tree info from photon_api format +fn decode_tree_info( + tree_info: &photon_api::models::InterfaceTreeInfo, +) -> Result { + let tree = Pubkey::new_from_array(decode_base58_to_fixed_array(&tree_info.tree)?); + let queue = Pubkey::new_from_array(decode_base58_to_fixed_array(&tree_info.queue)?); + let tree_type = match tree_info.tree_type { + photon_api::models::TreeType::StateV1 => TreeType::StateV1, + photon_api::models::TreeType::StateV2 => TreeType::StateV2, + }; + Ok(InterfaceTreeInfo { + tree, + queue, + tree_type, + seq: tree_info.seq, + slot_created: tree_info.slot_created, + }) +} + +/// Decode cold data from photon_api format +fn decode_cold_data(data: &photon_api::models::ColdData) -> Result { + Ok(ColdData { + discriminator: data.discriminator, + data: base64::decode_config(&data.data, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("data", e))?, + }) +} + +/// Helper to convert photon_api ColdContext to client ColdContext +fn convert_cold_context( + cold: &photon_api::models::ColdContext, +) -> Result { + match cold { + photon_api::models::ColdContext::Account { + hash, + leaf_index, + 
tree_info, + data, + } => Ok(ColdContext::Account { + hash: decode_base58_to_fixed_array(hash)?, + leaf_index: *leaf_index, + tree_info: decode_tree_info(tree_info)?, + data: decode_cold_data(data)?, + }), + photon_api::models::ColdContext::Token { + hash, + leaf_index, + tree_info, + data, + } => Ok(ColdContext::Token { + hash: decode_base58_to_fixed_array(hash)?, + leaf_index: *leaf_index, + tree_info: decode_tree_info(tree_info)?, + data: decode_cold_data(data)?, + }), + } +} + +/// Unified account interface — works for both on-chain and compressed accounts +#[derive(Clone, Debug, PartialEq)] +pub struct AccountInterface { + /// The on-chain Solana pubkey + pub key: Pubkey, + /// Standard Solana account fields + pub account: SolanaAccountData, + /// Compressed context — None if on-chain, Some if compressed + pub cold: Option, +} + +impl AccountInterface { + /// Returns true if this account is on-chain (hot) + pub fn is_hot(&self) -> bool { + self.cold.is_none() + } + + /// Returns true if this account is compressed (cold) + pub fn is_cold(&self) -> bool { + self.cold.is_some() + } +} + +/// Helper to convert photon_api AccountInterface to client AccountInterface +fn convert_account_interface( + ai: &photon_api::models::AccountInterface, +) -> Result { + let cold = ai.cold.as_ref().map(convert_cold_context).transpose()?; + + let data = base64::decode_config(&ai.account.data, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("account.data", e))?; + + Ok(AccountInterface { + key: Pubkey::new_from_array(decode_base58_to_fixed_array(&ai.key)?), + account: Account { + lamports: ai.account.lamports, + data, + owner: Pubkey::new_from_array(decode_base58_to_fixed_array(&ai.account.owner)?), + executable: ai.account.executable, + rent_epoch: ai.account.rent_epoch, + }, + cold, + }) +} + +impl TryFrom<&photon_api::models::AccountInterface> for AccountInterface { + type Error = IndexerError; + + fn try_from(ai: &photon_api::models::AccountInterface) -> 
Result { + convert_account_interface(ai) + } +} + +impl TryFrom<&photon_api::models::InterfaceResult> for AccountInterface { + type Error = IndexerError; + + fn try_from(ir: &photon_api::models::InterfaceResult) -> Result { + match ir { + photon_api::models::InterfaceResult::Account(ai) => AccountInterface::try_from(ai), + photon_api::models::InterfaceResult::Token(tai) => { + AccountInterface::try_from(&tai.account) + } + } + } +} + +/// Token account interface with parsed token data +#[derive(Clone, Debug, PartialEq)] +pub struct TokenAccountInterface { + /// Base account interface data + pub account: AccountInterface, + /// Parsed token data (same as CompressedTokenAccount.token) + pub token: TokenData, +} + +impl TryFrom<&photon_api::models::TokenAccountInterface> for TokenAccountInterface { + type Error = IndexerError; + + fn try_from(tai: &photon_api::models::TokenAccountInterface) -> Result { + let account = convert_account_interface(&tai.account)?; + + // Parse token data - same pattern as CompressedTokenAccount + let token = TokenData { + mint: Pubkey::new_from_array(decode_base58_to_fixed_array(&tai.token_data.mint)?), + owner: Pubkey::new_from_array(decode_base58_to_fixed_array(&tai.token_data.owner)?), + amount: tai.token_data.amount, + delegate: tai + .token_data + .delegate + .as_ref() + .map(|d| decode_base58_to_fixed_array(d).map(Pubkey::new_from_array)) + .transpose()?, + state: match tai.token_data.state { + photon_api::models::AccountState::Initialized => AccountState::Initialized, + photon_api::models::AccountState::Frozen => AccountState::Frozen, + }, + tlv: tai + .token_data + .tlv + .as_ref() + .map(|tlv| { + let bytes = base64::decode_config(tlv, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("tlv", e))?; + Vec::::deserialize(&mut bytes.as_slice()) + .map_err(|e| IndexerError::decode_error("extensions", e)) + }) + .transpose()?, + }; + + Ok(TokenAccountInterface { account, token }) + } +} diff --git 
a/sdk-libs/client/src/indexer/types/mod.rs b/sdk-libs/client/src/indexer/types/mod.rs new file mode 100644 index 0000000000..f91504c8e3 --- /dev/null +++ b/sdk-libs/client/src/indexer/types/mod.rs @@ -0,0 +1,45 @@ +mod account; +mod interface; +mod proof; +mod queue; +mod signature; +mod token; +mod tree; + +pub use account::CompressedAccount; +pub use interface::{ + AccountInterface, ColdContext, ColdData, InterfaceTreeInfo, SolanaAccountData, + TokenAccountInterface, +}; +pub use proof::{ + AccountProofInputs, AddressProofInputs, AddressWithTree, MerkleProof, MerkleProofWithContext, + NewAddressProofWithContext, RootIndex, ValidityProofWithContext, +}; +pub use queue::{ + AddressQueueData, InputQueueData, OutputQueueData, QueueElementsResult, StateQueueData, +}; +pub use signature::SignatureWithMetadata; +pub use token::{CompressedTokenAccount, OwnerBalance, TokenBalance}; +pub use tree::{AddressMerkleTreeAccounts, NextTreeInfo, StateMerkleTreeAccounts, TreeInfo}; + +pub struct ProofOfLeaf { + pub leaf: [u8; 32], + pub proof: Vec<[u8; 32]>, +} + +pub type Address = [u8; 32]; +pub type Hash = [u8; 32]; + +#[derive(Debug, Clone, PartialEq)] +pub struct QueueInfo { + pub tree: solana_pubkey::Pubkey, + pub queue: solana_pubkey::Pubkey, + pub queue_type: u8, + pub queue_size: u64, +} + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct QueueInfoResult { + pub queues: Vec, + pub slot: u64, +} diff --git a/sdk-libs/client/src/indexer/types/proof.rs b/sdk-libs/client/src/indexer/types/proof.rs new file mode 100644 index 0000000000..94182baab2 --- /dev/null +++ b/sdk-libs/client/src/indexer/types/proof.rs @@ -0,0 +1,373 @@ +use light_account::PackedAccounts; +use light_compressed_account::instruction_data::compressed_proof::CompressedProof; +use light_sdk::instruction::{PackedAddressTreeInfo, PackedStateTreeInfo, ValidityProof}; +use solana_pubkey::Pubkey; + +use super::{ + super::{base58::decode_base58_to_fixed_array, tree_info::QUEUE_TREE_MAPPING, IndexerError}, + 
tree::TreeInfo, +}; + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct MerkleProofWithContext { + pub proof: Vec<[u8; 32]>, + pub root: [u8; 32], + pub leaf_index: u64, + pub leaf: [u8; 32], + pub merkle_tree: [u8; 32], + pub root_seq: u64, + pub tx_hash: Option<[u8; 32]>, + pub account_hash: [u8; 32], +} + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct MerkleProof { + pub hash: [u8; 32], + pub leaf_index: u64, + pub merkle_tree: Pubkey, + pub proof: Vec<[u8; 32]>, + pub root_seq: u64, + pub root: [u8; 32], +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct AddressWithTree { + pub address: super::Address, + pub tree: Pubkey, +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct NewAddressProofWithContext { + pub merkle_tree: Pubkey, + pub root: [u8; 32], + pub root_seq: u64, + pub low_address_index: u64, + pub low_address_value: [u8; 32], + pub low_address_next_index: u64, + pub low_address_next_value: [u8; 32], + pub low_address_proof: Vec<[u8; 32]>, + pub new_low_element: Option>, + pub new_element: Option>, + pub new_element_next_value: Option, +} + +#[derive(Debug, Default, Clone, PartialEq)] +pub struct ValidityProofWithContext { + pub proof: ValidityProof, + pub accounts: Vec, + pub addresses: Vec, +} + +// TODO: add get_public_inputs +// -> to make it easier to use light-verifier with get_validity_proof() +impl ValidityProofWithContext { + pub fn get_root_indices(&self) -> Vec> { + self.accounts + .iter() + .map(|account| account.root_index.root_index()) + .collect() + } + + pub fn get_address_root_indices(&self) -> Vec { + self.addresses + .iter() + .map(|address| address.root_index) + .collect() + } +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct AccountProofInputs { + pub hash: [u8; 32], + pub root: [u8; 32], + pub root_index: RootIndex, + pub leaf_index: u64, + pub tree_info: TreeInfo, +} + +#[derive(Clone, Default, Copy, Debug, PartialEq)] +pub struct RootIndex { + proof_by_index: bool, + root_index: u16, 
+} + +impl RootIndex { + pub fn new_none() -> Self { + Self { + proof_by_index: true, + root_index: 0, + } + } + + pub fn new_some(root_index: u16) -> Self { + Self { + proof_by_index: false, + root_index, + } + } + + pub fn proof_by_index(&self) -> bool { + self.proof_by_index + } + + pub fn root_index(&self) -> Option { + if !self.proof_by_index { + Some(self.root_index) + } else { + None + } + } +} + +impl AccountProofInputs { + pub fn from_api_model( + value: &photon_api::models::AccountProofInputs, + ) -> Result { + let root_index = { + if value.root_index.prove_by_index { + RootIndex::new_none() + } else { + RootIndex::new_some(value.root_index.root_index) + } + }; + Ok(Self { + hash: decode_base58_to_fixed_array(&value.hash)?, + root: decode_base58_to_fixed_array(&value.root)?, + root_index, + leaf_index: value.leaf_index, + tree_info: TreeInfo::from_api_model(&value.merkle_context)?, + }) + } +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct AddressProofInputs { + pub address: [u8; 32], + pub root: [u8; 32], + pub root_index: u16, + pub tree_info: TreeInfo, +} + +impl AddressProofInputs { + pub fn from_api_model( + value: &photon_api::models::AddressProofInputs, + ) -> Result { + Ok(Self { + address: decode_base58_to_fixed_array(&value.address)?, + root: decode_base58_to_fixed_array(&value.root)?, + root_index: value.root_index, + tree_info: TreeInfo::from_api_model(&value.merkle_context)?, + }) + } +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct PackedStateTreeInfos { + pub packed_tree_infos: Vec, + pub output_tree_index: u8, +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct PackedTreeInfos { + pub state_trees: Option, + pub address_trees: Vec, +} + +impl ValidityProofWithContext { + pub fn pack_tree_infos(&self, packed_accounts: &mut PackedAccounts) -> PackedTreeInfos { + let mut packed_tree_infos = Vec::new(); + let mut address_trees = Vec::new(); + let mut output_tree_index = None; + for account in 
self.accounts.iter() { + // Pack TreeInfo + let merkle_tree_pubkey_index = packed_accounts.insert_or_get(account.tree_info.tree); + let queue_pubkey_index = packed_accounts.insert_or_get(account.tree_info.queue); + let tree_info_packed = PackedStateTreeInfo { + root_index: account.root_index.root_index, + merkle_tree_pubkey_index, + queue_pubkey_index, + leaf_index: account.leaf_index as u32, + prove_by_index: account.root_index.proof_by_index(), + }; + packed_tree_infos.push(tree_info_packed); + + // If a next Merkle tree exists the Merkle tree is full -> use the next Merkle tree for new state. + // Else use the current Merkle tree for new state. + if let Some(next) = account.tree_info.next_tree_info { + // SAFETY: account will always have a state Merkle tree context. + // pack_output_tree_index only panics on an address Merkle tree context. + let index = next.pack_output_tree_index(packed_accounts).unwrap(); + if output_tree_index.is_none() { + output_tree_index = Some(index); + } + } else { + // SAFETY: account will always have a state Merkle tree context. + // pack_output_tree_index only panics on an address Merkle tree context. 
+ let index = account + .tree_info + .pack_output_tree_index(packed_accounts) + .unwrap(); + if output_tree_index.is_none() { + output_tree_index = Some(index); + } + } + } + + for address in self.addresses.iter() { + // Pack AddressTreeInfo + let address_merkle_tree_pubkey_index = + packed_accounts.insert_or_get(address.tree_info.tree); + let address_queue_pubkey_index = packed_accounts.insert_or_get(address.tree_info.queue); + address_trees.push(PackedAddressTreeInfo { + address_merkle_tree_pubkey_index, + address_queue_pubkey_index, + root_index: address.root_index, + }); + } + let packed_tree_infos = if packed_tree_infos.is_empty() { + None + } else { + Some(PackedStateTreeInfos { + packed_tree_infos, + output_tree_index: output_tree_index.unwrap(), + }) + }; + PackedTreeInfos { + state_trees: packed_tree_infos, + address_trees, + } + } + + pub fn from_api_model( + value: photon_api::models::CompressedProofWithContext, + num_hashes: usize, + ) -> Result { + let proof = ValidityProof::new(Some(CompressedProof { + a: value + .compressed_proof + .a + .try_into() + .map_err(|_| IndexerError::decode_error("proof.a", "invalid length"))?, + b: value + .compressed_proof + .b + .try_into() + .map_err(|_| IndexerError::decode_error("proof.b", "invalid length"))?, + c: value + .compressed_proof + .c + .try_into() + .map_err(|_| IndexerError::decode_error("proof.c", "invalid length"))?, + })); + + // Convert account data from V1 flat arrays to V2 structured format + let accounts = (0..num_hashes) + .map(|i| { + let tree_pubkey = + Pubkey::new_from_array(decode_base58_to_fixed_array(&value.merkle_trees[i])?); + let tree_info = QUEUE_TREE_MAPPING.get(&value.merkle_trees[i]).ok_or( + IndexerError::MissingResult { + context: "conversion".into(), + message: format!( + "tree not found in QUEUE_TREE_MAPPING: {}", + &value.merkle_trees[i] + ), + }, + )?; + + Ok(AccountProofInputs { + hash: decode_base58_to_fixed_array(&value.leaves[i])?, + root: 
decode_base58_to_fixed_array(&value.roots[i])?, + root_index: RootIndex::new_some(value.root_indices[i] as u16), + leaf_index: value.leaf_indices[i] as u64, + tree_info: TreeInfo { + tree_type: tree_info.tree_type, + tree: tree_pubkey, + queue: tree_info.queue, + cpi_context: tree_info.cpi_context, + next_tree_info: None, + }, + }) + }) + .collect::, IndexerError>>()?; + + // Convert address data from remaining indices (if any) + let addresses = if value.root_indices.len() > num_hashes { + (num_hashes..value.root_indices.len()) + .map(|i| { + let tree_pubkey = Pubkey::new_from_array(decode_base58_to_fixed_array( + &value.merkle_trees[i], + )?); + let tree_info = QUEUE_TREE_MAPPING.get(&value.merkle_trees[i]).ok_or( + IndexerError::MissingResult { + context: "conversion".into(), + message: "expected value was None".into(), + }, + )?; + + Ok(AddressProofInputs { + address: decode_base58_to_fixed_array(&value.leaves[i])?, // Address is in leaves + root: decode_base58_to_fixed_array(&value.roots[i])?, + root_index: value.root_indices[i] as u16, + tree_info: TreeInfo { + tree_type: tree_info.tree_type, + tree: tree_pubkey, + queue: tree_info.queue, + cpi_context: tree_info.cpi_context, + next_tree_info: None, + }, + }) + }) + .collect::, IndexerError>>()? 
+ } else { + Vec::new() + }; + + Ok(Self { + proof, + accounts, + addresses, + }) + } + + pub fn from_api_model_v2( + value: photon_api::models::CompressedProofWithContextV2, + ) -> Result { + let proof = if let Some(proof) = value.compressed_proof { + ValidityProof::new(Some(CompressedProof { + a: proof + .a + .try_into() + .map_err(|_| IndexerError::decode_error("proof.a", "invalid length"))?, + b: proof + .b + .try_into() + .map_err(|_| IndexerError::decode_error("proof.b", "invalid length"))?, + c: proof + .c + .try_into() + .map_err(|_| IndexerError::decode_error("proof.c", "invalid length"))?, + })) + } else { + ValidityProof::new(None) + }; + + let accounts = value + .accounts + .iter() + .map(AccountProofInputs::from_api_model) + .collect::, IndexerError>>()?; + + let addresses = value + .addresses + .iter() + .map(AddressProofInputs::from_api_model) + .collect::, IndexerError>>()?; + + Ok(Self { + proof, + accounts, + addresses, + }) + } +} diff --git a/sdk-libs/client/src/indexer/types/queue.rs b/sdk-libs/client/src/indexer/types/queue.rs new file mode 100644 index 0000000000..40e7cc0f6e --- /dev/null +++ b/sdk-libs/client/src/indexer/types/queue.rs @@ -0,0 +1,146 @@ +use super::super::IndexerError; + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct OutputQueueData { + pub leaf_indices: Vec, + pub account_hashes: Vec<[u8; 32]>, + pub old_leaves: Vec<[u8; 32]>, + pub first_queue_index: u64, + /// The tree's next_index - where new leaves will be appended + pub next_index: u64, + /// Pre-computed hash chains per ZKP batch (from on-chain) + pub leaves_hash_chains: Vec<[u8; 32]>, +} + +/// V2 Input Queue Data +#[derive(Debug, Clone, PartialEq, Default)] +pub struct InputQueueData { + pub leaf_indices: Vec, + pub account_hashes: Vec<[u8; 32]>, + pub current_leaves: Vec<[u8; 32]>, + pub tx_hashes: Vec<[u8; 32]>, + /// Pre-computed nullifiers from indexer + pub nullifiers: Vec<[u8; 32]>, + pub first_queue_index: u64, + /// Pre-computed hash chains per ZKP 
batch (from on-chain) + pub leaves_hash_chains: Vec<[u8; 32]>, +} + +/// State queue data with shared tree nodes for output and input queues +#[derive(Debug, Clone, PartialEq, Default)] +pub struct StateQueueData { + /// Shared deduplicated tree nodes for state queues (output + input) + /// node_index encoding: (level << 56) | position + pub nodes: Vec, + pub node_hashes: Vec<[u8; 32]>, + /// Initial root for the state tree (shared by output and input queues) + pub initial_root: [u8; 32], + /// Sequence number of the root + pub root_seq: u64, + /// Output queue data (if requested) + pub output_queue: Option, + /// Input queue data (if requested) + pub input_queue: Option, +} + +/// V2 Address Queue Data with deduplicated nodes +/// Proofs are reconstructed from `nodes`/`node_hashes` using `low_element_indices` +#[derive(Debug, Clone, PartialEq, Default)] +pub struct AddressQueueData { + pub addresses: Vec<[u8; 32]>, + pub low_element_values: Vec<[u8; 32]>, + pub low_element_next_values: Vec<[u8; 32]>, + pub low_element_indices: Vec, + pub low_element_next_indices: Vec, + /// Deduplicated node indices - encoding: (level << 56) | position + pub nodes: Vec, + /// Hashes corresponding to each node index + pub node_hashes: Vec<[u8; 32]>, + pub initial_root: [u8; 32], + pub leaves_hash_chains: Vec<[u8; 32]>, + pub subtrees: Vec<[u8; 32]>, + pub start_index: u64, + pub root_seq: u64, +} + +impl AddressQueueData { + /// Reconstruct a merkle proof for a given low_element_index from the deduplicated nodes. + /// The tree_height is needed to know how many levels to traverse. 
+ pub fn reconstruct_proof( + &self, + address_idx: usize, + tree_height: u8, + ) -> Result, IndexerError> { + let leaf_index = *self.low_element_indices.get(address_idx).ok_or_else(|| { + IndexerError::MissingResult { + context: "reconstruct_proof".to_string(), + message: format!( + "address_idx {} out of bounds for low_element_indices (len {})", + address_idx, + self.low_element_indices.len(), + ), + } + })?; + let mut proof = Vec::with_capacity(tree_height as usize); + let mut pos = leaf_index; + + for level in 0..tree_height { + let sibling_pos = if pos.is_multiple_of(2) { + pos + 1 + } else { + pos - 1 + }; + let sibling_idx = Self::encode_node_index(level, sibling_pos); + + let hash_idx = self + .nodes + .iter() + .position(|&n| n == sibling_idx) + .ok_or_else(|| IndexerError::MissingResult { + context: "reconstruct_proof".to_string(), + message: format!( + "Missing proof node at level {} position {} (encoded: {})", + level, sibling_pos, sibling_idx + ), + })?; + let hash = + self.node_hashes + .get(hash_idx) + .ok_or_else(|| IndexerError::MissingResult { + context: "reconstruct_proof".to_string(), + message: format!( + "node_hashes index {} out of bounds (len {})", + hash_idx, + self.node_hashes.len(), + ), + })?; + proof.push(*hash); + pos /= 2; + } + + Ok(proof) + } + + /// Reconstruct all proofs for all addresses + pub fn reconstruct_all_proofs( + &self, + tree_height: u8, + ) -> Result>, IndexerError> { + (0..self.addresses.len()) + .map(|i| self.reconstruct_proof(i, tree_height)) + .collect() + } + + /// Encode node index: (level << 56) | position + #[inline] + fn encode_node_index(level: u8, position: u64) -> u64 { + ((level as u64) << 56) | position + } +} + +/// V2 Queue Elements Result with deduplicated node data +#[derive(Debug, Clone, PartialEq, Default)] +pub struct QueueElementsResult { + pub state_queue: Option, + pub address_queue: Option, +} diff --git a/sdk-libs/client/src/indexer/types/signature.rs 
b/sdk-libs/client/src/indexer/types/signature.rs new file mode 100644 index 0000000000..6ec82b8f1d --- /dev/null +++ b/sdk-libs/client/src/indexer/types/signature.rs @@ -0,0 +1,16 @@ +#[derive(Debug, Clone, PartialEq, Default)] +pub struct SignatureWithMetadata { + pub block_time: u64, + pub signature: String, + pub slot: u64, +} + +impl From<&photon_api::models::SignatureInfo> for SignatureWithMetadata { + fn from(sig_info: &photon_api::models::SignatureInfo) -> Self { + SignatureWithMetadata { + block_time: sig_info.block_time, + signature: sig_info.signature.clone(), + slot: sig_info.slot, + } + } +} diff --git a/sdk-libs/client/src/indexer/types/token.rs b/sdk-libs/client/src/indexer/types/token.rs new file mode 100644 index 0000000000..f2a9c63656 --- /dev/null +++ b/sdk-libs/client/src/indexer/types/token.rs @@ -0,0 +1,148 @@ +use borsh::BorshDeserialize; +use light_compressed_account::compressed_account::CompressedAccountWithMerkleContext; +use light_token::compat::{AccountState, TokenData}; +use light_token_interface::state::ExtensionStruct; +use solana_pubkey::Pubkey; + +use super::{ + super::{base58::decode_base58_to_fixed_array, IndexerError}, + account::CompressedAccount, +}; + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct CompressedTokenAccount { + /// Token-specific data (mint, owner, amount, delegate, state, tlv) + pub token: TokenData, + /// General account information (address, hash, lamports, merkle context, etc.) 
+ pub account: CompressedAccount, +} + +fn parse_token_data(td: &photon_api::models::TokenData) -> Result { + Ok(TokenData { + mint: Pubkey::new_from_array(decode_base58_to_fixed_array(&td.mint)?), + owner: Pubkey::new_from_array(decode_base58_to_fixed_array(&td.owner)?), + amount: td.amount, + delegate: td + .delegate + .as_ref() + .map(|d| decode_base58_to_fixed_array(d).map(Pubkey::new_from_array)) + .transpose()?, + state: match td.state { + photon_api::models::AccountState::Initialized => AccountState::Initialized, + photon_api::models::AccountState::Frozen => AccountState::Frozen, + }, + tlv: td + .tlv + .as_ref() + .map(|tlv| { + let bytes = base64::decode_config(tlv, base64::STANDARD_NO_PAD) + .map_err(|e| IndexerError::decode_error("tlv", e))?; + Vec::::deserialize(&mut bytes.as_slice()) + .map_err(|e| IndexerError::decode_error("extensions", e)) + }) + .transpose()?, + }) +} + +impl TryFrom<&photon_api::models::TokenAccount> for CompressedTokenAccount { + type Error = IndexerError; + + fn try_from(token_account: &photon_api::models::TokenAccount) -> Result { + let account = CompressedAccount::try_from(token_account.account.as_ref())?; + let token = parse_token_data(&token_account.token_data)?; + Ok(CompressedTokenAccount { token, account }) + } +} + +impl TryFrom<&photon_api::models::TokenAccountV2> for CompressedTokenAccount { + type Error = IndexerError; + + fn try_from(token_account: &photon_api::models::TokenAccountV2) -> Result { + let account = CompressedAccount::try_from(token_account.account.as_ref())?; + let token = parse_token_data(&token_account.token_data)?; + Ok(CompressedTokenAccount { token, account }) + } +} + +#[allow(clippy::from_over_into)] +impl Into for CompressedTokenAccount { + fn into(self) -> light_token::compat::TokenDataWithMerkleContext { + let compressed_account = CompressedAccountWithMerkleContext::from(self.account); + + light_token::compat::TokenDataWithMerkleContext { + token_data: self.token, + compressed_account, + } + } 
+} + +#[allow(clippy::from_over_into)] +impl Into> + for super::super::response::Response< + super::super::response::ItemsWithCursor, + > +{ + fn into(self) -> Vec { + self.value + .items + .into_iter() + .map( + |token_account| light_token::compat::TokenDataWithMerkleContext { + token_data: token_account.token, + compressed_account: CompressedAccountWithMerkleContext::from( + token_account.account.clone(), + ), + }, + ) + .collect::>() + } +} + +impl TryFrom for CompressedTokenAccount { + type Error = IndexerError; + + fn try_from( + token_data_with_context: light_token::compat::TokenDataWithMerkleContext, + ) -> Result { + let account = CompressedAccount::try_from(token_data_with_context.compressed_account)?; + + Ok(CompressedTokenAccount { + token: token_data_with_context.token_data, + account, + }) + } +} + +#[derive(Clone, Default, Debug, PartialEq)] +pub struct TokenBalance { + pub balance: u64, + pub mint: Pubkey, +} + +impl TryFrom<&photon_api::models::TokenBalance> for TokenBalance { + type Error = IndexerError; + + fn try_from(token_balance: &photon_api::models::TokenBalance) -> Result { + Ok(TokenBalance { + balance: token_balance.balance, + mint: Pubkey::new_from_array(decode_base58_to_fixed_array(&token_balance.mint)?), + }) + } +} + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct OwnerBalance { + pub balance: u64, + pub owner: Pubkey, +} + +impl TryFrom<&photon_api::models::OwnerBalance> for OwnerBalance { + type Error = IndexerError; + + fn try_from(owner_balance: &photon_api::models::OwnerBalance) -> Result { + Ok(OwnerBalance { + balance: owner_balance.balance, + owner: Pubkey::new_from_array(decode_base58_to_fixed_array(&owner_balance.owner)?), + }) + } +} diff --git a/sdk-libs/client/src/indexer/types/tree.rs b/sdk-libs/client/src/indexer/types/tree.rs new file mode 100644 index 0000000000..100efee9cd --- /dev/null +++ b/sdk-libs/client/src/indexer/types/tree.rs @@ -0,0 +1,145 @@ +use light_account::PackedAccounts; +use 
light_compressed_account::TreeType; +use solana_pubkey::Pubkey; + +use super::super::{ + base58::{decode_base58_option_to_pubkey, decode_base58_to_fixed_array}, + IndexerError, +}; + +#[derive(Clone, Copy, Default, Debug, PartialEq)] +pub struct NextTreeInfo { + pub cpi_context: Option, + pub queue: Pubkey, + pub tree: Pubkey, + pub tree_type: TreeType, +} + +impl NextTreeInfo { + /// Get the index of the output tree in the packed accounts. + /// For StateV1, it returns the index of the tree account. + /// For StateV2, it returns the index of the queue account. + /// (For V2 trees new state is inserted into the output queue. + /// The forester updates the tree from the queue asynchronously.) + pub fn pack_output_tree_index( + &self, + packed_accounts: &mut PackedAccounts, + ) -> Result { + match self.tree_type { + TreeType::StateV1 => Ok(packed_accounts.insert_or_get(self.tree)), + TreeType::StateV2 => Ok(packed_accounts.insert_or_get(self.queue)), + _ => Err(IndexerError::InvalidPackTreeType), + } + } + pub fn from_api_model( + value: &photon_api::models::TreeContextInfo, + ) -> Result { + Self::try_from(value) + } +} + +impl TryFrom<&photon_api::models::TreeContextInfo> for NextTreeInfo { + type Error = IndexerError; + + fn try_from(value: &photon_api::models::TreeContextInfo) -> Result { + Ok(Self { + tree_type: TreeType::from(value.tree_type as u64), + tree: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.tree)?), + queue: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.queue)?), + cpi_context: decode_base58_option_to_pubkey(&value.cpi_context)?, + }) + } +} + +#[derive(Clone, Copy, Default, Debug, PartialEq)] +pub struct TreeInfo { + pub cpi_context: Option, + pub next_tree_info: Option, + pub queue: Pubkey, + pub tree: Pubkey, + pub tree_type: TreeType, +} + +impl TreeInfo { + /// Get the index of the output tree in the packed accounts. + /// For StateV1, it returns the index of the tree account. 
+ /// For StateV2, it returns the index of the queue account. + /// (For V2 trees new state is inserted into the output queue. + /// The forester updates the tree from the queue asynchronously.) + pub fn pack_output_tree_index( + &self, + packed_accounts: &mut PackedAccounts, + ) -> Result { + match self.tree_type { + TreeType::StateV1 => Ok(packed_accounts.insert_or_get(self.tree)), + TreeType::StateV2 => Ok(packed_accounts.insert_or_get(self.queue)), + _ => Err(IndexerError::InvalidPackTreeType), + } + } + + pub fn get_output_pubkey(&self) -> Result { + match self.tree_type { + TreeType::StateV1 => Ok(self.tree), + TreeType::StateV2 => Ok(self.queue), + _ => Err(IndexerError::InvalidPackTreeType), + } + } + + pub fn from_api_model( + value: &photon_api::models::MerkleContextV2, + ) -> Result { + Ok(Self { + tree_type: TreeType::from(value.tree_type as u64), + tree: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.tree)?), + queue: Pubkey::new_from_array(decode_base58_to_fixed_array(&value.queue)?), + cpi_context: decode_base58_option_to_pubkey(&value.cpi_context)?, + next_tree_info: value + .next_tree_context + .as_ref() + .map(|tree_info| NextTreeInfo::from_api_model(tree_info.as_ref())) + .transpose()?, + }) + } + + pub fn to_light_merkle_context( + &self, + leaf_index: u32, + prove_by_index: bool, + ) -> light_compressed_account::compressed_account::MerkleContext { + use light_compressed_account::Pubkey; + light_compressed_account::compressed_account::MerkleContext { + merkle_tree_pubkey: Pubkey::new_from_array(self.tree.to_bytes()), + queue_pubkey: Pubkey::new_from_array(self.queue.to_bytes()), + leaf_index, + tree_type: self.tree_type, + prove_by_index, + } + } +} + +#[derive(Debug, Clone, Copy, Ord, PartialOrd, Eq, PartialEq)] +pub struct StateMerkleTreeAccounts { + pub merkle_tree: Pubkey, + pub nullifier_queue: Pubkey, + pub cpi_context: Pubkey, + pub tree_type: TreeType, +} + +#[allow(clippy::from_over_into)] +impl Into for 
StateMerkleTreeAccounts { + fn into(self) -> TreeInfo { + TreeInfo { + tree: self.merkle_tree, + queue: self.nullifier_queue, + cpi_context: Some(self.cpi_context), + tree_type: self.tree_type, + next_tree_info: None, + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct AddressMerkleTreeAccounts { + pub merkle_tree: Pubkey, + pub queue: Pubkey, +} diff --git a/sdk-libs/client/src/interface/account_interface.rs b/sdk-libs/client/src/interface/account_interface.rs index 4c04469b7f..8c96e84d13 100644 --- a/sdk-libs/client/src/interface/account_interface.rs +++ b/sdk-libs/client/src/interface/account_interface.rs @@ -43,7 +43,7 @@ pub enum AccountInterfaceError { /// Uses standard `solana_account::Account` for raw data. /// For hot accounts: actual on-chain bytes. /// For cold accounts: synthetic bytes from cold data. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Default)] pub struct AccountInterface { /// The account's public key. pub key: Pubkey, @@ -150,6 +150,7 @@ impl AccountInterface { match &self.cold { Some(ColdContext::Account(c)) => Some(c.hash), Some(ColdContext::Token(c)) => Some(c.account.hash), + Some(ColdContext::Mint(c)) => Some(c.hash), None => None, } } @@ -159,6 +160,7 @@ impl AccountInterface { match &self.cold { Some(ColdContext::Account(c)) => Some(&c.tree_info), Some(ColdContext::Token(c)) => Some(&c.account.tree_info), + Some(ColdContext::Mint(c)) => Some(&c.tree_info), None => None, } } @@ -168,14 +170,16 @@ impl AccountInterface { match &self.cold { Some(ColdContext::Account(c)) => Some(c.leaf_index), Some(ColdContext::Token(c)) => Some(c.account.leaf_index), + Some(ColdContext::Mint(c)) => Some(c.leaf_index), None => None, } } - /// Get as CompressedAccount if cold account type. + /// Get as CompressedAccount if cold account or mint type. 
pub fn as_compressed_account(&self) -> Option<&CompressedAccount> { match &self.cold { Some(ColdContext::Account(c)) => Some(c), + Some(ColdContext::Mint(c)) => Some(c), _ => None, } } @@ -191,7 +195,7 @@ impl AccountInterface { /// Try to parse as Mint. Returns None if not a mint or parse fails. pub fn as_mint(&self) -> Option { match &self.cold { - Some(ColdContext::Account(ca)) => { + Some(ColdContext::Mint(ca)) => { let data = ca.data.as_ref()?; borsh::BorshDeserialize::deserialize(&mut data.data.as_slice()).ok() } @@ -218,7 +222,7 @@ impl AccountInterface { /// /// For ATAs: `parsed.owner` is the wallet owner (set from fetch params). /// For program-owned: `parsed.owner` is the PDA. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Default)] pub struct TokenAccountInterface { /// The token account's public key. pub key: Pubkey, @@ -390,3 +394,13 @@ impl TokenAccountInterface { self.ata_bump().is_some() } } + +impl From for AccountInterface { + fn from(tai: TokenAccountInterface) -> Self { + Self { + key: tai.key, + account: tai.account, + cold: tai.cold, + } + } +} diff --git a/sdk-libs/client/src/interface/account_interface_ext.rs b/sdk-libs/client/src/interface/account_interface_ext.rs deleted file mode 100644 index d6ae2237a5..0000000000 --- a/sdk-libs/client/src/interface/account_interface_ext.rs +++ /dev/null @@ -1,288 +0,0 @@ -use async_trait::async_trait; -use borsh::BorshDeserialize as _; -use light_compressed_account::address::derive_address; -use light_token::instruction::derive_token_ata; -use light_token_interface::{state::Mint, MINT_ADDRESS_TREE}; -use solana_pubkey::Pubkey; - -use super::{AccountInterface, AccountToFetch, MintInterface, MintState, TokenAccountInterface}; -use crate::{ - indexer::{GetCompressedTokenAccountsByOwnerOrDelegateOptions, Indexer}, - rpc::{Rpc, RpcError}, -}; - -fn indexer_err(e: impl std::fmt::Display) -> RpcError { - RpcError::CustomError(format!("IndexerError: {}", e)) -} - -/// Extension trait for fetching 
account interfaces (unified hot/cold handling). -#[async_trait] -pub trait AccountInterfaceExt: Rpc + Indexer { - /// Fetch MintInterface for a mint account. - /// - /// Use this instead of get_account + unpack_mint. - async fn get_mint_interface(&self, address: &Pubkey) -> Result; - - /// Fetch AccountInterface for an account. - /// - /// Use this instead of get_account. - async fn get_account_interface( - &self, - address: &Pubkey, - program_id: &Pubkey, - ) -> Result; - - /// Fetch TokenAccountInterface for a token account. - /// - /// Use this instead of get_token_account. - async fn get_token_account_interface( - &self, - address: &Pubkey, - ) -> Result; - - /// Fetch TokenAccountInterface for an associated token account. - /// - /// Use this for all ATAs. - async fn get_ata_interface( - &self, - owner: &Pubkey, - mint: &Pubkey, - ) -> Result; - - /// Fetch multiple accounts with automatic type dispatch. - /// - /// Use this instead of get_multiple_accounts. - async fn get_multiple_account_interfaces( - &self, - accounts: &[AccountToFetch], - ) -> Result, RpcError>; -} - -// TODO: move all these to native RPC methods with single roundtrip. -#[async_trait] -impl AccountInterfaceExt for T { - async fn get_mint_interface(&self, address: &Pubkey) -> Result { - let address_tree = Pubkey::new_from_array(MINT_ADDRESS_TREE); - let compressed_address = derive_address( - &address.to_bytes(), - &address_tree.to_bytes(), - &light_token_interface::LIGHT_TOKEN_PROGRAM_ID, - ); - - // Hot - if let Some(account) = self.get_account(*address).await? 
{ - if account.lamports > 0 { - return Ok(MintInterface { - mint: *address, - address_tree, - compressed_address, - state: MintState::Hot { account }, - }); - } - } - - // Cold - let result = self - .get_compressed_account(compressed_address, None) - .await - .map_err(indexer_err)?; - - if let Some(compressed) = result.value { - if let Some(data) = compressed.data.as_ref() { - if !data.data.is_empty() { - let mint_data = Mint::try_from_slice(&data.data) - .map_err(|e| RpcError::CustomError(format!("mint parse error: {}", e)))?; - return Ok(MintInterface { - mint: *address, - address_tree, - compressed_address, - state: MintState::Cold { - compressed, - mint_data, - }, - }); - } - } - } - - Ok(MintInterface { - mint: *address, - address_tree, - compressed_address, - state: MintState::None, - }) - } - - async fn get_account_interface( - &self, - address: &Pubkey, - program_id: &Pubkey, - ) -> Result { - let address_tree = self.get_address_tree_v2().tree; - let compressed_address = derive_address( - &address.to_bytes(), - &address_tree.to_bytes(), - &program_id.to_bytes(), - ); - - // Hot - if let Some(account) = self.get_account(*address).await? { - if account.lamports > 0 { - return Ok(AccountInterface::hot(*address, account)); - } - } - - // Cold - let result = self - .get_compressed_account(compressed_address, None) - .await - .map_err(indexer_err)?; - - if let Some(compressed) = result.value { - if compressed.data.as_ref().is_some_and(|d| !d.data.is_empty()) { - return Ok(AccountInterface::cold(*address, compressed, *program_id)); - } - } - - // Doesn't exist. - let account = solana_account::Account { - lamports: 0, - data: vec![], - owner: *program_id, - executable: false, - rent_epoch: 0, - }; - Ok(AccountInterface::hot(*address, account)) - } - - async fn get_token_account_interface( - &self, - address: &Pubkey, - ) -> Result { - use light_sdk::constants::LIGHT_TOKEN_PROGRAM_ID; - - // Hot - if let Some(account) = self.get_account(*address).await? 
{ - if account.lamports > 0 { - return TokenAccountInterface::hot(*address, account) - .map_err(|e| RpcError::CustomError(format!("parse error: {}", e))); - } - } - - // Cold (program-owned tokens: address = owner) - let result = self - .get_compressed_token_accounts_by_owner(address, None, None) - .await - .map_err(indexer_err)?; - - if let Some(compressed) = result.value.items.into_iter().next() { - return Ok(TokenAccountInterface::cold( - *address, - compressed, - *address, // owner = hot address - LIGHT_TOKEN_PROGRAM_ID.into(), - )); - } - - Err(RpcError::CustomError(format!( - "token account not found: {}", - address - ))) - } - - async fn get_ata_interface( - &self, - owner: &Pubkey, - mint: &Pubkey, - ) -> Result { - use light_sdk::constants::LIGHT_TOKEN_PROGRAM_ID; - - let (ata, _bump) = derive_token_ata(owner, mint); - - // Hot - if let Some(account) = self.get_account(ata).await? { - if account.lamports > 0 { - return TokenAccountInterface::hot(ata, account) - .map_err(|e| RpcError::CustomError(format!("parse error: {}", e))); - } - } - - // Cold (ATA query by address) - let options = Some(GetCompressedTokenAccountsByOwnerOrDelegateOptions::new( - Some(*mint), - )); - let result = self - .get_compressed_token_accounts_by_owner(&ata, options, None) - .await - .map_err(indexer_err)?; - - if let Some(compressed) = result.value.items.into_iter().next() { - return Ok(TokenAccountInterface::cold( - ata, - compressed, - *owner, // owner_override = wallet owner - LIGHT_TOKEN_PROGRAM_ID.into(), - )); - } - - Err(RpcError::CustomError(format!( - "ATA not found: owner={} mint={}", - owner, mint - ))) - } - - async fn get_multiple_account_interfaces( - &self, - accounts: &[AccountToFetch], - ) -> Result, RpcError> { - // TODO: concurrent with futures - let mut result = Vec::with_capacity(accounts.len()); - - for account in accounts { - let iface = match account { - AccountToFetch::Pda { - address, - program_id, - } => self.get_account_interface(address, 
program_id).await?, - AccountToFetch::Token { address } => { - let token_iface = self.get_token_account_interface(address).await?; - AccountInterface { - key: token_iface.key, - account: token_iface.account, - cold: token_iface.cold, - } - } - AccountToFetch::Ata { wallet_owner, mint } => { - let token_iface = self.get_ata_interface(wallet_owner, mint).await?; - AccountInterface { - key: token_iface.key, - account: token_iface.account, - cold: token_iface.cold, - } - } - AccountToFetch::Mint { address } => { - let mint_iface = self.get_mint_interface(address).await?; - match mint_iface.state { - MintState::Hot { account } => AccountInterface { - key: mint_iface.mint, - account, - cold: None, - }, - MintState::Cold { compressed, .. } => { - let owner = compressed.owner; - AccountInterface::cold(mint_iface.mint, compressed, owner) - } - MintState::None => AccountInterface { - key: mint_iface.mint, - account: Default::default(), - cold: None, - }, - } - } - }; - result.push(iface); - } - - Ok(result) - } -} diff --git a/sdk-libs/client/src/interface/decompress_mint.rs b/sdk-libs/client/src/interface/decompress_mint.rs index d877c1ce71..e421b38cd8 100644 --- a/sdk-libs/client/src/interface/decompress_mint.rs +++ b/sdk-libs/client/src/interface/decompress_mint.rs @@ -1,8 +1,10 @@ //! Mint interface types for hot/cold handling. 
use borsh::BorshDeserialize; -use light_compressed_account::instruction_data::compressed_proof::ValidityProof; -use light_token::instruction::{derive_mint_compressed_address, DecompressMint}; +use light_compressed_account::{ + address::derive_address, instruction_data::compressed_proof::ValidityProof, +}; +use light_token::instruction::DecompressMint; use light_token_interface::{ instructions::mint_action::{MintInstructionData, MintWithContext}, state::Mint, @@ -13,6 +15,7 @@ use solana_instruction::Instruction; use solana_pubkey::Pubkey; use thiserror::Error; +use super::{AccountInterface, ColdContext}; use crate::indexer::{CompressedAccount, Indexer, ValidityProofWithContext}; /// Error type for mint load operations. @@ -38,7 +41,7 @@ pub enum DecompressMintError { } /// Mint state: hot (on-chain), cold (compressed), or none. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Default)] #[allow(clippy::large_enum_variant)] pub enum MintState { /// On-chain. @@ -49,11 +52,12 @@ pub enum MintState { mint_data: Mint, }, /// Doesn't exist. + #[default] None, } /// Mint interface for hot/cold handling. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Default)] pub struct MintInterface { pub mint: Pubkey, pub address_tree: Pubkey, @@ -97,6 +101,51 @@ impl MintInterface { } } +impl From for AccountInterface { + fn from(mi: MintInterface) -> Self { + match mi.state { + MintState::Hot { account } => Self { + key: mi.mint, + account, + cold: None, + }, + MintState::Cold { + compressed, + mint_data: _, + } => { + let data = compressed + .data + .as_ref() + .map(|d| { + let mut buf = d.discriminator.to_vec(); + buf.extend_from_slice(&d.data); + buf + }) + .unwrap_or_default(); + + Self { + key: mi.mint, + account: Account { + lamports: compressed.lamports, + data, + owner: Pubkey::new_from_array( + light_token_interface::LIGHT_TOKEN_PROGRAM_ID, + ), + executable: false, + rent_epoch: 0, + }, + cold: Some(ColdContext::Mint(compressed)), + } + } + MintState::None => Self { + key: mi.mint, + account: Account::default(), + cold: None, + }, + } + } +} + pub const DEFAULT_RENT_PAYMENT: u8 = 2; pub const DEFAULT_WRITE_TOP_UP: u32 = 0; @@ -238,8 +287,11 @@ pub async fn decompress_mint_idempotent( let address_tree = request .address_tree .unwrap_or(Pubkey::new_from_array(MINT_ADDRESS_TREE)); - let compressed_address = - derive_mint_compressed_address(&request.mint_seed_pubkey, &address_tree); + let compressed_address = derive_address( + &request.mint_seed_pubkey.to_bytes(), + &address_tree.to_bytes(), + &light_token_interface::LIGHT_TOKEN_PROGRAM_ID, + ); // 2. 
Fetch cold mint from indexer let compressed_account = indexer diff --git a/sdk-libs/client/src/interface/light_program_interface.rs b/sdk-libs/client/src/interface/light_program_interface.rs index a1fa25ab0a..8ee0eb2880 100644 --- a/sdk-libs/client/src/interface/light_program_interface.rs +++ b/sdk-libs/client/src/interface/light_program_interface.rs @@ -22,9 +22,9 @@ pub enum AccountToFetch { Pda { address: Pubkey, program_id: Pubkey }, /// Token account (program-owned) - uses `get_token_account_interface(address)` Token { address: Pubkey }, - /// ATA - uses `get_ata_interface(wallet_owner, mint)` + /// ATA - uses `get_associated_token_account_interface(wallet_owner, mint)` Ata { wallet_owner: Pubkey, mint: Pubkey }, - /// Light mint - uses `get_mint_interface(address)` + /// Light mint - uses `get_account_interface(address)` (clients parse mint data) Mint { address: Pubkey }, } @@ -48,6 +48,7 @@ impl AccountToFetch { Self::Mint { address } } + /// Returns the primary pubkey for this fetch request. #[must_use] pub fn pubkey(&self) -> Pubkey { match self { @@ -61,15 +62,18 @@ impl AccountToFetch { /// Context for cold accounts. /// -/// Two variants based on data structure, not account type: -/// - `Account` - PDA +/// Three variants based on data structure: +/// - `Account` - Generic PDA /// - `Token` - Token account -#[derive(Clone, Debug)] +/// - `Mint` - Compressed mint +#[derive(Clone, Debug, PartialEq)] pub enum ColdContext { - /// PDA + /// Generic PDA Account(CompressedAccount), /// Token account Token(CompressedTokenAccount), + /// Compressed mint + Mint(CompressedAccount), } /// Specification for a program-owned PDA with typed variant. 
@@ -130,6 +134,7 @@ impl PdaSpec { match &self.interface.cold { Some(ColdContext::Account(c)) => Some(c), Some(ColdContext::Token(c)) => Some(&c.account), + Some(ColdContext::Mint(c)) => Some(c), None => None, } } diff --git a/sdk-libs/client/src/interface/mod.rs b/sdk-libs/client/src/interface/mod.rs index d3b5bd730c..041e7f4973 100644 --- a/sdk-libs/client/src/interface/mod.rs +++ b/sdk-libs/client/src/interface/mod.rs @@ -1,7 +1,6 @@ //! Client utilities for hot/cold account handling. pub mod account_interface; -pub mod account_interface_ext; pub mod create_accounts_proof; pub mod decompress_mint; pub mod initialize_config; @@ -12,7 +11,6 @@ pub mod pack; pub mod tx_size; pub use account_interface::{AccountInterface, AccountInterfaceError, TokenAccountInterface}; -pub use account_interface_ext::AccountInterfaceExt; pub use create_accounts_proof::{ get_create_accounts_proof, CreateAccountsProofError, CreateAccountsProofInput, CreateAccountsProofResult, diff --git a/sdk-libs/client/src/lib.rs b/sdk-libs/client/src/lib.rs index 3d228f3de8..d20e964c12 100644 --- a/sdk-libs/client/src/lib.rs +++ b/sdk-libs/client/src/lib.rs @@ -43,6 +43,7 @@ //! upgradeable_programs: vec![], //! limit_ledger_size: None, //! use_surfpool: true, +//! validator_args: vec![], //! }; //! spawn_validator(config).await; //! diff --git a/sdk-libs/client/src/local_test_validator.rs b/sdk-libs/client/src/local_test_validator.rs index a6413aa591..36ed7c04b3 100644 --- a/sdk-libs/client/src/local_test_validator.rs +++ b/sdk-libs/client/src/local_test_validator.rs @@ -2,6 +2,27 @@ use std::process::{Command, Stdio}; use light_prover_client::helpers::get_project_root; +/// Configuration for an upgradeable program to deploy to the validator. 
+#[derive(Debug, Clone)] +pub struct UpgradeableProgramConfig { + /// The program ID (public key) of the program + pub program_id: String, + /// Path to the compiled program (.so file) + pub program_path: String, + /// The upgrade authority for the program + pub upgrade_authority: String, +} + +impl UpgradeableProgramConfig { + pub fn new(program_id: String, program_path: String, upgrade_authority: String) -> Self { + Self { + program_id, + program_path, + upgrade_authority, + } + } +} + #[derive(Debug)] pub struct LightValidatorConfig { pub enable_indexer: bool, @@ -9,12 +30,14 @@ pub struct LightValidatorConfig { pub wait_time: u64, /// Non-upgradeable programs: (program_id, program_path) pub sbf_programs: Vec<(String, String)>, - /// Upgradeable programs: (program_id, program_path, upgrade_authority) - /// Use this when the program needs a valid upgrade authority (e.g., for compression config) - pub upgradeable_programs: Vec<(String, String, String)>, + /// Upgradeable programs to deploy with a valid upgrade authority. + /// Use this when the program needs a valid upgrade authority (e.g., for compression config). + pub upgradeable_programs: Vec, pub limit_ledger_size: Option, /// Use surfpool instead of solana-test-validator pub use_surfpool: bool, + /// Additional arguments to pass to the validator (e.g., "--account
") + pub validator_args: Vec, } impl Default for LightValidatorConfig { @@ -27,6 +50,7 @@ impl Default for LightValidatorConfig { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], } } } @@ -53,7 +77,9 @@ pub async fn spawn_validator(config: LightValidatorConfig) { for upgradeable_program in config.upgradeable_programs.iter() { path.push_str(&format!( " --upgradeable-program {} {} {}", - upgradeable_program.0, upgradeable_program.1, upgradeable_program.2 + upgradeable_program.program_id, + upgradeable_program.program_path, + upgradeable_program.upgrade_authority )); } @@ -65,6 +91,10 @@ pub async fn spawn_validator(config: LightValidatorConfig) { path.push_str(" --use-surfpool"); } + for arg in config.validator_args.iter() { + path.push_str(&format!(" {}", arg)); + } + println!("Starting validator with command: {}", path); if config.use_surfpool { diff --git a/sdk-libs/client/src/rpc/client.rs b/sdk-libs/client/src/rpc/client.rs index 2f5c5cca0f..d87d8d728d 100644 --- a/sdk-libs/client/src/rpc/client.rs +++ b/sdk-libs/client/src/rpc/client.rs @@ -31,7 +31,12 @@ use tracing::warn; use super::LightClientConfig; use crate::{ - indexer::{photon_indexer::PhotonIndexer, Indexer, TreeInfo}, + indexer::{ + photon_indexer::PhotonIndexer, AccountInterface as IndexerAccountInterface, Indexer, + IndexerRpcConfig, Response, TokenAccountInterface as IndexerTokenAccountInterface, + TreeInfo, + }, + interface::{AccountInterface, MintInterface, MintState, TokenAccountInterface}, rpc::{ errors::RpcError, get_light_state_tree_infos::{ @@ -459,6 +464,179 @@ impl LightClient { } } +// Conversion helpers from indexer types to interface types + +fn convert_account_interface( + indexer_ai: IndexerAccountInterface, +) -> Result { + use light_compressed_account::compressed_account::CompressedAccountData; + + use crate::indexer::{ColdContext as IndexerColdContext, CompressedAccount}; + + let account = Account { + lamports: 
indexer_ai.account.lamports, + data: indexer_ai.account.data, + owner: indexer_ai.account.owner, + executable: indexer_ai.account.executable, + rent_epoch: indexer_ai.account.rent_epoch, + }; + + match indexer_ai.cold { + None => Ok(AccountInterface::hot(indexer_ai.key, account)), + Some(IndexerColdContext::Account { + hash, + leaf_index, + tree_info, + data, + }) => { + let compressed = CompressedAccount { + address: None, + data: Some(CompressedAccountData { + discriminator: data.discriminator, + data: data.data, + data_hash: [0u8; 32], // Computed on demand if needed + }), + hash, + lamports: indexer_ai.account.lamports, + leaf_index: leaf_index as u32, + owner: indexer_ai.account.owner, + prove_by_index: false, + seq: tree_info.seq, + slot_created: tree_info.slot_created, + tree_info: TreeInfo { + tree: tree_info.tree, + queue: tree_info.queue, + cpi_context: None, + next_tree_info: None, + tree_type: tree_info.tree_type, + }, + }; + Ok(AccountInterface::cold( + indexer_ai.key, + compressed, + indexer_ai.account.owner, + )) + } + Some(IndexerColdContext::Token { + hash, + leaf_index, + tree_info, + data, + }) => { + use light_token::compat::TokenData; + + use crate::indexer::CompressedTokenAccount; + + // Parse token data from the cold data - propagate errors instead of using default + let token_data: TokenData = + borsh::BorshDeserialize::deserialize(&mut data.data.as_slice()).map_err(|e| { + RpcError::CustomError(format!("Failed to deserialize token data: {}", e)) + })?; + + let wallet_owner = token_data.owner; + + let compressed_account = CompressedAccount { + address: None, + data: Some(CompressedAccountData { + discriminator: data.discriminator, + data: data.data, + data_hash: [0u8; 32], + }), + hash, + lamports: indexer_ai.account.lamports, + leaf_index: leaf_index as u32, + owner: indexer_ai.account.owner, + prove_by_index: false, + seq: tree_info.seq, + slot_created: tree_info.slot_created, + tree_info: TreeInfo { + tree: tree_info.tree, + queue: 
tree_info.queue, + cpi_context: None, + next_tree_info: None, + tree_type: tree_info.tree_type, + }, + }; + let compressed_token = CompressedTokenAccount { + token: token_data, + account: compressed_account.clone(), + }; + Ok(AccountInterface::cold_token( + indexer_ai.key, + compressed_token, + wallet_owner, + )) + } + } +} + +fn convert_token_account_interface( + indexer_tai: IndexerTokenAccountInterface, +) -> Result { + use light_compressed_account::compressed_account::CompressedAccountData; + + use crate::indexer::{ + ColdContext as IndexerColdContext, CompressedAccount, CompressedTokenAccount, + }; + + let account = Account { + lamports: indexer_tai.account.account.lamports, + data: indexer_tai.account.account.data.clone(), + owner: indexer_tai.account.account.owner, + executable: indexer_tai.account.account.executable, + rent_epoch: indexer_tai.account.account.rent_epoch, + }; + + match indexer_tai.account.cold { + None => TokenAccountInterface::hot(indexer_tai.account.key, account) + .map_err(|e| RpcError::CustomError(format!("parse error: {}", e))), + Some(IndexerColdContext::Token { + hash, + leaf_index, + tree_info, + data, + }) => { + let compressed_account = CompressedAccount { + address: None, + data: Some(CompressedAccountData { + discriminator: data.discriminator, + data: data.data, + data_hash: [0u8; 32], + }), + hash, + lamports: indexer_tai.account.account.lamports, + leaf_index: leaf_index as u32, + owner: indexer_tai.account.account.owner, + prove_by_index: false, + seq: tree_info.seq, + slot_created: tree_info.slot_created, + tree_info: TreeInfo { + tree: tree_info.tree, + queue: tree_info.queue, + cpi_context: None, + next_tree_info: None, + tree_type: tree_info.tree_type, + }, + }; + // Extract token owner before moving token into CompressedTokenAccount + let token_owner = indexer_tai.token.owner; + let compressed_token = CompressedTokenAccount { + token: indexer_tai.token, + account: compressed_account, + }; + Ok(TokenAccountInterface::cold( 
+ indexer_tai.account.key, + compressed_token, + token_owner, // owner_override: use token owner, not account key + indexer_tai.account.account.owner, + )) + } + _ => Err(RpcError::CustomError( + "unexpected cold context type for token account".into(), + )), + } +} + #[async_trait] impl Rpc for LightClient { async fn new(config: LightClientConfig) -> Result @@ -926,6 +1104,199 @@ impl Rpc for LightClient { tree_type: TreeType::AddressV2, } } + + async fn get_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError> { + let indexer = self + .indexer + .as_ref() + .ok_or(RpcError::IndexerNotInitialized)?; + let resp = indexer + .get_account_interface(address, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value = resp.value.map(convert_account_interface).transpose()?; + Ok(Response { + context: resp.context, + value, + }) + } + + async fn get_token_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError> { + let indexer = self + .indexer + .as_ref() + .ok_or(RpcError::IndexerNotInitialized)?; + let resp = indexer + .get_token_account_interface(address, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value = match resp.value { + Some(tai) => Some(convert_token_account_interface(tai)?), + None => None, + }; + + Ok(Response { + context: resp.context, + value, + }) + } + + async fn get_associated_token_account_interface( + &self, + owner: &Pubkey, + mint: &Pubkey, + config: Option, + ) -> Result>, RpcError> { + let indexer = self + .indexer + .as_ref() + .ok_or(RpcError::IndexerNotInitialized)?; + let resp = indexer + .get_associated_token_account_interface(owner, mint, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value = match resp.value { + Some(tai) => Some(convert_token_account_interface(tai)?), + None => None, + }; + + Ok(Response { + context: 
resp.context, + value, + }) + } + + async fn get_multiple_account_interfaces( + &self, + addresses: Vec<&Pubkey>, + config: Option, + ) -> Result>>, RpcError> { + let indexer = self + .indexer + .as_ref() + .ok_or(RpcError::IndexerNotInitialized)?; + let resp = indexer + .get_multiple_account_interfaces(addresses, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value: Result>, RpcError> = resp + .value + .into_iter() + .map(|opt| opt.map(convert_account_interface).transpose()) + .collect(); + + Ok(Response { + context: resp.context, + value: value?, + }) + } + + async fn get_mint_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError> { + use light_compressed_account::address::derive_address; + use light_token_interface::{state::Mint, MINT_ADDRESS_TREE}; + + let address_tree = Pubkey::new_from_array(MINT_ADDRESS_TREE); + let compressed_address = derive_address( + &address.to_bytes(), + &address_tree.to_bytes(), + &light_token_interface::LIGHT_TOKEN_PROGRAM_ID, + ); + + let indexer = self + .indexer + .as_ref() + .ok_or(RpcError::IndexerNotInitialized)?; + + // Use get_account_interface to check hot/cold (Photon handles derived address fallback) + let resp = indexer + .get_account_interface(address, config.clone()) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value = match resp.value { + Some(ai) => { + let state = if ai.is_cold() { + // Cold: fetch full CompressedAccount to get data_hash + let compressed_resp = indexer + .get_compressed_account(compressed_address, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let compressed = compressed_resp.value.ok_or_else(|| { + RpcError::CustomError("Cold mint not found by compressed address".into()) + })?; + + // Parse mint data from compressed account + let mint_data = compressed + .data + .as_ref() + .and_then(|d| { + if d.data.is_empty() { + None + } else { + 
Mint::try_from_slice(&d.data).ok() + } + }) + .ok_or_else(|| { + RpcError::CustomError( + "Missing or invalid mint data in compressed account".into(), + ) + })?; + + MintState::Cold { + compressed, + mint_data, + } + } else { + let expected_owner = + Pubkey::new_from_array(light_token_interface::LIGHT_TOKEN_PROGRAM_ID); + if ai.account.owner != expected_owner { + return Err(RpcError::CustomError(format!( + "Invalid mint account owner: expected {}, got {}", + expected_owner, ai.account.owner, + ))); + } + Mint::try_from_slice(&ai.account.data).map_err(|e| { + RpcError::CustomError(format!( + "Failed to deserialize hot mint account: {e}" + )) + })?; + MintState::Hot { + account: ai.account, + } + }; + + Some(MintInterface { + mint: *address, + address_tree, + compressed_address, + state, + }) + } + None => None, + }; + + Ok(Response { + context: resp.context, + value, + }) + } } impl MerkleTreeExt for LightClient {} diff --git a/sdk-libs/client/src/rpc/rpc_trait.rs b/sdk-libs/client/src/rpc/rpc_trait.rs index 104c32d51e..820bc455d3 100644 --- a/sdk-libs/client/src/rpc/rpc_trait.rs +++ b/sdk-libs/client/src/rpc/rpc_trait.rs @@ -18,7 +18,8 @@ use solana_transaction_status_client_types::TransactionStatus; use super::client::RpcUrl; use crate::{ - indexer::{Indexer, TreeInfo}, + indexer::{Indexer, IndexerRpcConfig, Response, TreeInfo}, + interface::{AccountInterface, AccountToFetch, MintInterface, TokenAccountInterface}, rpc::errors::RpcError, }; @@ -234,4 +235,112 @@ pub trait Rpc: Send + Sync + Debug + 'static { fn get_address_tree_v1(&self) -> TreeInfo; fn get_address_tree_v2(&self) -> TreeInfo; + + // ============ Interface Methods ============ + // These race hot (on-chain) and cold (compressed) lookups in the indexer. + + /// Get account data from either on-chain or compressed sources. + /// + /// Looks up by on-chain Solana pubkey. For cold accounts, searches by + /// onchain_pubkey stored in the compressed account data. 
+ async fn get_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError>; + + /// Get token account data from either on-chain or compressed sources. + async fn get_token_account_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError>; + + /// Get ATA data from either on-chain or compressed sources. + async fn get_associated_token_account_interface( + &self, + owner: &Pubkey, + mint: &Pubkey, + config: Option, + ) -> Result>, RpcError>; + + /// Get multiple account interfaces in a batch. + async fn get_multiple_account_interfaces( + &self, + addresses: Vec<&Pubkey>, + config: Option, + ) -> Result>>, RpcError>; + + /// Get mint interface from either on-chain or compressed sources. + /// + /// This method: + /// 1. First checks if the mint exists on-chain (hot) + /// 2. Falls back to compressed account lookup (cold) using derived address + /// 3. Parses mint data locally from the account data + async fn get_mint_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError>; + + /// Fetch multiple accounts using `AccountToFetch` descriptors. + /// + /// Routes each account to the correct method based on its variant: + /// - `Pda` -> `get_account_interface` + /// - `Token` -> `get_token_account_interface` + /// - `Ata` -> `get_associated_token_account_interface` + /// - `Mint` -> `get_mint_interface` + async fn fetch_accounts( + &self, + accounts: &[AccountToFetch], + config: Option, + ) -> Result, RpcError> { + let mut results = Vec::with_capacity(accounts.len()); + for account in accounts { + let interface = match account { + AccountToFetch::Pda { address, .. } => self + .get_account_interface(address, config.clone()) + .await? + .value + .ok_or_else(|| { + RpcError::CustomError(format!("PDA account not found: {}", address)) + })?, + AccountToFetch::Token { address } => { + let tai = self + .get_token_account_interface(address, config.clone()) + .await? 
+ .value + .ok_or_else(|| { + RpcError::CustomError(format!("Token account not found: {}", address)) + })?; + tai.into() + } + AccountToFetch::Ata { wallet_owner, mint } => { + let tai = self + .get_associated_token_account_interface(wallet_owner, mint, config.clone()) + .await? + .value + .ok_or_else(|| { + RpcError::CustomError(format!( + "ATA not found for owner {} mint {}", + wallet_owner, mint + )) + })?; + tai.into() + } + AccountToFetch::Mint { address } => { + let mi = self + .get_mint_interface(address, config.clone()) + .await? + .value + .ok_or_else(|| { + RpcError::CustomError(format!("Mint not found: {}", address)) + })?; + mi.into() + } + }; + results.push(interface); + } + Ok(results) + } } diff --git a/sdk-libs/event/Cargo.toml b/sdk-libs/event/Cargo.toml index 7de438d439..76156bdbf6 100644 --- a/sdk-libs/event/Cargo.toml +++ b/sdk-libs/event/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" borsh = { workspace = true } light-compressed-account = { workspace = true, features = ["std"] } light-hasher = { workspace = true, features = ["poseidon"] } +light-token-interface = { workspace = true } light-zero-copy = { workspace = true } thiserror = { workspace = true } diff --git a/sdk-libs/event/src/event.rs b/sdk-libs/event/src/event.rs index 6978cae3d5..f9b87207f5 100644 --- a/sdk-libs/event/src/event.rs +++ b/sdk-libs/event/src/event.rs @@ -29,6 +29,18 @@ pub struct PublicTransactionEvent { pub compress_or_decompress_lamports: Option, pub pubkey_array: Vec, pub message: Option>, + /// ATA owner info for compressed ATAs (output_index -> wallet_owner_pubkey). + /// Only populated for compress_and_close operations where is_ata=true. + pub ata_owners: Vec, +} + +/// ATA owner info extracted from compress_and_close operations. 
+#[derive(Debug, Clone, Copy, BorshSerialize, BorshDeserialize, Default, PartialEq, Eq)] +pub struct AssociatedTokenAccountOwnerInfo { + /// Index into output_compressed_accounts + pub output_index: u8, + /// The wallet owner pubkey that the ATA is derived from + pub wallet_owner: Pubkey, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/sdk-libs/event/src/parse.rs b/sdk-libs/event/src/parse.rs index f8e5356636..f034eaebef 100644 --- a/sdk-libs/event/src/parse.rs +++ b/sdk-libs/event/src/parse.rs @@ -4,8 +4,8 @@ use light_compressed_account::{ CompressedAccount, CompressedAccountData, PackedCompressedAccountWithMerkleContext, }, constants::{ - ACCOUNT_COMPRESSION_PROGRAM_ID, CREATE_CPI_CONTEXT_ACCOUNT, LIGHT_SYSTEM_PROGRAM_ID, - REGISTERED_PROGRAM_PDA, + ACCOUNT_COMPRESSION_PROGRAM_ID, CREATE_CPI_CONTEXT_ACCOUNT, LIGHT_REGISTRY_PROGRAM_ID, + LIGHT_SYSTEM_PROGRAM_ID, REGISTERED_PROGRAM_PDA, }, discriminators::*, instruction_data::{ @@ -17,13 +17,19 @@ use light_compressed_account::{ nullifier::create_nullifier, Pubkey, }; +use light_token_interface::{ + instructions::{ + extensions::ExtensionInstructionData, transfer2::CompressedTokenInstructionDataTransfer2, + }, + LIGHT_TOKEN_PROGRAM_ID, TRANSFER2, +}; use light_zero_copy::traits::ZeroCopyAt; use super::{ error::ParseIndexerEventError, event::{ - BatchNullifyContext, BatchPublicTransactionEvent, MerkleTreeSequenceNumber, - MerkleTreeSequenceNumberV1, NewAddress, PublicTransactionEvent, + AssociatedTokenAccountOwnerInfo, BatchNullifyContext, BatchPublicTransactionEvent, + MerkleTreeSequenceNumber, MerkleTreeSequenceNumberV1, NewAddress, PublicTransactionEvent, }, }; @@ -39,19 +45,25 @@ struct ExecutingSystemInstruction<'a> { } #[derive(Debug, Clone, PartialEq, Default)] -pub(crate) struct Indices { +pub struct Indices { pub system: usize, pub cpi: Vec, pub insert_into_queues: usize, pub found_solana_system_program_instruction: bool, pub found_system: bool, + /// Index of the token program instruction (if 
present, only when called from registry) + pub token: Option, + /// Whether registry program was found in the CPI chain (required for token instruction tracking) + pub found_registry: bool, } #[derive(Debug, Clone, Copy, PartialEq)] -pub(crate) enum ProgramId { +pub enum ProgramId { LightSystem, AccountCompression, SolanaSystem, + LightToken, + Registry, Unknown, } @@ -61,6 +73,17 @@ struct AssociatedInstructions<'a> { pub cpi_context_outputs: Vec, pub insert_into_queues_instruction: InsertIntoQueuesInstructionData<'a>, pub accounts: &'a [Pubkey], + /// Token instruction data and accounts for ATA owner extraction + pub token_instruction: Option>, +} + +/// Parsed token instruction data for extracting ATA owner info +#[derive(Debug, Clone, PartialEq)] +pub struct TokenInstructionData<'a> { + /// Raw instruction data + pub data: &'a [u8], + /// Accounts for this instruction + pub accounts: &'a [Pubkey], } /// We piece the event together from 2 instructions: @@ -158,12 +181,20 @@ fn deserialize_associated_instructions<'a>( }?; let exec_instruction = deserialize_instruction(&instructions[indices.system], &accounts[indices.system])?; + + // Get token instruction data if present + let token_instruction = indices.token.map(|token_idx| TokenInstructionData { + data: &instructions[token_idx], + accounts: &accounts[token_idx], + }); + Ok(AssociatedInstructions { executing_system_instruction: exec_instruction, cpi_context_outputs, insert_into_queues_instruction: insert_queues_instruction, // Remove signer and register program accounts. 
accounts: &accounts[indices.insert_into_queues][2..], + token_instruction, }) } @@ -173,7 +204,7 @@ fn deserialize_associated_instructions<'a>( /// if next instruct is solana system program isntruction followed by insert into queues is executable instruction /// else is cpi instruction /// only push into vec if insert into queues instruction is found -fn find_cpi_patterns(program_ids: &[ProgramId]) -> Vec { +pub fn find_cpi_patterns(program_ids: &[ProgramId]) -> Vec { let mut vec = Vec::new(); let mut next_index = usize::MAX; for (last_index, program_id) in (0..program_ids.len()).rev().zip(program_ids.iter().rev()) { @@ -198,11 +229,14 @@ fn find_cpi_patterns(program_ids: &[ProgramId]) -> Vec { /// We search for the pattern in reverse because there can be multiple system instructions /// but only one account compression instruction. /// Start index points to ACCOUNT_COMPRESSION_PROGRAM_ID -fn find_cpi_pattern(start_index: usize, program_ids: &[ProgramId]) -> (Option, usize) { +pub fn find_cpi_pattern(start_index: usize, program_ids: &[ProgramId]) -> (Option, usize) { let mut index_account = Indices { insert_into_queues: start_index, ..Default::default() }; + // Track tentative token index - will only be confirmed if registry is found + let mut tentative_token: Option = None; + for (index, program_id) in (0..start_index) .rev() .zip(program_ids[..start_index].iter().rev()) @@ -218,6 +252,22 @@ fn find_cpi_pattern(start_index: usize, program_ids: &[ProgramId]) -> (Option (Option], accounts: &[Vec], @@ -268,6 +318,15 @@ fn wrap_program_ids( } else { vec.push(ProgramId::Unknown); } + } else if program_id == &Pubkey::from(LIGHT_TOKEN_PROGRAM_ID) { + // Token program Transfer2 instruction + if !instruction.is_empty() && instruction[0] == TRANSFER2 { + vec.push(ProgramId::LightToken); + } else { + vec.push(ProgramId::Unknown); + } + } else if program_id == &LIGHT_REGISTRY_PROGRAM_ID { + vec.push(ProgramId::Registry); } else { vec.push(ProgramId::Unknown); } @@ -458,6 
+517,56 @@ fn deserialize_instruction<'a>( } } +/// Extract ATA owner info from token instruction's out_tlv. +/// Returns a Vec of (output_index, wallet_owner) for ATAs. +pub fn extract_ata_owners( + token_instruction: &TokenInstructionData, +) -> Vec { + let mut ata_owners = Vec::new(); + + // Token instruction format: [discriminator (1 byte)] [serialized data] + if token_instruction.data.is_empty() || token_instruction.data[0] != TRANSFER2 { + return ata_owners; + } + + // Skip discriminator byte and deserialize using borsh + let data = &token_instruction.data[1..]; + let Ok(transfer_data) = CompressedTokenInstructionDataTransfer2::deserialize(&mut &data[..]) + else { + return ata_owners; + }; + + // Check if there's out_tlv data + let Some(out_tlv) = transfer_data.out_tlv.as_ref() else { + return ata_owners; + }; + + // Iterate over output TLV entries (one per output token account) + for (output_index, tlv_extensions) in out_tlv.iter().enumerate() { + // Look for CompressedOnly extension with is_ata=true + for ext in tlv_extensions.iter() { + if let ExtensionInstructionData::CompressedOnly(compressed_only) = ext { + if compressed_only.is_ata { + // Get wallet owner from packed_accounts using owner_index. + // owner_index is an index into packed_accounts, which starts at position 7 + // in the Transfer2 accounts array (after the 7 system accounts). 
+ const TRANSFER2_PACKED_ACCOUNTS_OFFSET: usize = 7; + let owner_idx = + compressed_only.owner_index as usize + TRANSFER2_PACKED_ACCOUNTS_OFFSET; + if owner_idx < token_instruction.accounts.len() { + ata_owners.push(AssociatedTokenAccountOwnerInfo { + output_index: output_index as u8, + wallet_owner: token_instruction.accounts[owner_idx], + }); + } + } + } + } + } + + ata_owners +} + fn create_batched_transaction_event( associated_instructions: &AssociatedInstructions, ) -> Result { @@ -521,6 +630,11 @@ fn create_batched_transaction_event( .accounts .to_vec(), message: None, + ata_owners: associated_instructions + .token_instruction + .as_ref() + .map(extract_ata_owners) + .unwrap_or_default(), }, tx_hash: associated_instructions .insert_into_queues_instruction @@ -666,345 +780,3 @@ fn create_address_queue_indices( }); address_queue_indices } - -#[cfg(test)] -mod test { - use rand::{ - rngs::{StdRng, ThreadRng}, - Rng, RngCore, SeedableRng, - }; - - use super::*; - fn get_rnd_program_id(rng: &mut R, with_system_program: bool) -> ProgramId { - let vec = [ - ProgramId::Unknown, - ProgramId::AccountCompression, - ProgramId::LightSystem, - ]; - let len = if with_system_program { 3 } else { 2 }; - let index = rng.gen_range(0..len); - vec[index] - } - fn get_rnd_program_ids( - rng: &mut R, - len: usize, - with_system_program: bool, - ) -> Vec { - (0..len) - .map(|_| get_rnd_program_id(rng, with_system_program)) - .collect() - } - - #[test] - fn test_rnd_functional() { - let mut thread_rng = ThreadRng::default(); - let seed = thread_rng.next_u64(); - // Keep this print so that in case the test fails - // we can use the seed to reproduce the error. 
- println!("\n\ntest seed {}\n\n", seed); - let mut rng = StdRng::seed_from_u64(seed); - let num_iters = 100000; - for _ in 0..num_iters { - let len_pre = rng.gen_range(0..6); - let rnd_vec_pre = get_rnd_program_ids(&mut rng, len_pre, false); - let len_post = rng.gen_range(0..6); - let rnd_vec_post = get_rnd_program_ids(&mut rng, len_post, false); - let num_mid = rng.gen_range(1..6); - - let program_ids = [ - rnd_vec_pre.as_slice(), - [ProgramId::LightSystem].as_slice(), - vec![ProgramId::SolanaSystem; num_mid].as_slice(), - [ProgramId::AccountCompression].as_slice(), - rnd_vec_post.as_slice(), - ] - .concat(); - let start_index = program_ids.len() - 1 - len_post; - let system_index = program_ids.len() - 1 - len_post - num_mid - 1; - let vec = find_cpi_patterns(&program_ids); - let expected = Indices { - system: system_index, - cpi: vec![], - insert_into_queues: start_index, - found_solana_system_program_instruction: true, - found_system: true, - }; - assert!( - vec.contains(&expected), - "program ids {:?} parsed events {:?} expected {:?} ", - program_ids, - vec, - expected, - ); - } - - for _ in 0..num_iters { - let len_pre = rng.gen_range(0..6); - let rnd_vec_pre = get_rnd_program_ids(&mut rng, len_pre, true); - let len_post = rng.gen_range(0..6); - let rnd_vec_post = get_rnd_program_ids(&mut rng, len_post, true); - let num_mid = rng.gen_range(1..6); - - let program_ids = [ - rnd_vec_pre.as_slice(), - [ProgramId::LightSystem].as_slice(), - vec![ProgramId::SolanaSystem; num_mid].as_slice(), - [ProgramId::AccountCompression].as_slice(), - rnd_vec_post.as_slice(), - ] - .concat(); - let start_index = program_ids.len() - 1 - len_post; - let system_index = program_ids.len() - 1 - len_post - num_mid - 1; - let vec = find_cpi_patterns(&program_ids); - let expected = Indices { - system: system_index, - cpi: vec![], - insert_into_queues: start_index, - found_solana_system_program_instruction: true, - found_system: true, - }; - assert!( - vec.iter().any(|x| x.system == 
expected.system - && x.insert_into_queues == expected.insert_into_queues), - "program ids {:?} parsed events {:?} expected {:?} ", - program_ids, - vec, - expected, - ); - } - } - - #[test] - fn test_rnd_failing() { - let mut thread_rng = ThreadRng::default(); - let seed = thread_rng.next_u64(); - // Keep this print so that in case the test fails - // we can use the seed to reproduce the error. - println!("\n\ntest seed {}\n\n", seed); - let mut rng = StdRng::seed_from_u64(seed); - let num_iters = 100000; - for _ in 0..num_iters { - let len = rng.gen_range(0..20); - let mut program_ids = get_rnd_program_ids(&mut rng, len, true); - // if any ProgramId::LightSystem is followed by ProgramId::SolanaSystem overwrite ProgramId::SolanaSystem with ProgramId::Unknown - for i in 0..program_ids.len().saturating_sub(1) { - if matches!(program_ids[i], ProgramId::LightSystem) - && matches!(program_ids[i + 1], ProgramId::SolanaSystem) - { - program_ids[i + 1] = ProgramId::Unknown; - } - } - - let vec = find_cpi_patterns(&program_ids); - - assert!( - vec.is_empty(), - "program_ids {:?} result {:?}", - program_ids, - vec - ); - } - } - - #[test] - fn test_find_two_patterns() { - // Std pattern - { - let program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let vec = find_cpi_patterns(&program_ids); - assert_eq!(vec.len(), 2); - assert_eq!( - vec[0], - Indices { - system: 5, - cpi: vec![], - insert_into_queues: 7, - found_solana_system_program_instruction: true, - found_system: true, - } - ); - assert_eq!( - vec[1], - Indices { - system: 1, - cpi: vec![], - insert_into_queues: 3, - found_solana_system_program_instruction: true, - found_system: true, - } - ); - // Modify only second event is valid - { - let mut program_ids = program_ids.clone(); - program_ids[2] = ProgramId::Unknown; - let vec = 
find_cpi_patterns(&program_ids); - assert_eq!(vec.len(), 1); - assert_eq!( - vec[0], - Indices { - system: 5, - cpi: vec![], - insert_into_queues: 7, - found_solana_system_program_instruction: true, - found_system: true, - } - ); - } - // Modify only first event is valid - { - let mut program_ids = program_ids; - program_ids[6] = ProgramId::Unknown; - let vec = find_cpi_patterns(&program_ids); - assert_eq!(vec.len(), 1); - assert_eq!( - vec[0], - Indices { - system: 1, - cpi: vec![], - insert_into_queues: 3, - found_solana_system_program_instruction: true, - found_system: true, - } - ); - } - } - } - - #[test] - fn test_find_pattern() { - // Std pattern - { - let program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let (res, last_index) = find_cpi_pattern(3, &program_ids); - assert_eq!(last_index, 0); - assert_eq!( - res, - Some(Indices { - system: 1, - cpi: vec![], - insert_into_queues: 3, - found_solana_system_program_instruction: true, - found_system: true, - }) - ); - } - { - let program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let start_index = program_ids.len() - 1; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 0); - assert_eq!( - res, - Some(Indices { - system: 1, - cpi: vec![], - insert_into_queues: start_index, - found_solana_system_program_instruction: true, - found_system: true, - }) - ); - } - { - let program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::Unknown, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let start_index = program_ids.len() - 1; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 3); - assert_eq!(res, None); - } - // With cpi context - { - let 
program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let start_index = program_ids.len() - 1; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 0); - assert_eq!( - res, - Some(Indices { - system: 3, - cpi: vec![1], - insert_into_queues: start_index, - found_solana_system_program_instruction: true, - found_system: true, - }) - ); - // Failing - { - let mut program_ids = program_ids; - program_ids[5] = ProgramId::Unknown; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 5); - assert_eq!(res, None); - } - } - // With cpi context - { - let program_ids = vec![ - ProgramId::Unknown, - ProgramId::LightSystem, - ProgramId::LightSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::SolanaSystem, - ProgramId::AccountCompression, - ]; - let start_index = program_ids.len() - 1; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 0); - assert_eq!( - res, - Some(Indices { - system: 2, - cpi: vec![1], - insert_into_queues: start_index, - found_solana_system_program_instruction: true, - found_system: true, - }) - ); - // Failing - { - let mut program_ids = program_ids; - program_ids[4] = ProgramId::Unknown; - let (res, last_index) = find_cpi_pattern(start_index, &program_ids); - assert_eq!(last_index, 4); - assert_eq!(res, None); - } - } - } -} diff --git a/sdk-libs/event/tests/parse_test.rs b/sdk-libs/event/tests/parse_test.rs new file mode 100644 index 0000000000..be470958db --- /dev/null +++ b/sdk-libs/event/tests/parse_test.rs @@ -0,0 +1,1287 @@ +use borsh::BorshSerialize; +use light_compressed_account::{ + constants::{ + ACCOUNT_COMPRESSION_PROGRAM_ID, LIGHT_REGISTRY_PROGRAM_ID, REGISTERED_PROGRAM_PDA, + }, + 
discriminators::DISCRIMINATOR_INSERT_INTO_QUEUES, + Pubkey, +}; +use light_event::parse::{ + extract_ata_owners, find_cpi_pattern, find_cpi_patterns, wrap_program_ids, Indices, ProgramId, + TokenInstructionData, +}; +use light_token_interface::{ + instructions::{ + extensions::{CompressedOnlyExtensionInstructionData, ExtensionInstructionData}, + transfer2::{CompressedTokenInstructionDataTransfer2, MultiTokenTransferOutputData}, + }, + LIGHT_TOKEN_PROGRAM_ID, TRANSFER2, +}; +use rand::{ + rngs::{StdRng, ThreadRng}, + Rng, RngCore, SeedableRng, +}; + +fn get_rnd_program_id(rng: &mut R, with_system_program: bool) -> ProgramId { + let vec = [ + ProgramId::Unknown, + ProgramId::AccountCompression, + ProgramId::LightSystem, + ]; + let len = if with_system_program { 3 } else { 2 }; + let index = rng.gen_range(0..len); + vec[index] +} + +fn get_rnd_program_ids( + rng: &mut R, + len: usize, + with_system_program: bool, +) -> Vec { + (0..len) + .map(|_| get_rnd_program_id(rng, with_system_program)) + .collect() +} + +/// Helper to create valid Transfer2 instruction data with ATA extensions +fn create_transfer2_with_ata(owner_index: u8, is_ata: bool) -> Vec { + let transfer_data = CompressedTokenInstructionDataTransfer2 { + with_transaction_hash: false, + with_lamports_change_account_merkle_tree_index: false, + lamports_change_account_merkle_tree_index: 0, + lamports_change_account_owner_index: 0, + output_queue: 0, + max_top_up: 0, + cpi_context: None, + compressions: None, + proof: None, + in_token_data: vec![], + out_token_data: vec![MultiTokenTransferOutputData { + owner: owner_index, + amount: 1000, + has_delegate: false, + delegate: 0, + mint: 0, + version: 3, + }], + in_lamports: None, + out_lamports: None, + in_tlv: None, + out_tlv: Some(vec![vec![ExtensionInstructionData::CompressedOnly( + CompressedOnlyExtensionInstructionData { + delegated_amount: 0, + withheld_transfer_fee: 0, + is_frozen: false, + compression_index: 0, + is_ata, + bump: 255, + owner_index, + }, + 
)]]), + }; + let mut data = vec![TRANSFER2]; // discriminator + data.extend(transfer_data.try_to_vec().unwrap()); + data +} + +/// Helper to create Transfer2 instruction data with multiple outputs +fn create_transfer2_with_multiple_outputs( + outputs: Vec<(u8, bool)>, // (owner_index, is_ata) +) -> Vec { + let out_token_data: Vec = outputs + .iter() + .map(|(owner_index, _)| MultiTokenTransferOutputData { + owner: *owner_index, + amount: 1000, + has_delegate: false, + delegate: 0, + mint: 0, + version: 3, + }) + .collect(); + + let out_tlv: Vec> = outputs + .iter() + .map(|(owner_index, is_ata)| { + vec![ExtensionInstructionData::CompressedOnly( + CompressedOnlyExtensionInstructionData { + delegated_amount: 0, + withheld_transfer_fee: 0, + is_frozen: false, + compression_index: 0, + is_ata: *is_ata, + bump: 255, + owner_index: *owner_index, + }, + )] + }) + .collect(); + + let transfer_data = CompressedTokenInstructionDataTransfer2 { + with_transaction_hash: false, + with_lamports_change_account_merkle_tree_index: false, + lamports_change_account_merkle_tree_index: 0, + lamports_change_account_owner_index: 0, + output_queue: 0, + max_top_up: 0, + cpi_context: None, + compressions: None, + proof: None, + in_token_data: vec![], + out_token_data, + in_lamports: None, + out_lamports: None, + in_tlv: None, + out_tlv: Some(out_tlv), + }; + let mut data = vec![TRANSFER2]; + data.extend(transfer_data.try_to_vec().unwrap()); + data +} + +#[test] +fn test_rnd_functional() { + let mut thread_rng = ThreadRng::default(); + let seed = thread_rng.next_u64(); + // Keep this print so that in case the test fails + // we can use the seed to reproduce the error. 
+ println!("\n\ntest seed {}\n\n", seed); + let mut rng = StdRng::seed_from_u64(seed); + let num_iters = 100000; + for _ in 0..num_iters { + let len_pre = rng.gen_range(0..6); + let rnd_vec_pre = get_rnd_program_ids(&mut rng, len_pre, false); + let len_post = rng.gen_range(0..6); + let rnd_vec_post = get_rnd_program_ids(&mut rng, len_post, false); + let num_mid = rng.gen_range(1..6); + + let program_ids = [ + rnd_vec_pre.as_slice(), + [ProgramId::LightSystem].as_slice(), + vec![ProgramId::SolanaSystem; num_mid].as_slice(), + [ProgramId::AccountCompression].as_slice(), + rnd_vec_post.as_slice(), + ] + .concat(); + let start_index = program_ids.len() - 1 - len_post; + let system_index = program_ids.len() - 1 - len_post - num_mid - 1; + let vec = find_cpi_patterns(&program_ids); + let expected = Indices { + system: system_index, + cpi: vec![], + insert_into_queues: start_index, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + }; + assert!( + vec.contains(&expected), + "program ids {:?} parsed events {:?} expected {:?} ", + program_ids, + vec, + expected, + ); + } + + for _ in 0..num_iters { + let len_pre = rng.gen_range(0..6); + let rnd_vec_pre = get_rnd_program_ids(&mut rng, len_pre, true); + let len_post = rng.gen_range(0..6); + let rnd_vec_post = get_rnd_program_ids(&mut rng, len_post, true); + let num_mid = rng.gen_range(1..6); + + let program_ids = [ + rnd_vec_pre.as_slice(), + [ProgramId::LightSystem].as_slice(), + vec![ProgramId::SolanaSystem; num_mid].as_slice(), + [ProgramId::AccountCompression].as_slice(), + rnd_vec_post.as_slice(), + ] + .concat(); + let start_index = program_ids.len() - 1 - len_post; + let system_index = program_ids.len() - 1 - len_post - num_mid - 1; + let vec = find_cpi_patterns(&program_ids); + let expected = Indices { + system: system_index, + cpi: vec![], + insert_into_queues: start_index, + found_solana_system_program_instruction: true, + found_system: true, + token: 
None, + found_registry: false, + }; + assert!( + vec.iter().any(|x| x.system == expected.system + && x.insert_into_queues == expected.insert_into_queues), + "program ids {:?} parsed events {:?} expected {:?} ", + program_ids, + vec, + expected, + ); + } +} + +#[test] +fn test_rnd_failing() { + let mut thread_rng = ThreadRng::default(); + let seed = thread_rng.next_u64(); + // Keep this print so that in case the test fails + // we can use the seed to reproduce the error. + println!("\n\ntest seed {}\n\n", seed); + let mut rng = StdRng::seed_from_u64(seed); + let num_iters = 100000; + for _ in 0..num_iters { + let len = rng.gen_range(0..20); + let mut program_ids = get_rnd_program_ids(&mut rng, len, true); + // if any ProgramId::LightSystem is followed by ProgramId::SolanaSystem overwrite ProgramId::SolanaSystem with ProgramId::Unknown + for i in 0..program_ids.len().saturating_sub(1) { + if matches!(program_ids[i], ProgramId::LightSystem) + && matches!(program_ids[i + 1], ProgramId::SolanaSystem) + { + program_ids[i + 1] = ProgramId::Unknown; + } + } + + let vec = find_cpi_patterns(&program_ids); + + assert!( + vec.is_empty(), + "program_ids {:?} result {:?}", + program_ids, + vec + ); + } +} + +#[test] +fn test_find_two_patterns() { + // Std pattern + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let vec = find_cpi_patterns(&program_ids); + assert_eq!(vec.len(), 2); + assert_eq!( + vec[0], + Indices { + system: 5, + cpi: vec![], + insert_into_queues: 7, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + } + ); + assert_eq!( + vec[1], + Indices { + system: 1, + cpi: vec![], + insert_into_queues: 3, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: 
false, + } + ); + // Modify only second event is valid + { + let mut program_ids = program_ids.clone(); + program_ids[2] = ProgramId::Unknown; + let vec = find_cpi_patterns(&program_ids); + assert_eq!(vec.len(), 1); + assert_eq!( + vec[0], + Indices { + system: 5, + cpi: vec![], + insert_into_queues: 7, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + } + ); + } + // Modify only first event is valid + { + let mut program_ids = program_ids; + program_ids[6] = ProgramId::Unknown; + let vec = find_cpi_patterns(&program_ids); + assert_eq!(vec.len(), 1); + assert_eq!( + vec[0], + Indices { + system: 1, + cpi: vec![], + insert_into_queues: 3, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + } + ); + } + } +} + +#[test] +fn test_find_pattern() { + // Std pattern + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, last_index) = find_cpi_pattern(3, &program_ids); + assert_eq!(last_index, 0); + assert_eq!( + res, + Some(Indices { + system: 1, + cpi: vec![], + insert_into_queues: 3, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + }) + ); + } + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let start_index = program_ids.len() - 1; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 0); + assert_eq!( + res, + Some(Indices { + system: 1, + cpi: vec![], + insert_into_queues: start_index, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + }) + ); + } + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + 
ProgramId::SolanaSystem, + ProgramId::Unknown, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let start_index = program_ids.len() - 1; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 3); + assert_eq!(res, None); + } + // With cpi context + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let start_index = program_ids.len() - 1; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 0); + assert_eq!( + res, + Some(Indices { + system: 3, + cpi: vec![1], + insert_into_queues: start_index, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + }) + ); + // Failing + { + let mut program_ids = program_ids; + program_ids[5] = ProgramId::Unknown; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 5); + assert_eq!(res, None); + } + } + // With cpi context + { + let program_ids = vec![ + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let start_index = program_ids.len() - 1; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 0); + assert_eq!( + res, + Some(Indices { + system: 2, + cpi: vec![1], + insert_into_queues: start_index, + found_solana_system_program_instruction: true, + found_system: true, + token: None, + found_registry: false, + }) + ); + // Failing + { + let mut program_ids = program_ids; + program_ids[4] = ProgramId::Unknown; + let (res, last_index) = find_cpi_pattern(start_index, &program_ids); + assert_eq!(last_index, 4); + assert_eq!(res, None); + } + } +} + +// 
========================================================================== +// Tests for extract_ata_owners +// ========================================================================== + +#[test] +fn test_extract_ata_owners_empty_data() { + let token_instruction = TokenInstructionData { + data: &[], + accounts: &[], + }; + let result = extract_ata_owners(&token_instruction); + assert!(result.is_empty(), "Empty data should return empty vec"); +} + +#[test] +fn test_extract_ata_owners_wrong_discriminator() { + let token_instruction = TokenInstructionData { + data: &[0xFF, 0x00, 0x00], // Wrong discriminator + accounts: &[], + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "Wrong discriminator should return empty vec" + ); +} + +#[test] +fn test_extract_ata_owners_only_discriminator() { + let token_instruction = TokenInstructionData { + data: &[TRANSFER2], // Only discriminator, no data + accounts: &[], + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "Only discriminator should return empty vec (deserialization fails)" + ); +} + +#[test] +fn test_extract_ata_owners_malformed_data() { + // Random garbage after discriminator + let token_instruction = TokenInstructionData { + data: &[TRANSFER2, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF], + accounts: &[], + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "Malformed data should return empty vec (deserialization fails)" + ); +} + +#[test] +fn test_extract_ata_owners_valid_non_ata() { + let data = create_transfer2_with_ata(0, false); // is_ata = false + let accounts = vec![Pubkey::default(); 10]; + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "Non-ATA accounts should not produce ATA owner info" + ); +} + +#[test] +fn test_extract_ata_owners_valid_ata() { + let owner_index 
= 2u8; // Index into packed_accounts + let data = create_transfer2_with_ata(owner_index, true); + + // Create accounts array: 7 system accounts + packed_accounts + // owner_index=2 means packed_accounts[2] = accounts[7+2] = accounts[9] + let mut accounts = vec![Pubkey::default(); 10]; + let expected_owner = Pubkey::new_from_array([42u8; 32]); + accounts[7 + owner_index as usize] = expected_owner; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + assert_eq!(result.len(), 1, "Should extract one ATA owner"); + assert_eq!(result[0].output_index, 0); + assert_eq!(result[0].wallet_owner, expected_owner); +} + +#[test] +fn test_extract_ata_owners_owner_index_out_of_bounds() { + let owner_index = 100u8; // Way beyond accounts array + let data = create_transfer2_with_ata(owner_index, true); + + // Only 10 accounts, but owner_index + 7 = 107 + let accounts = vec![Pubkey::default(); 10]; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "Out of bounds owner_index should be safely skipped" + ); +} + +#[test] +fn test_extract_ata_owners_boundary_owner_index() { + // Test with owner_index at the boundary + let owner_index = 2u8; + let data = create_transfer2_with_ata(owner_index, true); + + // Create exactly enough accounts: 7 system + 3 packed (indices 0, 1, 2) + // owner_index=2 needs accounts[9], so we need 10 accounts total + let mut accounts = vec![Pubkey::default(); 10]; + let expected_owner = Pubkey::new_from_array([99u8; 32]); + accounts[9] = expected_owner; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + assert_eq!(result.len(), 1); + assert_eq!(result[0].wallet_owner, expected_owner); + + // Now with one less account - should be skipped + 
let accounts_short = vec![Pubkey::default(); 9]; + let token_instruction_short = TokenInstructionData { + data: &data, + accounts: &accounts_short, + }; + let result_short = extract_ata_owners(&token_instruction_short); + assert!( + result_short.is_empty(), + "Boundary case with insufficient accounts should be skipped" + ); +} + +#[test] +fn test_extract_ata_owners_max_owner_index() { + // Test with u8::MAX owner_index + let owner_index = u8::MAX; + let data = create_transfer2_with_ata(owner_index, true); + + // 255 + 7 = 262, need 263 accounts + let accounts = vec![Pubkey::default(); 10]; // Way too few + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + assert!( + result.is_empty(), + "u8::MAX owner_index with small accounts array should be safely skipped" + ); +} + +// ========================================================================== +// Tests for wrap_program_ids with LightToken and Registry +// ========================================================================== + +#[test] +fn test_wrap_program_ids_light_token_transfer2() { + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; // Minimum size + instruction_data[0] = TRANSFER2; + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, vec![ProgramId::LightToken]); +} + +#[test] +fn test_wrap_program_ids_light_token_non_transfer2() { + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; + instruction_data[0] = 0xFF; // Not TRANSFER2 + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, vec![ProgramId::Unknown]); +} + +#[test] +fn test_wrap_program_ids_registry() { + 
let program_ids = vec![Pubkey::from(LIGHT_REGISTRY_PROGRAM_ID)]; + let instruction_data = vec![0u8; 12]; + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, vec![ProgramId::Registry]); +} + +#[test] +fn test_wrap_program_ids_instruction_too_small() { + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let instruction_data = vec![TRANSFER2; 5]; // Less than 12 bytes + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!( + result, + vec![ProgramId::Unknown], + "Instructions smaller than 12 bytes should be Unknown" + ); +} + +// ========================================================================== +// Tests for find_cpi_pattern with Registry and Token tracking +// ========================================================================== + +#[test] +fn test_find_cpi_pattern_with_registry_and_token() { + // Pattern: Registry -> Token -> LightSystem -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightToken, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(4, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry, "Should find registry"); + assert_eq!( + indices.token, + Some(1), + "Should track token when registry is present" + ); + assert_eq!(indices.system, 2); +} + +#[test] +fn test_find_cpi_pattern_token_without_registry() { + // Pattern: Token -> LightSystem -> SolanaSystem -> AccountCompression + // No registry means token should NOT be tracked + let program_ids = vec![ + ProgramId::LightToken, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + 
assert!(res.is_some()); + let indices = res.unwrap(); + assert!(!indices.found_registry, "Should not find registry"); + assert_eq!( + indices.token, None, + "Should NOT track token without registry" + ); +} + +#[test] +fn test_find_cpi_pattern_registry_without_token() { + // Registry can call LightSystem directly without Token + // Pattern: Registry -> LightSystem -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry, "Should find registry"); + assert_eq!(indices.token, None, "No token instruction in this pattern"); +} + +#[test] +fn test_find_cpi_pattern_multiple_tokens_only_first_tracked() { + // Only the first (closest to system) token should be tracked + // Pattern: Registry -> Token1 -> Token2 -> LightSystem -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightToken, // Token1 - outer + ProgramId::LightToken, // Token2 - inner, should be tracked + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(5, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry); + // The inner token (index 2) should be tracked as it's first when searching backwards + assert_eq!( + indices.token, + Some(2), + "Should track the token closest to system instruction" + ); +} + +// ========================================================================== +// Additional ATA and Program ID filtering edge case tests +// ========================================================================== + +#[test] +fn test_find_cpi_pattern_token_after_account_compression_not_tracked() { + // Token appearing after AccountCompression should not be part of this pattern + // 
Pattern: Registry -> LightSystem -> SolanaSystem -> AccountCompression -> Token + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ProgramId::LightToken, // After AccountCompression - not part of this pattern + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry); + assert_eq!( + indices.token, None, + "Token after AccountCompression should not be tracked in this pattern" + ); +} + +#[test] +fn test_find_cpi_pattern_registry_after_account_compression_not_found() { + // Registry appearing after AccountCompression should not validate token tracking + // Pattern: Token -> LightSystem -> SolanaSystem -> AccountCompression -> Registry + let program_ids = vec![ + ProgramId::LightToken, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ProgramId::Registry, // After AccountCompression - not part of this pattern + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!( + !indices.found_registry, + "Registry after AccountCompression should not be found" + ); + assert_eq!( + indices.token, None, + "Token should not be tracked without registry before AccountCompression" + ); +} + +#[test] +fn test_find_cpi_pattern_token_between_unknown_programs() { + // Token surrounded by Unknown programs, with Registry present + // Pattern: Registry -> Unknown -> Token -> Unknown -> LightSystem -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, + ProgramId::Unknown, + ProgramId::LightToken, + ProgramId::Unknown, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(6, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry); + assert_eq!( + indices.token, + 
Some(2), + "Token should be tracked even with Unknown programs around it" + ); +} + +#[test] +fn test_find_cpi_pattern_empty_program_ids() { + let program_ids: Vec = vec![]; + let patterns = find_cpi_patterns(&program_ids); + assert!( + patterns.is_empty(), + "Empty program IDs should return no patterns" + ); +} + +#[test] +fn test_find_cpi_pattern_single_account_compression() { + let program_ids = vec![ProgramId::AccountCompression]; + let (res, _) = find_cpi_pattern(0, &program_ids); + assert!( + res.is_none(), + "Single AccountCompression without system should not match" + ); +} + +#[test] +fn test_find_cpi_pattern_registry_token_no_system() { + // Registry and Token without LightSystem - invalid pattern + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightToken, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + assert!( + res.is_none(), + "Pattern without LightSystem should not match" + ); +} + +#[test] +fn test_find_cpi_pattern_token_at_position_zero_not_tracked() { + // Token at position 0 (outermost in CPI chain) - this is NOT a valid real-world pattern. + // In the actual protocol, Registry is always the outermost caller (Registry -> Token -> LightSystem). + // Pattern: Token -> Registry -> LightSystem -> SolanaSystem -> AccountCompression + // + // When searching backwards, we encounter Registry (index 1) BEFORE Token (index 0). + // At the point we find Registry, tentative_token is still None, so we don't confirm a token. + // Then we find Token at index 0, but Registry has already been processed. + // + // This behavior is CORRECT because Token being outermost is invalid - Registry must be outer. 
+ let program_ids = vec![ + ProgramId::LightToken, // Position 0 - invalid as outermost + ProgramId::Registry, // Position 1 + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(4, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry); + // Token at position 0 is NOT tracked because it appears AFTER Registry in backwards search. + // This is correct behavior - Token must be between Registry and LightSystem. + assert_eq!( + indices.token, None, + "Token at position 0 (before Registry in array) should NOT be tracked - invalid CPI order" + ); +} + +#[test] +fn test_find_cpi_pattern_multiple_registries() { + // Multiple Registry programs - behavior verification + // Pattern: Registry -> Registry -> Token -> LightSystem -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, // First Registry + ProgramId::Registry, // Second Registry + ProgramId::LightToken, + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(5, &program_ids); + assert!(res.is_some()); + let indices = res.unwrap(); + assert!(indices.found_registry, "Should find at least one registry"); + assert_eq!( + indices.token, + Some(2), + "Token should be tracked with registry present" + ); +} + +#[test] +fn test_find_cpi_pattern_token_before_system_instruction() { + // Token appearing before finding system instruction in backwards search + // Pattern: LightSystem -> SolanaSystem -> Token -> AccountCompression + // When searching backwards from AccountCompression, we find Token before system + let program_ids = vec![ + ProgramId::LightSystem, + ProgramId::SolanaSystem, + ProgramId::LightToken, // Between SolanaSystem and AccountCompression + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(3, &program_ids); + // This should fail because we need SolanaSystem right before 
AccountCompression + assert!( + res.is_none(), + "Token breaking the SolanaSystem -> AccountCompression chain should fail" + ); +} + +#[test] +fn test_find_cpi_pattern_registry_between_system_and_solana_system() { + // Registry between LightSystem and SolanaSystem + // Pattern: Registry -> LightSystem -> Registry -> SolanaSystem -> AccountCompression + let program_ids = vec![ + ProgramId::Registry, + ProgramId::LightSystem, + ProgramId::Registry, // Between LightSystem and SolanaSystem + ProgramId::SolanaSystem, + ProgramId::AccountCompression, + ]; + let (res, _) = find_cpi_pattern(4, &program_ids); + // Registry between should break the pattern + assert!( + res.is_none(), + "Registry between LightSystem and SolanaSystem should break pattern" + ); +} + +// ========================================================================== +// Additional extract_ata_owners edge case tests +// ========================================================================== + +#[test] +fn test_extract_ata_owners_multiple_outputs_all_ata() { + // Multiple outputs, all are ATAs + let data = create_transfer2_with_multiple_outputs(vec![ + (0, true), // output 0: ATA with owner at packed_accounts[0] + (1, true), // output 1: ATA with owner at packed_accounts[1] + (2, true), // output 2: ATA with owner at packed_accounts[2] + ]); + + let mut accounts = vec![Pubkey::default(); 12]; // 7 system + 5 packed + let owner0 = Pubkey::new_from_array([10u8; 32]); + let owner1 = Pubkey::new_from_array([11u8; 32]); + let owner2 = Pubkey::new_from_array([12u8; 32]); + accounts[7] = owner0; + accounts[8] = owner1; + accounts[9] = owner2; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert_eq!(result.len(), 3, "Should extract 3 ATA owners"); + assert_eq!(result[0].output_index, 0); + assert_eq!(result[0].wallet_owner, owner0); + assert_eq!(result[1].output_index, 1); + 
assert_eq!(result[1].wallet_owner, owner1); + assert_eq!(result[2].output_index, 2); + assert_eq!(result[2].wallet_owner, owner2); +} + +#[test] +fn test_extract_ata_owners_multiple_outputs_mixed() { + // Mixed: some ATA, some not + let data = create_transfer2_with_multiple_outputs(vec![ + (0, false), // output 0: NOT an ATA + (1, true), // output 1: ATA + (2, false), // output 2: NOT an ATA + (3, true), // output 3: ATA + ]); + + let mut accounts = vec![Pubkey::default(); 12]; + let owner1 = Pubkey::new_from_array([21u8; 32]); + let owner3 = Pubkey::new_from_array([23u8; 32]); + accounts[8] = owner1; // packed_accounts[1] + accounts[10] = owner3; // packed_accounts[3] + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert_eq!(result.len(), 2, "Should only extract ATA outputs"); + assert_eq!(result[0].output_index, 1); + assert_eq!(result[0].wallet_owner, owner1); + assert_eq!(result[1].output_index, 3); + assert_eq!(result[1].wallet_owner, owner3); +} + +#[test] +fn test_extract_ata_owners_multiple_outputs_none_ata() { + // All outputs are non-ATA + let data = create_transfer2_with_multiple_outputs(vec![(0, false), (1, false), (2, false)]); + + let accounts = vec![Pubkey::default(); 12]; + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert!( + result.is_empty(), + "Should not extract any owners when no ATAs" + ); +} + +#[test] +fn test_extract_ata_owners_same_owner_multiple_atas() { + // Multiple ATAs pointing to the same owner (same owner_index) + let data = create_transfer2_with_multiple_outputs(vec![ + (0, true), // output 0: ATA with owner at packed_accounts[0] + (0, true), // output 1: ATA with SAME owner + (0, true), // output 2: ATA with SAME owner + ]); + + let mut accounts = vec![Pubkey::default(); 10]; + let shared_owner = 
Pubkey::new_from_array([77u8; 32]); + accounts[7] = shared_owner; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert_eq!(result.len(), 3, "Should extract all 3 ATA entries"); + assert!( + result.iter().all(|r| r.wallet_owner == shared_owner), + "All should have the same owner" + ); + assert_eq!(result[0].output_index, 0); + assert_eq!(result[1].output_index, 1); + assert_eq!(result[2].output_index, 2); +} + +#[test] +fn test_extract_ata_owners_partial_out_of_bounds() { + // Some outputs have valid owner_index, some are out of bounds + let data = create_transfer2_with_multiple_outputs(vec![ + (0, true), // output 0: Valid owner_index + (100, true), // output 1: Out of bounds + (1, true), // output 2: Valid owner_index + ]); + + let mut accounts = vec![Pubkey::default(); 10]; + let owner0 = Pubkey::new_from_array([30u8; 32]); + let owner1 = Pubkey::new_from_array([31u8; 32]); + accounts[7] = owner0; + accounts[8] = owner1; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert_eq!(result.len(), 2, "Should only extract valid owner indices"); + assert_eq!(result[0].output_index, 0); + assert_eq!(result[0].wallet_owner, owner0); + assert_eq!(result[1].output_index, 2); + assert_eq!(result[1].wallet_owner, owner1); +} + +#[test] +fn test_extract_ata_owners_zero_packed_accounts() { + // Edge case: exactly 7 accounts (no packed_accounts at all) + let data = create_transfer2_with_ata(0, true); // Wants packed_accounts[0] which doesn't exist + + let accounts = vec![Pubkey::default(); 7]; // Only system accounts + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert!( + result.is_empty(), + "Should not extract ATA when no packed_accounts exist" + ); +} + +#[test] 
+fn test_extract_ata_owners_exactly_one_packed_account() { + // Edge case: exactly 8 accounts (only one packed_account at index 0) + let data = create_transfer2_with_ata(0, true); + + let mut accounts = vec![Pubkey::default(); 8]; + let owner = Pubkey::new_from_array([55u8; 32]); + accounts[7] = owner; + + let token_instruction = TokenInstructionData { + data: &data, + accounts: &accounts, + }; + let result = extract_ata_owners(&token_instruction); + + assert_eq!(result.len(), 1); + assert_eq!(result[0].wallet_owner, owner); +} + +// ========================================================================== +// Tests for wrap_program_ids edge cases +// ========================================================================== + +#[test] +fn test_wrap_program_ids_empty_instruction_data() { + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let instructions = vec![vec![]]; // Empty instruction data + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!( + result, + vec![ProgramId::Unknown], + "Empty instruction should be Unknown" + ); +} + +#[test] +fn test_wrap_program_ids_exactly_12_bytes() { + // Boundary: exactly 12 bytes is valid + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; + instruction_data[0] = TRANSFER2; + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, vec![ProgramId::LightToken]); +} + +#[test] +fn test_wrap_program_ids_11_bytes() { + // Boundary: 11 bytes is too small + let program_ids = vec![Pubkey::from(LIGHT_TOKEN_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 11]; + instruction_data[0] = TRANSFER2; + let instructions = vec![instruction_data]; + let accounts = vec![vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, 
vec![ProgramId::Unknown], "11 bytes is too small"); +} + +#[test] +fn test_wrap_program_ids_mixed_valid_invalid() { + // Mix of valid and invalid instructions + let program_ids = vec![ + Pubkey::from(LIGHT_TOKEN_PROGRAM_ID), + Pubkey::from(LIGHT_REGISTRY_PROGRAM_ID), + Pubkey::from(LIGHT_TOKEN_PROGRAM_ID), + Pubkey::from(LIGHT_TOKEN_PROGRAM_ID), + ]; + + let mut valid_transfer = vec![0u8; 12]; + valid_transfer[0] = TRANSFER2; + + let instructions = vec![ + valid_transfer.clone(), // Valid Token + TRANSFER2 + vec![0u8; 12], // Valid Registry (any 12+ bytes) + vec![0xFF; 12], // Token but not TRANSFER2 + vec![TRANSFER2; 5], // Token + TRANSFER2 but too short + ]; + let accounts = vec![vec![], vec![], vec![], vec![]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!( + result, + vec![ + ProgramId::LightToken, + ProgramId::Registry, + ProgramId::Unknown, + ProgramId::Unknown, + ] + ); +} + +#[test] +fn test_wrap_program_ids_account_compression_missing_registered_pda() { + // AccountCompression with wrong registered PDA + let program_ids = vec![Pubkey::from(ACCOUNT_COMPRESSION_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; + instruction_data[0..8].copy_from_slice(&DISCRIMINATOR_INSERT_INTO_QUEUES); + let instructions = vec![instruction_data]; + // accounts[1] should be REGISTERED_PROGRAM_PDA but we use a different pubkey + let accounts = vec![vec![ + Pubkey::default(), + Pubkey::new_from_array([99u8; 32]), // Wrong PDA + Pubkey::default(), + ]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!( + result, + vec![ProgramId::Unknown], + "AccountCompression with wrong registered PDA should be Unknown" + ); +} + +#[test] +fn test_wrap_program_ids_account_compression_valid() { + // AccountCompression with correct setup + let program_ids = vec![Pubkey::from(ACCOUNT_COMPRESSION_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; + 
instruction_data[0..8].copy_from_slice(&DISCRIMINATOR_INSERT_INTO_QUEUES); + let instructions = vec![instruction_data]; + let accounts = vec![vec![ + Pubkey::default(), + Pubkey::from(REGISTERED_PROGRAM_PDA), // Correct PDA + Pubkey::default(), + ]]; + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!(result, vec![ProgramId::AccountCompression]); +} + +#[test] +fn test_wrap_program_ids_account_compression_insufficient_accounts() { + // AccountCompression with too few accounts + let program_ids = vec![Pubkey::from(ACCOUNT_COMPRESSION_PROGRAM_ID)]; + let mut instruction_data = vec![0u8; 12]; + instruction_data[0..8].copy_from_slice(&DISCRIMINATOR_INSERT_INTO_QUEUES); + let instructions = vec![instruction_data]; + let accounts = vec![vec![Pubkey::default()]]; // Only 1 account, need 3 + + let result = wrap_program_ids(&program_ids, &instructions, &accounts); + assert_eq!( + result, + vec![ProgramId::Unknown], + "AccountCompression with insufficient accounts should be Unknown" + ); +} diff --git a/sdk-libs/photon-api/src/apis/default_api.rs b/sdk-libs/photon-api/src/apis/default_api.rs index d0dd52fa51..d432e2d248 100644 --- a/sdk-libs/photon-api/src/apis/default_api.rs +++ b/sdk-libs/photon-api/src/apis/default_api.rs @@ -349,6 +349,42 @@ pub enum GetValidityProofV2PostError { UnknownValue(serde_json::Value), } +/// struct for typed errors of method [`get_account_interface_post`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetAccountInterfacePostError { + Status429(models::GetBatchAddressUpdateInfoPost429Response), + Status500(models::GetBatchAddressUpdateInfoPost429Response), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_token_account_interface_post`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetTokenAccountInterfacePostError { + Status429(models::GetBatchAddressUpdateInfoPost429Response), + 
Status500(models::GetBatchAddressUpdateInfoPost429Response), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_ata_interface_post`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetAtaInterfacePostError { + Status429(models::GetBatchAddressUpdateInfoPost429Response), + Status500(models::GetBatchAddressUpdateInfoPost429Response), + UnknownValue(serde_json::Value), +} + +/// struct for typed errors of method [`get_multiple_account_interfaces_post`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum GetMultipleAccountInterfacesPostError { + Status429(models::GetBatchAddressUpdateInfoPost429Response), + Status500(models::GetBatchAddressUpdateInfoPost429Response), + UnknownValue(serde_json::Value), +} + pub async fn get_batch_address_update_info_post( configuration: &configuration::Configuration, get_batch_address_update_info_post_request: models::GetBatchAddressUpdateInfoPostRequest, @@ -1997,6 +2033,173 @@ pub async fn get_validity_proof_v2_post( } } +pub async fn get_account_interface_post( + configuration: &configuration::Configuration, + get_account_interface_post_request: models::GetAccountInterfacePostRequest, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/getAccountInterface", local_var_configuration.base_path); + let local_var_uri_str = append_api_key(local_var_configuration, &local_var_uri_str); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + local_var_req_builder = local_var_req_builder.json(&get_account_interface_post_request); + + let local_var_req = local_var_req_builder.build()?; + 
let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +pub async fn get_token_account_interface_post( + configuration: &configuration::Configuration, + get_token_account_interface_post_request: models::GetTokenAccountInterfacePostRequest, +) -> Result> +{ + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/getTokenAccountInterface", + local_var_configuration.base_path + ); + let local_var_uri_str = append_api_key(local_var_configuration, &local_var_uri_str); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + local_var_req_builder = local_var_req_builder.json(&get_token_account_interface_post_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); 
+ let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +pub async fn get_ata_interface_post( + configuration: &configuration::Configuration, + get_ata_interface_post_request: models::GetAtaInterfacePostRequest, +) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/getAtaInterface", local_var_configuration.base_path); + let local_var_uri_str = append_api_key(local_var_configuration, &local_var_uri_str); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + local_var_req_builder = local_var_req_builder.json(&get_ata_interface_post_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + +pub async fn get_multiple_account_interfaces_post( + configuration: &configuration::Configuration, + get_multiple_account_interfaces_post_request: models::GetMultipleAccountInterfacesPostRequest, +) -> Result< + models::GetMultipleAccountInterfacesPost200Response, + Error, +> { + let 
local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!( + "{}/getMultipleAccountInterfaces", + local_var_configuration.base_path + ); + let local_var_uri_str = append_api_key(local_var_configuration, &local_var_uri_str); + let mut local_var_req_builder = + local_var_client.request(reqwest::Method::POST, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = + local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + local_var_req_builder = + local_var_req_builder.json(&get_multiple_account_interfaces_post_request); + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = + serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { + status: local_var_status, + content: local_var_content, + entity: local_var_entity, + }; + Err(Error::ResponseError(local_var_error)) + } +} + fn append_api_key(configuration: &Configuration, uri_str: &str) -> String { let mut uri_str = uri_str.to_string(); if let Some(ref api_key) = configuration.api_key { diff --git a/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response.rs new file mode 100644 index 0000000000..1dc4692b4f --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response.rs @@ -0,0 +1,35 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 
+ * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAccountInterfacePost200Response { + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option>, + /// An ID to identify the response. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + #[serde(rename = "result", skip_serializing_if = "Option::is_none")] + pub result: Option>, +} + +impl GetAccountInterfacePost200Response { + pub fn new(id: String, jsonrpc: String) -> Self { + Self { + error: None, + id, + jsonrpc, + result: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response_result.rs b/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response_result.rs new file mode 100644 index 0000000000..e59fae1529 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_account_interface_post_200_response_result.rs @@ -0,0 +1,27 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAccountInterfacePost200ResponseResult { + #[serde(rename = "context")] + pub context: Box, + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +impl GetAccountInterfacePost200ResponseResult { + pub fn new(context: models::Context) -> Self { + Self { + context: Box::new(context), + value: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_account_interface_post_request.rs b/sdk-libs/photon-api/src/models/_get_account_interface_post_request.rs new file mode 100644 index 0000000000..dd4cc0e843 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_account_interface_post_request.rs @@ -0,0 +1,36 @@ +/* + * photon-indexer + * + * Solana indexer for general 
compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAccountInterfacePostRequest { + /// An ID to identify the request. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + /// The name of the method to invoke. + #[serde(rename = "method")] + pub method: String, + #[serde(rename = "params")] + pub params: Box, +} + +impl GetAccountInterfacePostRequest { + pub fn new(params: models::GetAccountInterfacePostRequestParams) -> Self { + Self { + id: "test-id".to_string(), + jsonrpc: "2.0".to_string(), + method: "getAccountInterface".to_string(), + params: Box::new(params), + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_account_interface_post_request_params.rs b/sdk-libs/photon-api/src/models/_get_account_interface_post_request_params.rs new file mode 100644 index 0000000000..d3b6238808 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_account_interface_post_request_params.rs @@ -0,0 +1,22 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// GetAccountInterfacePostRequestParams : Request parameters for getAccountInterface +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAccountInterfacePostRequestParams { + /// Account address to look up + #[serde(rename = "address")] + pub address: String, +} + +impl GetAccountInterfacePostRequestParams { + pub fn new(address: String) -> Self { + Self { address } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response.rs new file mode 100644 index 0000000000..5c7709912a --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response.rs @@ -0,0 +1,35 @@ +/* 
+ * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAtaInterfacePost200Response { + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option>, + /// An ID to identify the response. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + #[serde(rename = "result", skip_serializing_if = "Option::is_none")] + pub result: Option>, +} + +impl GetAtaInterfacePost200Response { + pub fn new(id: String, jsonrpc: String) -> Self { + Self { + error: None, + id, + jsonrpc, + result: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response_result.rs b/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response_result.rs new file mode 100644 index 0000000000..f477f54d09 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_ata_interface_post_200_response_result.rs @@ -0,0 +1,27 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAtaInterfacePost200ResponseResult { + #[serde(rename = "context")] + pub context: Box, + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +impl GetAtaInterfacePost200ResponseResult { + pub fn new(context: models::Context) -> Self { + Self { + context: Box::new(context), + value: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_ata_interface_post_request.rs b/sdk-libs/photon-api/src/models/_get_ata_interface_post_request.rs new file mode 100644 index 0000000000..81c36f3981 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_ata_interface_post_request.rs @@ 
-0,0 +1,36 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAtaInterfacePostRequest { + /// An ID to identify the request. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + /// The name of the method to invoke. + #[serde(rename = "method")] + pub method: String, + #[serde(rename = "params")] + pub params: Box, +} + +impl GetAtaInterfacePostRequest { + pub fn new(params: models::GetAtaInterfacePostRequestParams) -> Self { + Self { + id: "test-id".to_string(), + jsonrpc: "2.0".to_string(), + method: "getAtaInterface".to_string(), + params: Box::new(params), + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_ata_interface_post_request_params.rs b/sdk-libs/photon-api/src/models/_get_ata_interface_post_request_params.rs new file mode 100644 index 0000000000..ea336353c5 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_ata_interface_post_request_params.rs @@ -0,0 +1,25 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// GetAtaInterfacePostRequestParams : Request parameters for getAtaInterface +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetAtaInterfacePostRequestParams { + /// Owner address + #[serde(rename = "owner")] + pub owner: String, + /// Mint address + #[serde(rename = "mint")] + pub mint: String, +} + +impl GetAtaInterfacePostRequestParams { + pub fn new(owner: String, mint: String) -> Self { + Self { owner, mint } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response.rs new file mode 100644 index 
0000000000..d46d97e6e7 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response.rs @@ -0,0 +1,35 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMultipleAccountInterfacesPost200Response { + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option>, + /// An ID to identify the response. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + #[serde(rename = "result", skip_serializing_if = "Option::is_none")] + pub result: Option>, +} + +impl GetMultipleAccountInterfacesPost200Response { + pub fn new(id: String, jsonrpc: String) -> Self { + Self { + error: None, + id, + jsonrpc, + result: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response_result.rs b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response_result.rs new file mode 100644 index 0000000000..31085786eb --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_200_response_result.rs @@ -0,0 +1,28 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMultipleAccountInterfacesPost200ResponseResult { + #[serde(rename = "context")] + pub context: Box, + /// List of typed results (Some for found accounts, None for not found) + #[serde(rename = "value")] + pub value: Vec>, +} + +impl GetMultipleAccountInterfacesPost200ResponseResult { + pub fn new(context: models::Context, value: Vec>) -> Self { + Self { + context: Box::new(context), + value, + } + } +} diff 
--git a/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request.rs b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request.rs new file mode 100644 index 0000000000..fb9308c13b --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request.rs @@ -0,0 +1,36 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMultipleAccountInterfacesPostRequest { + /// An ID to identify the request. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + /// The name of the method to invoke. + #[serde(rename = "method")] + pub method: String, + #[serde(rename = "params")] + pub params: Box, +} + +impl GetMultipleAccountInterfacesPostRequest { + pub fn new(params: models::GetMultipleAccountInterfacesPostRequestParams) -> Self { + Self { + id: "test-id".to_string(), + jsonrpc: "2.0".to_string(), + method: "getMultipleAccountInterfaces".to_string(), + params: Box::new(params), + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request_params.rs b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request_params.rs new file mode 100644 index 0000000000..c20d8b1e42 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_multiple_account_interfaces_post_request_params.rs @@ -0,0 +1,22 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// GetMultipleAccountInterfacesPostRequestParams : Request parameters for getMultipleAccountInterfaces +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetMultipleAccountInterfacesPostRequestParams { + /// List of account addresses to 
look up (max 100) + #[serde(rename = "addresses")] + pub addresses: Vec, +} + +impl GetMultipleAccountInterfacesPostRequestParams { + pub fn new(addresses: Vec) -> Self { + Self { addresses } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response.rs b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response.rs new file mode 100644 index 0000000000..7bd42eb7f7 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response.rs @@ -0,0 +1,35 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetTokenAccountInterfacePost200Response { + #[serde(rename = "error", skip_serializing_if = "Option::is_none")] + pub error: Option>, + /// An ID to identify the response. + #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. 
+ #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + #[serde(rename = "result", skip_serializing_if = "Option::is_none")] + pub result: Option>, +} + +impl GetTokenAccountInterfacePost200Response { + pub fn new(id: String, jsonrpc: String) -> Self { + Self { + error: None, + id, + jsonrpc, + result: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response_result.rs b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response_result.rs new file mode 100644 index 0000000000..dadd19b281 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_200_response_result.rs @@ -0,0 +1,27 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetTokenAccountInterfacePost200ResponseResult { + #[serde(rename = "context")] + pub context: Box, + #[serde(rename = "value", skip_serializing_if = "Option::is_none")] + pub value: Option>, +} + +impl GetTokenAccountInterfacePost200ResponseResult { + pub fn new(context: models::Context) -> Self { + Self { + context: Box::new(context), + value: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request.rs b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request.rs new file mode 100644 index 0000000000..8255ac11f7 --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request.rs @@ -0,0 +1,36 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetTokenAccountInterfacePostRequest { + /// An ID to identify the request. 
+ #[serde(rename = "id")] + pub id: String, + /// The version of the JSON-RPC protocol. + #[serde(rename = "jsonrpc")] + pub jsonrpc: String, + /// The name of the method to invoke. + #[serde(rename = "method")] + pub method: String, + #[serde(rename = "params")] + pub params: Box, +} + +impl GetTokenAccountInterfacePostRequest { + pub fn new(params: models::GetTokenAccountInterfacePostRequestParams) -> Self { + Self { + id: "test-id".to_string(), + jsonrpc: "2.0".to_string(), + method: "getTokenAccountInterface".to_string(), + params: Box::new(params), + } + } +} diff --git a/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request_params.rs b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request_params.rs new file mode 100644 index 0000000000..60f583e6af --- /dev/null +++ b/sdk-libs/photon-api/src/models/_get_token_account_interface_post_request_params.rs @@ -0,0 +1,22 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// GetTokenAccountInterfacePostRequestParams : Request parameters for getTokenAccountInterface +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct GetTokenAccountInterfacePostRequestParams { + /// Token account address to look up + #[serde(rename = "address")] + pub address: String, +} + +impl GetTokenAccountInterfacePostRequestParams { + pub fn new(address: String) -> Self { + Self { address } + } +} diff --git a/sdk-libs/photon-api/src/models/account_interface.rs b/sdk-libs/photon-api/src/models/account_interface.rs new file mode 100644 index 0000000000..efae2f6498 --- /dev/null +++ b/sdk-libs/photon-api/src/models/account_interface.rs @@ -0,0 +1,35 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.51.0 + * + */ + +use crate::models; + +/// AccountInterface : Unified account interface — works for both on-chain and compressed 
accounts +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AccountInterface { + /// The on-chain Solana pubkey + #[serde(rename = "key")] + pub key: String, + /// Standard Solana account fields + #[serde(rename = "account")] + pub account: models::SolanaAccountData, + /// Compressed context — null if on-chain, present if compressed + #[serde(rename = "cold", skip_serializing_if = "Option::is_none")] + pub cold: Option, +} + +impl AccountInterface { + pub fn new(key: String, account: models::SolanaAccountData) -> Self { + Self { + key, + account, + cold: None, + } + } +} diff --git a/sdk-libs/photon-api/src/models/cold_context.rs b/sdk-libs/photon-api/src/models/cold_context.rs new file mode 100644 index 0000000000..11ded1c66c --- /dev/null +++ b/sdk-libs/photon-api/src/models/cold_context.rs @@ -0,0 +1,36 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.51.0 + * + */ + +use crate::models; + +/// ColdContext : Compressed account context — present when account is in compressed state +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(tag = "type", rename_all = "camelCase")] +pub enum ColdContext { + /// Generic compressed account + #[serde(rename = "account")] + Account { + hash: String, + #[serde(rename = "leafIndex")] + leaf_index: u64, + #[serde(rename = "treeInfo")] + tree_info: models::InterfaceTreeInfo, + data: models::ColdData, + }, + /// Compressed token account + #[serde(rename = "token")] + Token { + hash: String, + #[serde(rename = "leafIndex")] + leaf_index: u64, + #[serde(rename = "treeInfo")] + tree_info: models::InterfaceTreeInfo, + data: models::ColdData, + }, +} diff --git a/sdk-libs/photon-api/src/models/cold_data.rs b/sdk-libs/photon-api/src/models/cold_data.rs new file mode 100644 index 0000000000..318e7939f5 --- /dev/null +++ b/sdk-libs/photon-api/src/models/cold_data.rs @@ -0,0 +1,27 @@ +/* + 
* photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.51.0 + * + */ + +/// ColdData : Structured compressed account data (discriminator separated) +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ColdData { + /// First 8 bytes of the account data (discriminator) + pub discriminator: [u8; 8], + /// Remaining account data after discriminator, base64 encoded + pub data: String, +} + +impl ColdData { + pub fn new(discriminator: [u8; 8], data: String) -> Self { + Self { + discriminator, + data, + } + } +} diff --git a/sdk-libs/photon-api/src/models/compressed_context.rs b/sdk-libs/photon-api/src/models/compressed_context.rs new file mode 100644 index 0000000000..e664500503 --- /dev/null +++ b/sdk-libs/photon-api/src/models/compressed_context.rs @@ -0,0 +1,40 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// CompressedContext : Context information for compressed accounts +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct CompressedContext { + /// The hash of the compressed account (leaf hash in Merkle tree) + #[serde(rename = "hash")] + pub hash: String, + /// The Merkle tree address + #[serde(rename = "tree")] + pub tree: String, + /// The leaf index in the Merkle tree + #[serde(rename = "leafIndex")] + pub leaf_index: u64, + /// Sequence number (None if in output queue, Some once inserted into Merkle tree) + #[serde(rename = "seq", skip_serializing_if = "Option::is_none")] + pub seq: Option, + /// Whether the account can be proven by index (in output queue) + #[serde(rename = "proveByIndex")] + pub prove_by_index: bool, +} + +impl CompressedContext { + pub fn new(hash: String, tree: String, leaf_index: u64, prove_by_index: bool) -> Self { + Self { + hash, + tree, + leaf_index, + seq: None, + prove_by_index, + } + } +} diff 
--git a/sdk-libs/photon-api/src/models/interface_result.rs b/sdk-libs/photon-api/src/models/interface_result.rs new file mode 100644 index 0000000000..ff086836ac --- /dev/null +++ b/sdk-libs/photon-api/src/models/interface_result.rs @@ -0,0 +1,28 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +/// InterfaceResult : Heterogeneous result type for batch lookups +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum InterfaceResult { + /// Generic account result + #[serde(rename = "account")] + Account(models::AccountInterface), + /// Token account result with parsed token data + #[serde(rename = "token")] + Token(models::TokenAccountInterface), +} + +impl Default for InterfaceResult { + fn default() -> Self { + Self::Account(models::AccountInterface::default()) + } +} diff --git a/sdk-libs/photon-api/src/models/mod.rs b/sdk-libs/photon-api/src/models/mod.rs index 115861e99e..de71f73a43 100644 --- a/sdk-libs/photon-api/src/models/mod.rs +++ b/sdk-libs/photon-api/src/models/mod.rs @@ -342,3 +342,54 @@ pub mod node; pub use self::node::Node; pub mod queue_request; pub use self::queue_request::QueueRequest; +// Interface types +pub mod solana_account_data; +pub use self::solana_account_data::SolanaAccountData; +pub mod cold_data; +pub use self::cold_data::ColdData; +pub mod tree_info; +pub use self::tree_info::{TreeInfo as InterfaceTreeInfo, TreeType}; +pub mod cold_context; +pub use self::cold_context::ColdContext; +pub mod account_interface; +pub use self::account_interface::AccountInterface; +pub mod token_account_interface; +pub use self::token_account_interface::TokenAccountInterface; +pub mod interface_result; +pub use self::interface_result::InterfaceResult; +// getAccountInterface +pub mod _get_account_interface_post_request_params; +pub use 
self::_get_account_interface_post_request_params::GetAccountInterfacePostRequestParams; +pub mod _get_account_interface_post_request; +pub use self::_get_account_interface_post_request::GetAccountInterfacePostRequest; +pub mod _get_account_interface_post_200_response_result; +pub use self::_get_account_interface_post_200_response_result::GetAccountInterfacePost200ResponseResult; +pub mod _get_account_interface_post_200_response; +pub use self::_get_account_interface_post_200_response::GetAccountInterfacePost200Response; +// getTokenAccountInterface +pub mod _get_token_account_interface_post_request_params; +pub use self::_get_token_account_interface_post_request_params::GetTokenAccountInterfacePostRequestParams; +pub mod _get_token_account_interface_post_request; +pub use self::_get_token_account_interface_post_request::GetTokenAccountInterfacePostRequest; +pub mod _get_token_account_interface_post_200_response_result; +pub use self::_get_token_account_interface_post_200_response_result::GetTokenAccountInterfacePost200ResponseResult; +pub mod _get_token_account_interface_post_200_response; +pub use self::_get_token_account_interface_post_200_response::GetTokenAccountInterfacePost200Response; +// getAtaInterface +pub mod _get_ata_interface_post_request_params; +pub use self::_get_ata_interface_post_request_params::GetAtaInterfacePostRequestParams; +pub mod _get_ata_interface_post_request; +pub use self::_get_ata_interface_post_request::GetAtaInterfacePostRequest; +pub mod _get_ata_interface_post_200_response_result; +pub use self::_get_ata_interface_post_200_response_result::GetAtaInterfacePost200ResponseResult; +pub mod _get_ata_interface_post_200_response; +pub use self::_get_ata_interface_post_200_response::GetAtaInterfacePost200Response; +// getMultipleAccountInterfaces +pub mod _get_multiple_account_interfaces_post_request_params; +pub use self::_get_multiple_account_interfaces_post_request_params::GetMultipleAccountInterfacesPostRequestParams; +pub mod 
_get_multiple_account_interfaces_post_request; +pub use self::_get_multiple_account_interfaces_post_request::GetMultipleAccountInterfacesPostRequest; +pub mod _get_multiple_account_interfaces_post_200_response_result; +pub use self::_get_multiple_account_interfaces_post_200_response_result::GetMultipleAccountInterfacesPost200ResponseResult; +pub mod _get_multiple_account_interfaces_post_200_response; +pub use self::_get_multiple_account_interfaces_post_200_response::GetMultipleAccountInterfacesPost200Response; diff --git a/sdk-libs/photon-api/src/models/resolved_from.rs b/sdk-libs/photon-api/src/models/resolved_from.rs new file mode 100644 index 0000000000..c79935af7e --- /dev/null +++ b/sdk-libs/photon-api/src/models/resolved_from.rs @@ -0,0 +1,23 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +/// ResolvedFrom : Indicates the source of the resolved account data +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum ResolvedFrom { + #[serde(rename = "onchain")] + Onchain, + #[serde(rename = "compressed")] + Compressed, +} + +impl Default for ResolvedFrom { + fn default() -> Self { + Self::Onchain + } +} diff --git a/sdk-libs/photon-api/src/models/solana_account_data.rs b/sdk-libs/photon-api/src/models/solana_account_data.rs new file mode 100644 index 0000000000..5d4142b840 --- /dev/null +++ b/sdk-libs/photon-api/src/models/solana_account_data.rs @@ -0,0 +1,40 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.51.0 + * + */ + +/// SolanaAccountData : Standard Solana account fields (matches getAccountInfo shape) +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SolanaAccountData { + pub lamports: u64, + pub data: String, + pub owner: String, + pub executable: bool, + pub rent_epoch: u64, + pub 
space: u64, +} + +impl SolanaAccountData { + pub fn new( + lamports: u64, + data: String, + owner: String, + executable: bool, + rent_epoch: u64, + space: u64, + ) -> Self { + Self { + lamports, + data, + owner, + executable, + rent_epoch, + space, + } + } +} diff --git a/sdk-libs/photon-api/src/models/token_account_interface.rs b/sdk-libs/photon-api/src/models/token_account_interface.rs new file mode 100644 index 0000000000..3e3080c100 --- /dev/null +++ b/sdk-libs/photon-api/src/models/token_account_interface.rs @@ -0,0 +1,30 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.50.0 + * + */ + +use crate::models; + +/// TokenAccountInterface : Token account interface with parsed token data +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +pub struct TokenAccountInterface { + /// Base account interface data (flattened) + #[serde(flatten)] + pub account: models::AccountInterface, + /// Parsed token account data + #[serde(rename = "tokenData")] + pub token_data: models::TokenData, +} + +impl TokenAccountInterface { + pub fn new(account: models::AccountInterface, token_data: models::TokenData) -> Self { + Self { + account, + token_data, + } + } +} diff --git a/sdk-libs/photon-api/src/models/tree_info.rs b/sdk-libs/photon-api/src/models/tree_info.rs new file mode 100644 index 0000000000..0a7f036b13 --- /dev/null +++ b/sdk-libs/photon-api/src/models/tree_info.rs @@ -0,0 +1,62 @@ +/* + * photon-indexer + * + * Solana indexer for general compression + * + * The version of the OpenAPI document: 0.51.0 + * + */ + +#[derive(Clone, Copy, Default, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +#[repr(u64)] +pub enum TreeType { + #[default] + #[serde(rename = "stateV1")] + StateV1 = 1, + #[serde(rename = "stateV2")] + StateV2 = 3, +} + +impl From for u64 { + fn from(value: TreeType) -> Self { + value as u64 + } +} + +impl From for TreeType { + fn 
from(value: u64) -> Self { + match value { + 1 => TreeType::StateV1, + 3 => TreeType::StateV2, + _ => TreeType::StateV1, + } + } +} + +/// TreeInfo : Merkle tree info for compressed accounts +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TreeInfo { + pub tree: String, + pub queue: String, + #[serde(rename = "treeType")] + pub tree_type: TreeType, + #[serde(skip_serializing_if = "Option::is_none")] + pub seq: Option, + /// Slot when the account was created/compressed + #[serde(rename = "slotCreated")] + pub slot_created: u64, +} + +impl TreeInfo { + pub fn new(tree: String, queue: String, tree_type: TreeType, slot_created: u64) -> Self { + Self { + tree, + queue, + tree_type, + seq: None, + slot_created, + } + } +} diff --git a/sdk-libs/program-test/src/indexer/test_indexer.rs b/sdk-libs/program-test/src/indexer/test_indexer.rs index a0691279b7..32249ab81b 100644 --- a/sdk-libs/program-test/src/indexer/test_indexer.rs +++ b/sdk-libs/program-test/src/indexer/test_indexer.rs @@ -1,4 +1,4 @@ -use std::{fmt::Debug, time::Duration}; +use std::{collections::HashMap, fmt::Debug, time::Duration}; #[cfg(feature = "devenv")] use account_compression::{ @@ -13,6 +13,7 @@ pub(crate) const STATE_MERKLE_TREE_ROOTS: u64 = 2400; pub(crate) const DEFAULT_BATCH_STATE_TREE_HEIGHT: usize = 32; pub(crate) const DEFAULT_BATCH_ADDRESS_TREE_HEIGHT: usize = 40; pub(crate) const DEFAULT_BATCH_ROOT_HISTORY_LEN: usize = 200; + use async_trait::async_trait; use borsh::BorshDeserialize; #[cfg(feature = "devenv")] @@ -38,6 +39,9 @@ use light_compressed_account::{ tx_hash::create_tx_hash, TreeType, }; +/// Discriminator for compressible accounts that store onchain_pubkey in the first 32 bytes of data. +/// Re-exported from light_compressible for convenience. 
+pub use light_compressible::DECOMPRESSED_PDA_DISCRIMINATOR; use light_event::event::PublicTransactionEvent; use light_hasher::{bigint::bigint_to_be_bytes_array, Poseidon}; use light_merkle_tree_reference::MerkleTree; @@ -102,6 +106,8 @@ pub struct TestIndexer { pub token_compressed_accounts: Vec, pub token_nullified_compressed_accounts: Vec, pub events: Vec, + /// Index mapping onchain_pubkey to compressed account index. + pub onchain_pubkey_index: HashMap<[u8; 32], usize>, } impl Clone for TestIndexer { @@ -116,6 +122,7 @@ impl Clone for TestIndexer { token_compressed_accounts: self.token_compressed_accounts.clone(), token_nullified_compressed_accounts: self.token_nullified_compressed_accounts.clone(), events: self.events.clone(), + onchain_pubkey_index: self.onchain_pubkey_index.clone(), } } } @@ -992,7 +999,7 @@ impl Indexer for TestIndexer { } async fn get_indexer_health(&self, _config: Option) -> Result { - todo!("get_indexer_health not implemented") + Ok(true) // Test indexer is always healthy } } @@ -1345,9 +1352,153 @@ impl TestIndexer { token_compressed_accounts: vec![], token_nullified_compressed_accounts: vec![], group_pda, + onchain_pubkey_index: HashMap::new(), } } + /// Extract onchain_pubkey from compressed account data if it has the decompressed discriminator. + /// Compressible accounts store the on-chain PDA pubkey in the first 32 bytes of data. + fn extract_onchain_pubkey_from_data( + data: Option<&light_compressed_account::compressed_account::CompressedAccountData>, + ) -> Option<[u8; 32]> { + let data = data?; + // Check discriminator matches DECOMPRESSED_PDA_DISCRIMINATOR + if data.discriminator == DECOMPRESSED_PDA_DISCRIMINATOR && data.data.len() >= 32 { + // onchain_pubkey is stored in the first 32 bytes of data (after discriminator) + data.data[..32].try_into().ok() + } else { + None + } + } + + /// Find a compressed account by its on-chain pubkey. + /// This mirrors Photon's lookup by onchain_pubkey column. 
+ pub fn find_compressed_account_by_onchain_pubkey( + &self, + onchain_pubkey: &[u8; 32], + ) -> Option<&CompressedAccountWithMerkleContext> { + let matches: Vec<_> = self + .compressed_accounts + .iter() + .filter(|acc| { + Self::extract_onchain_pubkey_from_data(acc.compressed_account.data.as_ref()) + .as_ref() + == Some(onchain_pubkey) + }) + .collect(); + + debug_assert!( + matches.len() <= 1, + "find_compressed_account_by_onchain_pubkey: found {} matches, expected at most 1", + matches.len() + ); + + matches.into_iter().next() + } + + /// Find multiple compressed accounts by their on-chain pubkeys. + pub fn find_multiple_compressed_accounts_by_onchain_pubkeys( + &self, + onchain_pubkeys: &[[u8; 32]], + ) -> Vec> { + onchain_pubkeys + .iter() + .map(|pubkey| self.find_compressed_account_by_onchain_pubkey(pubkey)) + .collect() + } + + /// Find a token compressed account by its on-chain pubkey. + pub fn find_token_account_by_onchain_pubkey( + &self, + onchain_pubkey: &[u8; 32], + ) -> Option<&TokenDataWithMerkleContext> { + let matches: Vec<_> = self + .token_compressed_accounts + .iter() + .filter(|acc| { + Self::extract_onchain_pubkey_from_data( + acc.compressed_account.compressed_account.data.as_ref(), + ) + .as_ref() + == Some(onchain_pubkey) + }) + .collect(); + + debug_assert!( + matches.len() <= 1, + "find_token_account_by_onchain_pubkey: found {} matches, expected at most 1", + matches.len() + ); + + matches.into_iter().next() + } + + /// Find a compressed account by its PDA pubkey + pub fn find_compressed_account_by_pda_seed( + &self, + pda_pubkey: &[u8; 32], + ) -> Option<&CompressedAccountWithMerkleContext> { + // Try each address tree to find an account whose address matches + for address_tree in &self.address_merkle_trees { + let tree_pubkey = address_tree.accounts.merkle_tree.to_bytes(); + + // For each compressed account with an address, check if it was derived from this seed + for acc in &self.compressed_accounts { + if let Some(address) = 
acc.compressed_account.address { + // Try deriving with this tree and the account's owner as program_id + let owner_bytes = acc.compressed_account.owner.to_bytes(); + let derived = light_compressed_account::address::derive_address( + pda_pubkey, + &tree_pubkey, + &owner_bytes, + ); + + if derived == address { + return Some(acc); + } + } + } + } + None + } + + /// Find a token compressed account by its PDA pubkey + pub fn find_token_account_by_pda_seed( + &self, + pda_pubkey: &[u8; 32], + ) -> Option<&TokenDataWithMerkleContext> { + // Try each address tree to find an account whose address matches + for address_tree in &self.address_merkle_trees { + let tree_pubkey = address_tree.accounts.merkle_tree.to_bytes(); + + // For each token compressed account with an address, check if it was derived from this seed + for acc in &self.token_compressed_accounts { + if let Some(address) = acc.compressed_account.compressed_account.address { + // Try deriving with this tree and the account's owner as program_id + let owner_bytes = acc.compressed_account.compressed_account.owner.to_bytes(); + let derived = light_compressed_account::address::derive_address( + pda_pubkey, + &tree_pubkey, + &owner_bytes, + ); + + if derived == address { + return Some(acc); + } + } + } + } + None + } + + /// Get the sequence number for a state merkle tree by its pubkey. 
+ pub fn get_state_tree_seq(&self, tree_pubkey: &Pubkey) -> Option { + self.state_merkle_trees + .iter() + .find(|tree| tree.accounts.merkle_tree == *tree_pubkey) + .map(|tree| tree.merkle_tree.sequence_number as u64) + } + pub fn add_address_merkle_tree_bundle( address_merkle_tree_accounts: AddressMerkleTreeAccounts, // TODO: add config here diff --git a/sdk-libs/program-test/src/program_test/rpc.rs b/sdk-libs/program-test/src/program_test/rpc.rs index a2f5d6981d..0d4173cca4 100644 --- a/sdk-libs/program-test/src/program_test/rpc.rs +++ b/sdk-libs/program-test/src/program_test/rpc.rs @@ -4,7 +4,8 @@ use anchor_lang::pubkey; use async_trait::async_trait; use borsh::BorshDeserialize; use light_client::{ - indexer::{Indexer, TreeInfo}, + indexer::{CompressedAccount, CompressedTokenAccount, Context, Indexer, Response, TreeInfo}, + interface::{AccountInterface, MintInterface, MintState, TokenAccountInterface}, rpc::{LightClientConfig, Rpc, RpcError}, }; use light_compressed_account::TreeType; @@ -366,6 +367,397 @@ impl Rpc for LightProgramTest { "create_and_send_versioned_transaction is unimplemented for LightProgramTest" ); } + + async fn get_account_interface( + &self, + address: &Pubkey, + _config: Option, + ) -> Result>, RpcError> { + let slot = self.context.get_sysvar::().slot; + + // Hot: check on-chain first + if let Some(account) = self.context.get_account(address) { + if account.lamports > 0 { + return Ok(Response { + context: Context { slot }, + value: Some(AccountInterface::hot(*address, account)), + }); + } + } + + // Cold: check TestIndexer by onchain pubkey (mirrors Photon behavior) + if let Some(indexer) = self.indexer.as_ref() { + // First try: lookup by onchain_pubkey (for accounts with DECOMPRESSED_PDA_DISCRIMINATOR) + if let Some(compressed_with_ctx) = + indexer.find_compressed_account_by_onchain_pubkey(&address.to_bytes()) + { + let owner: Pubkey = compressed_with_ctx.compressed_account.owner.into(); + let compressed: CompressedAccount = 
compressed_with_ctx.clone().try_into().map_err( + |e| { + RpcError::CustomError(format!( + "CompressedAccountWithMerkleContext conversion failed for address {}: {:?}", + address, e + )) + }, + )?; + + return Ok(Response { + context: Context { slot }, + value: Some(AccountInterface::cold(*address, compressed, owner)), + }); + } + + // Second try: lookup by PDA seed (for accounts whose address was derived from this pubkey) + if let Some(compressed_with_ctx) = + indexer.find_compressed_account_by_pda_seed(&address.to_bytes()) + { + let owner: Pubkey = compressed_with_ctx.compressed_account.owner.into(); + let compressed: CompressedAccount = compressed_with_ctx.clone().try_into().map_err( + |e| { + RpcError::CustomError(format!( + "CompressedAccountWithMerkleContext conversion failed for PDA seed {}: {:?}", + address, e + )) + }, + )?; + + return Ok(Response { + context: Context { slot }, + value: Some(AccountInterface::cold(*address, compressed, owner)), + }); + } + } + + Ok(Response { + context: Context { slot }, + value: None, + }) + } + + async fn get_token_account_interface( + &self, + address: &Pubkey, + _config: Option, + ) -> Result>, RpcError> { + use light_sdk::constants::LIGHT_TOKEN_PROGRAM_ID; + + let light_token_program_id: Pubkey = LIGHT_TOKEN_PROGRAM_ID.into(); + let slot = self.context.get_sysvar::().slot; + + // Hot: check on-chain first (must be owned by LIGHT_TOKEN_PROGRAM_ID) + if let Some(account) = self.context.get_account(address) { + if account.lamports > 0 && account.owner == light_token_program_id { + match TokenAccountInterface::hot(*address, account) { + Ok(iface) => { + return Ok(Response { + context: Context { slot }, + value: Some(iface), + }); + } + Err(_) => { + // Fall through to cold lookup if parsing failed + } + } + } + } + + // Cold: check TestIndexer by onchain_pubkey, PDA seed, or token_data.owner + if let Some(indexer) = self.indexer.as_ref() { + // First try: lookup by onchain_pubkey (for accounts with 
DECOMPRESSED_PDA_DISCRIMINATOR) + let token_acc = indexer + .find_token_account_by_onchain_pubkey(&address.to_bytes()) + .or_else(|| { + // Second try: lookup by PDA seed (for accounts whose address was derived from this pubkey) + indexer.find_token_account_by_pda_seed(&address.to_bytes()) + }); + + if let Some(token_acc) = token_acc { + // Convert to CompressedTokenAccount + let compressed_account: CompressedAccount = token_acc + .compressed_account + .clone() + .try_into() + .map_err(|e| RpcError::CustomError(format!("conversion error: {:?}", e)))?; + + let compressed_token = CompressedTokenAccount { + token: token_acc.token_data.clone(), + account: compressed_account, + }; + + return Ok(Response { + context: Context { slot }, + value: Some(TokenAccountInterface::cold( + *address, + compressed_token, + *address, // owner = hot address for program-owned tokens + light_token_program_id, + )), + }); + } + + // Third try: lookup by token_data.owner (for tokens where owner == address) + let result = indexer + .get_compressed_token_accounts_by_owner(address, None, None) + .await + .map_err(|e| RpcError::CustomError(format!("indexer error: {}", e)))?; + + let items = result.value.items; + if items.len() > 1 { + return Err(RpcError::CustomError(format!( + "Ambiguous lookup: found {} compressed token accounts for address {}. 
\ + Use get_compressed_token_accounts_by_owner for multiple accounts.", + items.len(), + address + ))); + } + + if let Some(token_acc) = items.into_iter().next() { + let key = token_acc + .account + .address + .map(Pubkey::new_from_array) + .unwrap_or(*address); + return Ok(Response { + context: Context { slot }, + value: Some(TokenAccountInterface::cold( + key, + token_acc, + *address, // owner = hot address for program-owned tokens + light_token_program_id, + )), + }); + } + } + + Ok(Response { + context: Context { slot }, + value: None, + }) + } + + async fn get_associated_token_account_interface( + &self, + owner: &Pubkey, + mint: &Pubkey, + _config: Option, + ) -> Result>, RpcError> { + use light_client::indexer::GetCompressedTokenAccountsByOwnerOrDelegateOptions; + use light_sdk::constants::LIGHT_TOKEN_PROGRAM_ID; + use light_token::instruction::derive_token_ata; + + let (ata, _bump) = derive_token_ata(owner, mint); + let light_token_program_id: Pubkey = LIGHT_TOKEN_PROGRAM_ID.into(); + let slot = self.context.get_sysvar::().slot; + + // First try: on-chain (hot) lookup + // We handle this directly instead of using get_token_account_interface + // because we need to control owner_override for ata_bump() to work + if let Some(account) = self.context.get_account(&ata) { + if account.lamports > 0 && account.owner == light_token_program_id { + match TokenAccountInterface::hot(ata, account) { + Ok(iface) => { + return Ok(Response { + context: Context { slot }, + value: Some(iface), + }); + } + Err(_) => { + // Fall through to cold lookup if parsing failed + } + } + } + } + + // Cold: search compressed tokens by ata_pubkey + mint + // In Light Protocol, token_data.owner is the token account pubkey (ATA), not wallet owner + // But we need to pass the wallet owner for TokenAccountInterface::cold so ata_bump() works + if let Some(indexer) = self.indexer.as_ref() { + let options = Some(GetCompressedTokenAccountsByOwnerOrDelegateOptions { + mint: Some(*mint), + 
..Default::default() + }); + let result = indexer + .get_compressed_token_accounts_by_owner(&ata, options, None) + .await + .map_err(|e| RpcError::CustomError(format!("indexer error: {}", e)))?; + + let items = result.value.items; + if items.len() > 1 { + return Err(RpcError::CustomError(format!( + "Ambiguous lookup: found {} compressed token accounts for ATA {} (owner: {}, mint: {}). \ + Use get_compressed_token_accounts_by_owner for multiple accounts.", + items.len(), + ata, + owner, + mint + ))); + } + + if let Some(token_acc) = items.into_iter().next() { + return Ok(Response { + context: Context { slot }, + value: Some(TokenAccountInterface::cold( + ata, // key = ATA pubkey (derived, so we use it directly) + token_acc, + *owner, // owner_override = wallet owner (for ata_bump() to work) + light_token_program_id, + )), + }); + } + } + + Ok(Response { + context: Context { slot }, + value: None, + }) + } + + async fn get_multiple_account_interfaces( + &self, + addresses: Vec<&Pubkey>, + _config: Option, + ) -> Result>>, RpcError> { + let slot = self.context.get_sysvar::().slot; + let mut results: Vec> = vec![None; addresses.len()]; + + // Batch fetch on-chain accounts (hot path) + let owned_addresses: Vec = addresses.iter().map(|a| **a).collect(); + let on_chain_accounts: Vec> = owned_addresses + .iter() + .map(|addr| self.context.get_account(addr)) + .collect(); + + // Track which addresses still need cold lookup + let mut cold_lookup_indices: Vec = Vec::new(); + let mut cold_lookup_pubkeys: Vec<[u8; 32]> = Vec::new(); + + for (i, (address, maybe_account)) in addresses + .iter() + .zip(on_chain_accounts.into_iter()) + .enumerate() + { + if let Some(account) = maybe_account { + if account.lamports > 0 { + results[i] = Some(AccountInterface::hot(**address, account)); + continue; + } + } + // Not found on-chain or has 0 lamports, need cold lookup + cold_lookup_indices.push(i); + cold_lookup_pubkeys.push(address.to_bytes()); + } + + // Batch lookup cold accounts from 
TestIndexer + if !cold_lookup_pubkeys.is_empty() { + if let Some(indexer) = self.indexer.as_ref() { + let cold_results = indexer + .find_multiple_compressed_accounts_by_onchain_pubkeys(&cold_lookup_pubkeys); + + for (lookup_idx, maybe_compressed) in cold_results.into_iter().enumerate() { + let original_idx = cold_lookup_indices[lookup_idx]; + if let Some(compressed_with_ctx) = maybe_compressed { + let owner: Pubkey = compressed_with_ctx.compressed_account.owner.into(); + let compressed: CompressedAccount = + compressed_with_ctx.clone().try_into().map_err(|e| { + RpcError::CustomError(format!("conversion error: {:?}", e)) + })?; + + results[original_idx] = Some(AccountInterface::cold( + *addresses[original_idx], + compressed, + owner, + )); + } + } + } + } + + Ok(Response { + context: Context { slot }, + value: results, + }) + } + + async fn get_mint_interface( + &self, + address: &Pubkey, + config: Option, + ) -> Result>, RpcError> { + use borsh::BorshDeserialize; + use light_compressed_account::address::derive_address; + use light_token_interface::{state::Mint, MINT_ADDRESS_TREE}; + + let slot = self.context.get_sysvar::().slot; + let address_tree = Pubkey::new_from_array(MINT_ADDRESS_TREE); + let compressed_address = derive_address( + &address.to_bytes(), + &address_tree.to_bytes(), + &light_token_interface::LIGHT_TOKEN_PROGRAM_ID, + ); + + // 1. Try hot (on-chain) first + if let Some(account) = self.context.get_account(address) { + if account.lamports > 0 { + return Ok(Response { + context: Context { slot }, + value: Some(MintInterface { + mint: *address, + address_tree, + compressed_address, + state: MintState::Hot { account }, + }), + }); + } + } + + // 2. 
Fall back to cold (compressed) via indexer + let indexer = self + .indexer + .as_ref() + .ok_or_else(|| RpcError::CustomError("Indexer not initialized".to_string()))?; + + let resp = indexer + .get_compressed_account(compressed_address, config) + .await + .map_err(|e| RpcError::CustomError(format!("Indexer error: {e}")))?; + + let value = match resp.value { + Some(compressed) => { + // Parse mint data from compressed account + let mint_data = compressed + .data + .as_ref() + .and_then(|d| { + if d.data.is_empty() { + None + } else { + Mint::try_from_slice(&d.data).ok() + } + }) + .ok_or_else(|| { + RpcError::CustomError( + "Missing or invalid mint data in compressed account".into(), + ) + })?; + + Some(MintInterface { + mint: *address, + address_tree, + compressed_address, + state: MintState::Cold { + compressed, + mint_data, + }, + }) + } + None => None, + }; + + Ok(Response { + context: Context { slot }, + value, + }) + } } impl LightProgramTest { diff --git a/sdk-tests/anchor-manual-test/tests/account_loader.rs b/sdk-tests/anchor-manual-test/tests/account_loader.rs index f98235b5a8..5250ebf249 100644 --- a/sdk-tests/anchor-manual-test/tests/account_loader.rs +++ b/sdk-tests/anchor-manual-test/tests/account_loader.rs @@ -11,8 +11,8 @@ use anchor_manual_test::{ }; use light_account::IntoVariant; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Indexer, Rpc}; @@ -118,9 +118,11 @@ async fn test_zero_copy_create_compress_decompress() { // PHASE 4: Decompress account let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to 
get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" diff --git a/sdk-tests/anchor-manual-test/tests/test.rs b/sdk-tests/anchor-manual-test/tests/test.rs index f6356bd0f3..4ce96b4662 100644 --- a/sdk-tests/anchor-manual-test/tests/test.rs +++ b/sdk-tests/anchor-manual-test/tests/test.rs @@ -11,8 +11,8 @@ use anchor_manual_test::{ }; use light_account::IntoVariant; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Indexer, Rpc}; @@ -116,9 +116,11 @@ async fn test_create_compress_decompress() { // PHASE 4: Decompress account let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" diff --git a/sdk-tests/anchor-semi-manual-test/tests/stress_test.rs b/sdk-tests/anchor-semi-manual-test/tests/stress_test.rs index 4d3d2972a0..fe049be82a 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/stress_test.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/stress_test.rs @@ -18,8 +18,8 @@ use light_batched_merkle_tree::{ initialize_state_tree::InitStateTreeAccountsInstructionData, }; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, 
CreateAccountsProofInput, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -239,9 +239,11 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // PDA: MinimalRecord let record_interface = ctx .rpc - .get_account_interface(&pdas.record, &ctx.program_id) + .get_account_interface(&pdas.record, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); assert!(record_interface.is_cold(), "MinimalRecord should be cold"); let record_data = MinimalRecord::deserialize(&mut &record_interface.account.data[8..]) @@ -257,9 +259,11 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // PDA: ZeroCopyRecord let zc_interface = ctx .rpc - .get_account_interface(&pdas.zc_record, &ctx.program_id) + .get_account_interface(&pdas.zc_record, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(zc_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data = ZeroCopyRecord::deserialize(&mut &zc_interface.account.data[8..]) @@ -275,17 +279,21 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // ATA let ata_interface = ctx .rpc - .get_ata_interface(&pdas.ata_owner, &pdas.ata_mint) + .get_associated_token_account_interface(&pdas.ata_owner, &pdas.ata_mint, None) .await - .expect("failed to get ATA interface"); + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); // Token PDA: Vault let vault_iface = ctx .rpc - .get_token_account_interface(&pdas.vault) + .get_token_account_interface(&pdas.vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + 
.expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let vault_token_data: Token = @@ -310,46 +318,24 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // Mint A let mint_a_iface = ctx .rpc - .get_mint_interface(&pdas.mint_a) + .get_mint_interface(&pdas.mint_a, None) .await - .expect("failed to get mint A interface"); + .expect("failed to get mint A interface") + .value + .expect("mint A interface should exist"); assert!(mint_a_iface.is_cold(), "Mint A should be cold"); - let (compressed_a, _) = mint_a_iface - .compressed() - .expect("cold mint A must have compressed data"); - let mint_a_ai = AccountInterface { - key: pdas.mint_a, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_a.clone())), - }; + let mint_a_ai = AccountInterface::from(mint_a_iface); // Mint B let mint_b_iface = ctx .rpc - .get_mint_interface(&pdas.mint_b) + .get_mint_interface(&pdas.mint_b, None) .await - .expect("failed to get mint B interface"); + .expect("failed to get mint B interface") + .value + .expect("mint B interface should exist"); assert!(mint_b_iface.is_cold(), "Mint B should be cold"); - let (compressed_b, _) = mint_b_iface - .compressed() - .expect("cold mint B must have compressed data"); - let mint_b_ai = AccountInterface { - key: pdas.mint_b, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_b.clone())), - }; + let mint_b_ai = AccountInterface::from(mint_b_iface); let specs: Vec> = vec![ AccountSpec::Pda(record_spec), diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_all.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_all.rs index 32dcc8b19b..e596f2c5a1 
100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_all.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_all.rs @@ -6,8 +6,8 @@ use anchor_semi_manual_test::{ MINT_SIGNER_SEED_B, RECORD_SEED, VAULT_AUTH_SEED, VAULT_SEED, }; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, CreateAccountsProofInput, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -220,9 +220,11 @@ async fn test_create_all_derive() { // PDA: MinimalRecord let record_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); assert!(record_interface.is_cold(), "MinimalRecord should be cold"); let record_data = MinimalRecord::deserialize(&mut &record_interface.account.data[8..]) @@ -235,9 +237,11 @@ async fn test_create_all_derive() { // PDA: ZeroCopyRecord let zc_interface = rpc - .get_account_interface(&zc_record_pda, &program_id) + .get_account_interface(&zc_record_pda, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(zc_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data = ZeroCopyRecord::deserialize(&mut &zc_interface.account.data[8..]) @@ -250,58 +254,40 @@ async fn test_create_all_derive() { // ATA let ata_interface = rpc - .get_ata_interface(&ata_owner, &ata_mint) + .get_associated_token_account_interface(&ata_owner, &ata_mint, None) .await - .expect("failed to get ATA interface"); + .expect("failed 
to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); // Mint A let mint_a_iface = rpc - .get_mint_interface(&mint_a_pda) + .get_mint_interface(&mint_a_pda, None) .await - .expect("failed to get mint A interface"); + .expect("failed to get mint A interface") + .value + .expect("mint A interface should exist"); assert!(mint_a_iface.is_cold(), "Mint A should be cold"); - let (compressed_a, _) = mint_a_iface - .compressed() - .expect("cold mint A must have compressed data"); - let mint_a_ai = AccountInterface { - key: mint_a_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_a.clone())), - }; + let mint_a_ai = AccountInterface::from(mint_a_iface); // Mint B let mint_b_iface = rpc - .get_mint_interface(&mint_b_pda) + .get_mint_interface(&mint_b_pda, None) .await - .expect("failed to get mint B interface"); + .expect("failed to get mint B interface") + .value + .expect("mint B interface should exist"); assert!(mint_b_iface.is_cold(), "Mint B should be cold"); - let (compressed_b, _) = mint_b_iface - .compressed() - .expect("cold mint B must have compressed data"); - let mint_b_ai = AccountInterface { - key: mint_b_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_b.clone())), - }; + let mint_b_ai = AccountInterface::from(mint_b_iface); // Token PDA: Vault let vault_iface = rpc - .get_token_account_interface(&vault) + .get_token_account_interface(&vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let 
vault_token_data: Token = diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_ata.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_ata.rs index 3f7ff9883b..c6f80552fa 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_ata.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_ata.rs @@ -2,9 +2,7 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use anchor_semi_manual_test::CreateAtaParams; -use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, -}; +use light_client::interface::{create_load_instructions, get_create_accounts_proof, AccountSpec}; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID; @@ -97,9 +95,11 @@ async fn test_create_ata_derive() { use anchor_semi_manual_test::LightAccountVariant; let ata_interface = rpc - .get_ata_interface(&ata_owner, &mint) + .get_associated_token_account_interface(&ata_owner, &mint, None) .await - .expect("failed to get ATA interface"); + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); let specs: Vec> = vec![AccountSpec::Ata(ata_interface)]; diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_mint.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_mint.rs index 8af51ab83e..41291a984e 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_mint.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_mint.rs @@ -3,8 +3,8 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use anchor_semi_manual_test::{CreateMintParams, MINT_SIGNER_SEED_A}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, + create_load_instructions, get_create_accounts_proof, 
AccountInterface, AccountSpec, + CreateAccountsProofInput, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -99,25 +99,13 @@ async fn test_create_mint_derive() { use anchor_semi_manual_test::LightAccountVariant; let mint_interface = rpc - .get_mint_interface(&mint_pda) + .get_mint_interface(&mint_pda, None) .await - .expect("failed to get mint interface"); + .expect("failed to get mint interface") + .value + .expect("mint interface should exist"); assert!(mint_interface.is_cold(), "Mint should be cold"); - - let (compressed, _mint_data) = mint_interface - .compressed() - .expect("cold mint must have compressed data"); - let mint_account_interface = AccountInterface { - key: mint_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed.clone())), - }; + let mint_account_interface = AccountInterface::from(mint_interface); let specs: Vec> = vec![AccountSpec::Mint(mint_account_interface)]; diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_pda.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_pda.rs index 06b0818f39..9b48fbec4a 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_pda.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_pda.rs @@ -3,8 +3,8 @@ mod shared; use anchor_lang::{AnchorDeserialize, InstructionData, ToAccountMetas}; use anchor_semi_manual_test::CreatePdaParams; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -91,9 +91,11 @@ async fn test_create_single_pda_derive() { use 
anchor_semi_manual_test::{LightAccountVariant, MinimalRecordSeeds}; let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); assert!(account_interface.is_cold(), "MinimalRecord should be cold"); let data = MinimalRecord::deserialize(&mut &account_interface.account.data[8..]) diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_token_vault.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_token_vault.rs index ba64ff98dd..74e9e8ef9f 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_token_vault.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_token_vault.rs @@ -5,8 +5,8 @@ use anchor_semi_manual_test::{ CreateTokenVaultParams, LightAccountVariant, VaultSeeds, VAULT_AUTH_SEED, VAULT_SEED, }; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -103,9 +103,11 @@ async fn test_create_token_vault_derive() { // PHASE 3: Decompress vault let vault_iface = rpc - .get_token_account_interface(&vault) + .get_token_account_interface(&vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let token_data: Token = diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_two_mints.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_two_mints.rs index 4ac2f0dfb3..e9489b081a 100644 --- 
a/sdk-tests/anchor-semi-manual-test/tests/test_create_two_mints.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_two_mints.rs @@ -3,8 +3,8 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use anchor_semi_manual_test::{CreateTwoMintsParams, MINT_SIGNER_SEED_A, MINT_SIGNER_SEED_B}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + CreateAccountsProofInput, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -123,36 +123,23 @@ async fn test_create_two_mints_derive() { // PHASE 3: Decompress both mints via create_load_instructions use anchor_semi_manual_test::LightAccountVariant; - let build_mint_account_interface = |mint_interface: light_client::interface::MintInterface| { - let (compressed, _mint_data) = mint_interface - .compressed() - .expect("cold mint must have compressed data"); - AccountInterface { - key: mint_interface.mint, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed.clone())), - } - }; - let mint_a_interface = rpc - .get_mint_interface(&mint_a_pda) + .get_mint_interface(&mint_a_pda, None) .await - .expect("failed to get mint A interface"); + .expect("failed to get mint A interface") + .value + .expect("mint A interface should exist"); assert!(mint_a_interface.is_cold(), "Mint A should be cold"); - let mint_a_ai = build_mint_account_interface(mint_a_interface); + let mint_a_ai = AccountInterface::from(mint_a_interface); let mint_b_interface = rpc - .get_mint_interface(&mint_b_pda) + .get_mint_interface(&mint_b_pda, None) .await - .expect("failed to get mint B interface"); + .expect("failed to 
get mint B interface") + .value + .expect("mint B interface should exist"); assert!(mint_b_interface.is_cold(), "Mint B should be cold"); - let mint_b_ai = build_mint_account_interface(mint_b_interface); + let mint_b_ai = AccountInterface::from(mint_b_interface); let specs: Vec> = vec![AccountSpec::Mint(mint_a_ai), AccountSpec::Mint(mint_b_ai)]; diff --git a/sdk-tests/anchor-semi-manual-test/tests/test_create_zero_copy_record.rs b/sdk-tests/anchor-semi-manual-test/tests/test_create_zero_copy_record.rs index 0655117ac6..9c3a15fb18 100644 --- a/sdk-tests/anchor-semi-manual-test/tests/test_create_zero_copy_record.rs +++ b/sdk-tests/anchor-semi-manual-test/tests/test_create_zero_copy_record.rs @@ -3,8 +3,8 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use anchor_semi_manual_test::{CreateZeroCopyRecordParams, RECORD_SEED}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -85,9 +85,11 @@ async fn test_create_zero_copy_record_derive() { use anchor_semi_manual_test::{LightAccountVariant, ZeroCopyRecordSeeds}; let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(account_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data = ZeroCopyRecord::deserialize(&mut &account_interface.account.data[8..]) diff --git a/sdk-tests/client-test/tests/light_client.rs b/sdk-tests/client-test/tests/light_client.rs index 9207e0f69c..c8d1bb28bc 100644 --- a/sdk-tests/client-test/tests/light_client.rs 
+++ b/sdk-tests/client-test/tests/light_client.rs @@ -56,6 +56,7 @@ async fn test_all_endpoints() { upgradeable_programs: vec![], limit_ledger_size: None, use_surfpool: true, + validator_args: vec![], }; spawn_validator(config).await; diff --git a/sdk-tests/csdk-anchor-full-derived-test-sdk/src/lib.rs b/sdk-tests/csdk-anchor-full-derived-test-sdk/src/lib.rs index 308e4aa52f..f773c00577 100644 --- a/sdk-tests/csdk-anchor-full-derived-test-sdk/src/lib.rs +++ b/sdk-tests/csdk-anchor-full-derived-test-sdk/src/lib.rs @@ -229,6 +229,9 @@ impl AmmSdk { let compressed_account = match &account.cold { Some(ColdContext::Token(ct)) => ct.account.clone(), Some(ColdContext::Account(ca)) => ca.clone(), + Some(ColdContext::Mint(_)) => { + return Err(AmmSdkError::MissingField("unexpected Mint cold context")) + } None => return Err(AmmSdkError::MissingField("cold_context")), }; AccountInterface { @@ -290,20 +293,23 @@ impl AmmSdk { } fn account_requirements(&self, ix: &AmmInstruction) -> Vec { + let vault_0_req = AccountRequirement::new(self.token_0_vault, AccountKind::Token); + let vault_1_req = AccountRequirement::new(self.token_1_vault, AccountKind::Token); + match ix { AmmInstruction::Swap => { vec![ AccountRequirement::new(self.pool_state_pubkey, AccountKind::Pda), - AccountRequirement::new(self.token_0_vault, AccountKind::Token), - AccountRequirement::new(self.token_1_vault, AccountKind::Token), + vault_0_req, + vault_1_req, AccountRequirement::new(self.observation_key, AccountKind::Pda), ] } AmmInstruction::Deposit | AmmInstruction::Withdraw => { vec![ AccountRequirement::new(self.pool_state_pubkey, AccountKind::Pda), - AccountRequirement::new(self.token_0_vault, AccountKind::Token), - AccountRequirement::new(self.token_1_vault, AccountKind::Token), + vault_0_req, + vault_1_req, AccountRequirement::new(self.observation_key, AccountKind::Pda), AccountRequirement::new(self.lp_mint, AccountKind::Mint), ] @@ -339,12 +345,12 @@ impl LightProgramInterface for AmmSdk { fn 
get_accounts_to_update(&self, ix: &Self::Instruction) -> Vec { self.account_requirements(ix) .into_iter() - .filter_map(|req| { - req.pubkey.map(|pubkey| match req.kind { - AccountKind::Pda => AccountToFetch::pda(pubkey, PROGRAM_ID), - AccountKind::Token => AccountToFetch::token(pubkey), - AccountKind::Mint => AccountToFetch::mint(pubkey), - }) + .filter_map(|req| match req.kind { + AccountKind::Pda => req + .pubkey + .map(|pubkey| AccountToFetch::pda(pubkey, PROGRAM_ID)), + AccountKind::Token => req.pubkey.map(AccountToFetch::token), + AccountKind::Mint => req.pubkey.map(AccountToFetch::mint), }) .collect() } diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/amm_stress_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/amm_stress_test.rs index c89d9c1b3c..4b22c9622f 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/amm_stress_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/amm_stress_test.rs @@ -19,7 +19,7 @@ use light_batched_merkle_tree::{ initialize_state_tree::InitStateTreeAccountsInstructionData, }; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, CreateAccountsProofInput, InitializeRentFreeConfig, LightProgramInterface, }; use light_compressible::rent::SLOTS_PER_EPOCH; @@ -489,9 +489,11 @@ async fn refresh_cache(rpc: &mut LightProgramTest, pdas: &AmmPdas) -> CachedStat async fn decompress_all(ctx: &mut AmmTestContext, pdas: &AmmPdas) { let pool_interface = ctx .rpc - .get_account_interface(&pdas.pool_state, &ctx.program_id) + .get_account_interface(&pdas.pool_state, None) .await - .expect("failed to get pool_state"); + .expect("failed to get pool_state") + .value + .expect("pool_state should exist"); assert!(pool_interface.is_cold(), "pool_state should be cold"); let mut sdk = AmmSdk::from_keyed_accounts(&[pool_interface]) @@ -501,9 +503,9 @@ async fn decompress_all(ctx: &mut 
AmmTestContext, pdas: &AmmPdas) { let keyed_accounts = ctx .rpc - .get_multiple_account_interfaces(&accounts_to_fetch) + .fetch_accounts(&accounts_to_fetch, None) .await - .expect("get_multiple_account_interfaces should succeed"); + .expect("fetch_accounts should succeed"); sdk.update(&keyed_accounts) .expect("sdk.update should succeed"); @@ -512,69 +514,46 @@ async fn decompress_all(ctx: &mut AmmTestContext, pdas: &AmmPdas) { let creator_lp_interface = ctx .rpc - .get_ata_interface(&ctx.creator.pubkey(), &pdas.lp_mint) + .get_associated_token_account_interface(&ctx.creator.pubkey(), &pdas.lp_mint, None) .await - .expect("failed to get creator_lp_token"); + .expect("failed to get creator_lp_token") + .value + .expect("creator_lp_token should exist"); // Creator's token_0 and token_1 ATAs also get compressed during epoch warp let creator_token_0_interface = ctx .rpc - .get_ata_interface(&ctx.creator.pubkey(), &ctx.token_0_mint) + .get_associated_token_account_interface(&ctx.creator.pubkey(), &ctx.token_0_mint, None) .await - .expect("failed to get creator_token_0"); + .expect("failed to get creator_token_0") + .value + .expect("creator_token_0 should exist"); let creator_token_1_interface = ctx .rpc - .get_ata_interface(&ctx.creator.pubkey(), &ctx.token_1_mint) + .get_associated_token_account_interface(&ctx.creator.pubkey(), &ctx.token_1_mint, None) .await - .expect("failed to get creator_token_1"); + .expect("failed to get creator_token_1") + .value + .expect("creator_token_1 should exist"); - // Underlying mints also get compressed -- convert MintInterface to AccountInterface - use light_client::interface::{AccountInterface, AccountSpec, MintState}; - - let mint_0_iface = ctx - .rpc - .get_mint_interface(&ctx.token_0_mint) - .await - .expect("failed to get token_0_mint"); - let mint_0_account_iface = match mint_0_iface.state { - MintState::Hot { account } => AccountInterface { - key: mint_0_iface.mint, - account, - cold: None, - }, - MintState::Cold { compressed, 
.. } => { - let owner = compressed.owner; - AccountInterface::cold(mint_0_iface.mint, compressed, owner) - } - MintState::None => AccountInterface { - key: mint_0_iface.mint, - account: Default::default(), - cold: None, - }, - }; + let mint_0_account_iface = AccountInterface::from( + ctx.rpc + .get_mint_interface(&ctx.token_0_mint, None) + .await + .expect("failed to get token_0_mint") + .value + .expect("token_0_mint should exist"), + ); - let mint_1_iface = ctx - .rpc - .get_mint_interface(&ctx.token_1_mint) - .await - .expect("failed to get token_1_mint"); - let mint_1_account_iface = match mint_1_iface.state { - MintState::Hot { account } => AccountInterface { - key: mint_1_iface.mint, - account, - cold: None, - }, - MintState::Cold { compressed, .. } => { - let owner = compressed.owner; - AccountInterface::cold(mint_1_iface.mint, compressed, owner) - } - MintState::None => AccountInterface { - key: mint_1_iface.mint, - account: Default::default(), - cold: None, - }, - }; + let mint_1_account_iface = AccountInterface::from( + ctx.rpc + .get_mint_interface(&ctx.token_1_mint, None) + .await + .expect("failed to get token_1_mint") + .value + .expect("token_1_mint should exist"), + ); let mut all_specs = specs; all_specs.push(AccountSpec::Ata(creator_lp_interface)); diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/amm_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/amm_test.rs index 844022ac64..bbb0e21b9d 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/amm_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/amm_test.rs @@ -17,8 +17,8 @@ use csdk_anchor_full_derived_test::amm_test::{ // SDK for AmmSdk-based approach use csdk_anchor_full_derived_test_sdk::{AmmInstruction, AmmSdk}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, - CreateAccountsProofInput, InitializeRentFreeConfig, LightProgramInterface, + create_load_instructions, get_create_accounts_proof, 
CreateAccountsProofInput, + InitializeRentFreeConfig, LightProgramInterface, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -631,9 +631,11 @@ async fn test_amm_full_lifecycle() { let pool_interface = ctx .rpc - .get_account_interface(&pdas.pool_state, &ctx.program_id) + .get_account_interface(&pdas.pool_state, None) .await - .expect("failed to get pool_state"); + .expect("failed to get pool_state") + .value + .expect("pool_state should exist"); assert!(pool_interface.is_cold(), "pool_state should be cold"); // Create Program Interface SDK. @@ -644,9 +646,9 @@ async fn test_amm_full_lifecycle() { let keyed_accounts = ctx .rpc - .get_multiple_account_interfaces(&accounts_to_fetch) + .fetch_accounts(&accounts_to_fetch, None) .await - .expect("get_multiple_account_interfaces should succeed"); + .expect("fetch_accounts should succeed"); sdk.update(&keyed_accounts) .expect("sdk.update should succeed"); @@ -655,9 +657,11 @@ async fn test_amm_full_lifecycle() { let creator_lp_interface = ctx .rpc - .get_ata_interface(&ctx.creator.pubkey(), &pdas.lp_mint) + .get_associated_token_account_interface(&ctx.creator.pubkey(), &pdas.lp_mint, None) .await - .expect("failed to get creator_lp_token"); + .expect("failed to get creator_lp_token") + .value + .expect("creator_lp_token should exist"); // add ata use light_client::interface::AccountSpec; diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/basic_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/basic_test.rs index cab67ab068..2e225a8923 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/basic_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/basic_test.rs @@ -3,8 +3,7 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use light_account::derive_rent_sponsor_pda; use light_client::interface::{ - get_create_accounts_proof, AccountInterfaceExt, CreateAccountsProofInput, - InitializeRentFreeConfig, + get_create_accounts_proof, CreateAccountsProofInput, 
InitializeRentFreeConfig, }; use light_compressible::{rent::SLOTS_PER_EPOCH, DECOMPRESSED_PDA_DISCRIMINATOR}; use light_program_test::{ @@ -360,21 +359,27 @@ async fn test_create_pdas_and_mint_auto() { // Fetch unified interfaces (hot/cold transparent) let user_interface = rpc - .get_account_interface(&user_record_pda, &program_id) + .get_account_interface(&user_record_pda, None) .await - .expect("failed to get user"); + .expect("failed to get user") + .value + .expect("user should exist"); assert!(user_interface.is_cold(), "UserRecord should be cold"); let game_interface = rpc - .get_account_interface(&game_session_pda, &program_id) + .get_account_interface(&game_session_pda, None) .await - .expect("failed to get game"); + .expect("failed to get game") + .value + .expect("game should exist"); assert!(game_interface.is_cold(), "GameSession should be cold"); let vault_interface = rpc - .get_token_account_interface(&vault_pda) + .get_token_account_interface(&vault_pda, None) .await - .expect("failed to get vault"); + .expect("failed to get vault") + .value + .expect("vault should exist"); assert!(vault_interface.is_cold(), "Vault should be cold"); assert_eq!(vault_interface.amount(), vault_mint_amount); @@ -425,11 +430,13 @@ async fn test_create_pdas_and_mint_auto() { }; let vault_spec = PdaSpec::new(vault_interface_for_pda, vault_variant, program_id); - // get_ata_interface: fetches ATA with unified handling using standard SPL types + // get_associated_token_account_interface: fetches ATA with unified handling using standard SPL types let ata_interface = rpc - .get_ata_interface(&payer.pubkey(), &mint_pda) + .get_associated_token_account_interface(&payer.pubkey(), &mint_pda, None) .await - .expect("get_ata_interface should succeed"); + .expect("get_associated_token_account_interface should succeed") + .value + .expect("ATA should exist"); assert!(ata_interface.is_cold(), "ATA should be cold after warp"); assert_eq!(ata_interface.amount(), user_ata_mint_amount); 
assert_eq!(ata_interface.mint(), mint_pda); @@ -439,28 +446,18 @@ async fn test_create_pdas_and_mint_auto() { // Use TokenAccountInterface directly for ATA // (no separate AtaSpec needed - TokenAccountInterface has all the data) - // Fetch mint interface - let mint_interface = rpc - .get_mint_interface(&mint_pda) - .await - .expect("get_mint_interface should succeed"); - assert!(mint_interface.is_cold(), "Mint should be cold after warp"); - - // Convert MintInterface to AccountInterface for use in AccountSpec - let (compressed, _mint_data) = mint_interface - .compressed() - .expect("cold mint must have compressed data"); - let mint_account_interface = AccountInterface { - key: mint_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed.clone())), - }; + // Fetch mint via get_mint_interface to get ColdContext::Mint + let mint_account_interface = light_client::interface::AccountInterface::from( + rpc.get_mint_interface(&mint_pda, None) + .await + .expect("get_mint_interface should succeed") + .value + .expect("Mint should exist"), + ); + assert!( + mint_account_interface.is_cold(), + "Mint should be cold after warp" + ); // Build AccountSpec slice for all accounts let specs: Vec> = vec![ diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/d10_token_accounts_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/d10_token_accounts_test.rs index cb80c58243..47fcd342df 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/d10_token_accounts_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/d10_token_accounts_test.rs @@ -10,9 +10,7 @@ use csdk_anchor_full_derived_test::d10_token_accounts::{ D10SingleAtaMarkonlyParams, D10SingleAtaParams, D10SingleVaultParams, D10_SINGLE_VAULT_AUTH_SEED, D10_SINGLE_VAULT_SEED, }; -use light_client::interface::{ - get_create_accounts_proof, 
AccountInterfaceExt, InitializeRentFreeConfig, -}; +use light_client::interface::{get_create_accounts_proof, InitializeRentFreeConfig}; use light_program_test::{ program_test::{setup_mock_program_data, LightProgramTest}, ProgramTestConfig, Rpc, @@ -534,12 +532,14 @@ async fn test_d10_single_ata_markonly_lifecycle() { shared::assert_onchain_closed(&mut ctx.rpc, &d10_markonly_ata, "d10_markonly_ata").await; // PHASE 3: Decompress ATA using create_load_instructions - // ATAs use get_ata_interface which fetches the compressed token data + // ATAs use get_associated_token_account_interface which fetches the compressed token data let ata_interface = ctx .rpc - .get_ata_interface(&ata_owner, &mint) + .get_associated_token_account_interface(&ata_owner, &mint, None) .await - .expect("get_ata_interface should succeed"); + .expect("get_associated_token_account_interface should succeed") + .value + .expect("ata interface should exist"); assert!( ata_interface.is_cold(), "ATA should be cold after compression" diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/d11_zero_copy_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/d11_zero_copy_test.rs index 2faa6594d8..ed5ff76fe8 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/d11_zero_copy_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/d11_zero_copy_test.rs @@ -43,8 +43,8 @@ use csdk_anchor_full_derived_test::d11_zero_copy::{ }; use light_account::IntoVariant; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, InitializeRentFreeConfig, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + InitializeRentFreeConfig, PdaSpec, }; use light_compressed_account::address::derive_address; use light_compressible::rent::SLOTS_PER_EPOCH; @@ -256,9 +256,11 @@ async fn test_d11_zc_with_vault() { // PHASE 4: Decompress account let account_interface = ctx .rpc - 
.get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" @@ -426,9 +428,11 @@ async fn test_d11_zc_with_ata() { // PHASE 4: Decompress account let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" @@ -584,9 +588,11 @@ async fn test_d11_multiple_zc() { // PHASE 4: Decompress first account let account_interface_1 = ctx .rpc - .get_account_interface(&zc_pda_1, &ctx.program_id) + .get_account_interface(&zc_pda_1, None) .await - .expect("failed to get account interface 1"); + .expect("failed to get account interface 1") + .value + .expect("account interface 1 should exist"); assert!(account_interface_1.is_cold(), "Account 1 should be cold"); let variant_1: LightAccountVariant = @@ -614,9 +620,11 @@ async fn test_d11_multiple_zc() { // Decompress second account let account_interface_2 = ctx .rpc - .get_account_interface(&zc_pda_2, &ctx.program_id) + .get_account_interface(&zc_pda_2, None) .await - .expect("failed to get account interface 2"); + .expect("failed to get account interface 2") + .value + .expect("account interface 2 should exist"); assert!(account_interface_2.is_cold(), "Account 2 should be cold"); let variant_2: LightAccountVariant = @@ -785,9 +793,11 @@ async fn test_d11_mixed_zc_borsh() { // PHASE 4: Decompress zero-copy account let account_interface_zc = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get zc account interface"); 
+ .expect("failed to get zc account interface") + .value + .expect("zc account interface should exist"); let variant_zc: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcMixedRecordSeeds { owner } @@ -814,9 +824,11 @@ async fn test_d11_mixed_zc_borsh() { // Decompress borsh account let account_interface_borsh = ctx .rpc - .get_account_interface(&borsh_pda, &ctx.program_id) + .get_account_interface(&borsh_pda, None) .await - .expect("failed to get borsh account interface"); + .expect("failed to get borsh account interface") + .value + .expect("borsh account interface should exist"); let variant_borsh: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::BorshRecordSeeds { owner } @@ -975,9 +987,11 @@ async fn test_d11_zc_with_ctx_seeds() { // PHASE 4: Decompress account let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" @@ -1118,9 +1132,11 @@ async fn test_d11_zc_with_params_seeds() { // PHASE 4: Decompress account let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" @@ -1292,9 +1308,11 @@ async fn test_d11_zc_with_mint_to() { // PHASE 4: Decompress account let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account 
interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/failing_tests.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/failing_tests.rs index 6a4742b67e..d89a87e25c 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/failing_tests.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/failing_tests.rs @@ -22,8 +22,8 @@ use csdk_anchor_full_derived_test::{ }; use light_account::IntoVariant; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, InitializeRentFreeConfig, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + InitializeRentFreeConfig, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -177,9 +177,11 @@ async fn test_pda_wrong_rent_sponsor() { // Get account interface let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); // Build valid variant let variant: LightAccountVariant = @@ -223,9 +225,11 @@ async fn test_pda_double_decompress_is_noop() { // Get account interface let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -260,9 +264,11 @@ async fn test_pda_double_decompress_is_noop() { // Since the account is now hot, create_load_instructions will return empty let account_interface_2 = ctx .rpc - 
.get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); // Account should be hot now assert!( @@ -306,9 +312,11 @@ async fn test_pda_wrong_config() { // Get account interface let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -354,9 +362,11 @@ async fn test_system_accounts_offset_out_of_bounds() { let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -398,9 +408,11 @@ async fn test_token_accounts_offset_invalid() { let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -447,9 +459,11 @@ async fn test_missing_system_accounts() { let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + 
.value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -490,9 +504,11 @@ async fn test_pda_account_mismatch() { let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } @@ -533,9 +549,11 @@ async fn test_fee_payer_not_signer() { let account_interface = ctx .rpc - .get_account_interface(&zc_pda, &ctx.program_id) + .get_account_interface(&zc_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); let variant: LightAccountVariant = csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::ZcVaultRecordSeeds { owner } diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs index 23afe9a1f5..be2beb2773 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/integration_tests.rs @@ -11,8 +11,8 @@ use anchor_lang::{AnchorDeserialize, InstructionData, ToAccountMetas}; use csdk_anchor_full_derived_test::csdk_anchor_full_derived_test::LightAccountVariant; use light_account::IntoVariant; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, InitializeRentFreeConfig, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + InitializeRentFreeConfig, PdaSpec, }; use 
light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -124,9 +124,11 @@ impl TestContext { // Get account interface let account_interface = self .rpc - .get_account_interface(pda, &self.program_id) + .get_account_interface(pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account should exist"); assert!( account_interface.is_cold(), "Account should be cold after compression" @@ -186,9 +188,11 @@ impl TestContext { // Get account interface let account_interface = self .rpc - .get_account_interface(pda, &self.program_id) + .get_account_interface(pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold after compression" @@ -238,9 +242,11 @@ impl TestContext { // Fetch token account interface let vault_interface = self .rpc - .get_token_account_interface(vault_pda) + .get_token_account_interface(vault_pda, None) .await - .expect("get_token_account_interface should succeed"); + .expect("get_token_account_interface should succeed") + .value + .expect("token account interface should exist"); assert!(vault_interface.is_cold(), "Token vault should be cold"); // Deserialize token data @@ -597,8 +603,10 @@ async fn test_d8_multi_rentfree() { // Decompress first account let interface1 = ctx .rpc - .get_account_interface(&pda1, &ctx.program_id) + .get_account_interface(&pda1, None) .await + .unwrap() + .value .unwrap(); let variant1 = D8MultiRecord1Seeds { owner, id1 } .into_variant(&interface1.account.data[8..]) @@ -620,8 +628,10 @@ async fn test_d8_multi_rentfree() { // Decompress second account let interface2 = ctx .rpc - .get_account_interface(&pda2, &ctx.program_id) + .get_account_interface(&pda2, None) .await + .unwrap() + .value .unwrap(); let variant2 = D8MultiRecord2Seeds { owner, id2 } 
.into_variant(&interface2.account.data[8..]) @@ -736,8 +746,10 @@ async fn test_d8_all() { // Decompress first account (single type) let interface_single = ctx .rpc - .get_account_interface(&pda_single, &ctx.program_id) + .get_account_interface(&pda_single, None) .await + .unwrap() + .value .unwrap(); let variant_single = D8AllSingleSeeds { owner } .into_variant(&interface_single.account.data[8..]) @@ -759,8 +771,10 @@ async fn test_d8_all() { // Decompress second account (multi type) let interface_multi = ctx .rpc - .get_account_interface(&pda_multi, &ctx.program_id) + .get_account_interface(&pda_multi, None) .await + .unwrap() + .value .unwrap(); let variant_multi = D8AllMultiSeeds { owner } .into_variant(&interface_multi.account.data[8..]) @@ -1485,8 +1499,10 @@ async fn test_d9_all() { ) { let interface = ctx .rpc - .get_account_interface(pda, &ctx.program_id) + .get_account_interface(pda, None) .await + .unwrap() + .value .unwrap(); let variant = seeds.into_variant(&interface.account.data[8..]).unwrap(); let spec = PdaSpec::new(interface.clone(), variant, ctx.program_id); @@ -1608,9 +1624,11 @@ async fn test_d8_pda_only_full_lifecycle() { // PHASE 3: Decompress account let account_interface = ctx .rpc - .get_account_interface(&pda, &ctx.program_id) + .get_account_interface(&pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account should exist"); assert!(account_interface.is_cold(), "Account should be cold"); let variant = D8PdaOnlyRecordSeeds { owner } diff --git a/sdk-tests/csdk-anchor-full-derived-test/tests/mint/metadata_test.rs b/sdk-tests/csdk-anchor-full-derived-test/tests/mint/metadata_test.rs index 18273f21df..bbedbd1998 100644 --- a/sdk-tests/csdk-anchor-full-derived-test/tests/mint/metadata_test.rs +++ b/sdk-tests/csdk-anchor-full-derived-test/tests/mint/metadata_test.rs @@ -5,8 +5,8 @@ mod shared; use anchor_lang::{InstructionData, ToAccountMetas}; use 
light_client::interface::{ - decompress_mint::decompress_mint, get_create_accounts_proof, AccountInterfaceExt, - CreateAccountsProofInput, + decompress_mint::{decompress_mint_idempotent, DecompressMintRequest}, + get_create_accounts_proof, CreateAccountsProofInput, }; use light_compressible::{rent::SLOTS_PER_EPOCH, DECOMPRESSED_PDA_DISCRIMINATOR}; use light_program_test::{program_test::TestRpc, Indexer, Rpc}; @@ -224,18 +224,12 @@ async fn test_create_mint_with_metadata() { // PHASE 3: Decompress mint and verify metadata is preserved - // Fetch mint interface (unified hot/cold handling) - // Note: pass the mint PDA (cmint_pda), not the mint signer seed - let mint_interface = rpc - .get_mint_interface(&cmint_pda) - .await - .expect("get_mint_interface should succeed"); - assert!(mint_interface.is_cold(), "Mint should be cold after warp"); - - // Create decompression instruction using decompress_mint helper - let decompress_instructions = decompress_mint(&mint_interface, payer.pubkey(), &rpc) - .await - .expect("decompress_mint should succeed"); + // Create decompression instruction using decompress_mint_idempotent helper + // Note: pass the mint PDA (cmint_pda) as the mint_seed_pubkey + let decompress_instructions = + decompress_mint_idempotent(DecompressMintRequest::new(cmint_pda), payer.pubkey(), &rpc) + .await + .expect("decompress_mint_idempotent should succeed"); // Should have 1 instruction for mint decompression assert_eq!( diff --git a/sdk-tests/justfile b/sdk-tests/justfile index 27c1d0a969..defae0e5c8 100644 --- a/sdk-tests/justfile +++ b/sdk-tests/justfile @@ -1,5 +1,9 @@ # Light Protocol - SDK Tests +# Use absolute path for SBF_OUT_DIR so tests can find program binaries +root_dir := `git rev-parse --show-toplevel` +export SBF_OUT_DIR := root_dir / "target/deploy" + default: @just --list @@ -10,3 +14,30 @@ test: RUSTFLAGS="-D warnings" cargo test-sbf -p sdk-native-test RUSTFLAGS="-D warnings" cargo test-sbf -p sdk-anchor-test RUSTFLAGS="-D warnings" cargo 
test-sbf -p sdk-token-test + RUSTFLAGS="-D warnings" cargo test-sbf -p csdk-anchor-full-derived-test + +# === csdk-anchor-full-derived-test specific tests === + +# Build csdk-anchor-full-derived-test program +build-csdk-full: + cargo build-sbf --manifest-path csdk-anchor-full-derived-test/Cargo.toml + +# Run AMM stress test (100 iterations of compress/decompress cycles) +test-amm-stress: build-csdk-full + RUSTFLAGS="-D warnings" cargo test -p csdk-anchor-full-derived-test --test amm_stress_test -- --nocapture + +# Run AMM basic test +test-amm: build-csdk-full + RUSTFLAGS="-D warnings" cargo test -p csdk-anchor-full-derived-test --test amm_test -- --nocapture + +# Run basic integration tests +test-basic: build-csdk-full + RUSTFLAGS="-D warnings" cargo test -p csdk-anchor-full-derived-test --test basic_test -- --nocapture + +# Run integration tests +test-integration: build-csdk-full + RUSTFLAGS="-D warnings" cargo test -p csdk-anchor-full-derived-test --test integration_tests -- --nocapture + +# Run all csdk-anchor-full-derived tests +test-csdk-full: + RUSTFLAGS="-D warnings" cargo test-sbf -p csdk-anchor-full-derived-test diff --git a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs index 108a1a1a92..42ca690d3d 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/stress_test.rs @@ -13,8 +13,8 @@ use light_batched_merkle_tree::{ initialize_state_tree::InitStateTreeAccountsInstructionData, }; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, CreateAccountsProofInput, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -225,9 +225,11 @@ async fn 
decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // PDA: MinimalRecord let record_interface = ctx .rpc - .get_account_interface(&pdas.record, &ctx.program_id) + .get_account_interface(&pdas.record, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); assert!(record_interface.is_cold(), "MinimalRecord should be cold"); let record_data: MinimalRecord = @@ -244,9 +246,11 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // PDA: ZeroCopyRecord let zc_interface = ctx .rpc - .get_account_interface(&pdas.zc_record, &ctx.program_id) + .get_account_interface(&pdas.zc_record, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(zc_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data: ZeroCopyRecord = @@ -263,17 +267,21 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // ATA let ata_interface = ctx .rpc - .get_ata_interface(&pdas.ata_owner, &pdas.mint) + .get_associated_token_account_interface(&pdas.ata_owner, &pdas.mint, None) .await - .expect("failed to get ATA interface"); + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); // Token PDA: Vault let vault_iface = ctx .rpc - .get_token_account_interface(&pdas.vault) + .get_token_account_interface(&pdas.vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let vault_token_data: Token = @@ -298,24 +306,13 @@ async fn decompress_all(ctx: &mut StressTestContext, pdas: &TestPdas, cached: &C // Mint let mint_iface = ctx .rpc 
- .get_mint_interface(&pdas.mint) + .get_mint_interface(&pdas.mint, None) .await - .expect("failed to get mint interface"); + .expect("failed to get mint interface") + .value + .expect("mint interface should exist"); assert!(mint_iface.is_cold(), "Mint should be cold"); - let (compressed_mint, _) = mint_iface - .compressed() - .expect("cold mint must have compressed data"); - let mint_ai = AccountInterface { - key: pdas.mint, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_mint.clone())), - }; + let mint_ai = AccountInterface::from(mint_iface); let specs: Vec> = vec![ AccountSpec::Pda(record_spec), diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs index 4b505dfe15..7130ccea37 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_all.rs @@ -2,8 +2,8 @@ mod shared; use light_account_pinocchio::token::TokenDataWithSeeds; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, CreateAccountsProofInput, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -206,9 +206,11 @@ async fn test_create_all_derive() { // PDA: MinimalRecord let record_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); 
assert!(record_interface.is_cold(), "MinimalRecord should be cold"); let record_data: MinimalRecord = @@ -224,9 +226,11 @@ async fn test_create_all_derive() { // PDA: ZeroCopyRecord let zc_interface = rpc - .get_account_interface(&zc_record_pda, &program_id) + .get_account_interface(&zc_record_pda, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(zc_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data: ZeroCopyRecord = @@ -242,16 +246,20 @@ async fn test_create_all_derive() { // ATA let ata_interface = rpc - .get_ata_interface(&ata_owner, &mint_pda) + .get_associated_token_account_interface(&ata_owner, &mint_pda, None) .await - .expect("failed to get ATA interface"); + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); // Token PDA: Vault let vault_iface = rpc - .get_token_account_interface(&vault) + .get_token_account_interface(&vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let vault_token_data: Token = @@ -275,24 +283,13 @@ async fn test_create_all_derive() { // Mint let mint_iface = rpc - .get_mint_interface(&mint_pda) + .get_mint_interface(&mint_pda, None) .await - .expect("failed to get mint interface"); + .expect("failed to get mint interface") + .value + .expect("mint interface should exist"); assert!(mint_iface.is_cold(), "Mint should be cold"); - let (compressed_mint, _) = mint_iface - .compressed() - .expect("cold mint must have compressed data"); - let mint_ai = AccountInterface { - key: mint_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - 
rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed_mint.clone())), - }; + let mint_ai = AccountInterface::from(mint_iface); let specs: Vec> = vec![ AccountSpec::Pda(record_spec), diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_ata.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_ata.rs index 73630b6a3f..94ac8ded7d 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_ata.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_ata.rs @@ -1,8 +1,6 @@ mod shared; -use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, -}; +use light_client::interface::{create_load_instructions, get_create_accounts_proof, AccountSpec}; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; use light_sdk_types::LIGHT_TOKEN_PROGRAM_ID; @@ -87,9 +85,11 @@ async fn test_create_ata_derive() { // PHASE 3: Decompress via create_load_instructions let ata_interface = rpc - .get_ata_interface(&ata_owner, &mint) + .get_associated_token_account_interface(&ata_owner, &mint, None) .await - .expect("failed to get ATA interface"); + .expect("failed to get ATA interface") + .value + .expect("ATA interface should exist"); assert!(ata_interface.is_cold(), "ATA should be cold"); let specs: Vec> = vec![AccountSpec::Ata(ata_interface)]; diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_mint.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_mint.rs index 30bd074149..20233200c4 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_mint.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_mint.rs @@ -1,8 +1,8 @@ mod shared; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, + create_load_instructions, get_create_accounts_proof, 
AccountInterface, AccountSpec, + CreateAccountsProofInput, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -101,25 +101,13 @@ async fn test_create_mint_derive() { // PHASE 3: Decompress via create_load_instructions let mint_interface = rpc - .get_mint_interface(&mint_pda) + .get_mint_interface(&mint_pda, None) .await - .expect("failed to get mint interface"); + .expect("failed to get mint interface") + .value + .expect("mint interface should exist"); assert!(mint_interface.is_cold(), "Mint should be cold"); - - let (compressed, _mint_data) = mint_interface - .compressed() - .expect("cold mint must have compressed data"); - let mint_account_interface = AccountInterface { - key: mint_pda, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed.clone())), - }; + let mint_account_interface = AccountInterface::from(mint_interface); let specs: Vec> = vec![AccountSpec::Mint(mint_account_interface)]; diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_pda.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_pda.rs index ac8af4db94..a7982671b8 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_pda.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_pda.rs @@ -1,8 +1,8 @@ mod shared; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -84,9 +84,11 @@ async fn test_create_single_pda_derive() { // PHASE 3: Decompress via create_load_instructions let account_interface = rpc - 
.get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get MinimalRecord interface"); + .expect("failed to get MinimalRecord interface") + .value + .expect("MinimalRecord interface should exist"); assert!(account_interface.is_cold(), "MinimalRecord should be cold"); let data: MinimalRecord = diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_token_vault.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_token_vault.rs index 777bfa4bb4..5bfcbc8959 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_token_vault.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_token_vault.rs @@ -1,8 +1,8 @@ mod shared; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + ColdContext, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -92,9 +92,11 @@ async fn test_create_token_vault_derive() { // PHASE 3: Decompress vault let vault_iface = rpc - .get_token_account_interface(&vault) + .get_token_account_interface(&vault, None) .await - .expect("failed to get vault interface"); + .expect("failed to get vault interface") + .value + .expect("vault interface should exist"); assert!(vault_iface.is_cold(), "Vault should be cold"); let token_data: Token = diff --git a/sdk-tests/pinocchio-light-program-test/tests/test_create_two_mints.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_two_mints.rs index ef04c1eed6..ebb294fbd9 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_two_mints.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_two_mints.rs @@ -1,8 +1,8 @@ mod shared; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, 
AccountInterface, AccountInterfaceExt, - AccountSpec, ColdContext, CreateAccountsProofInput, + create_load_instructions, get_create_accounts_proof, AccountInterface, AccountSpec, + CreateAccountsProofInput, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -128,36 +128,23 @@ async fn test_create_two_mints_derive() { shared::assert_onchain_closed(&mut rpc, &mint_b_pda, "MintB").await; // PHASE 3: Decompress both mints via create_load_instructions - let build_mint_account_interface = |mint_interface: light_client::interface::MintInterface| { - let (compressed, _mint_data) = mint_interface - .compressed() - .expect("cold mint must have compressed data"); - AccountInterface { - key: mint_interface.mint, - account: solana_account::Account { - lamports: 0, - data: vec![], - owner: light_token::instruction::LIGHT_TOKEN_PROGRAM_ID, - executable: false, - rent_epoch: 0, - }, - cold: Some(ColdContext::Account(compressed.clone())), - } - }; - let mint_a_interface = rpc - .get_mint_interface(&mint_a_pda) + .get_mint_interface(&mint_a_pda, None) .await - .expect("failed to get mint A interface"); + .expect("failed to get mint A interface") + .value + .expect("mint A interface should exist"); assert!(mint_a_interface.is_cold(), "Mint A should be cold"); - let mint_a_ai = build_mint_account_interface(mint_a_interface); + let mint_a_ai = AccountInterface::from(mint_a_interface); let mint_b_interface = rpc - .get_mint_interface(&mint_b_pda) + .get_mint_interface(&mint_b_pda, None) .await - .expect("failed to get mint B interface"); + .expect("failed to get mint B interface") + .value + .expect("mint B interface should exist"); assert!(mint_b_interface.is_cold(), "Mint B should be cold"); - let mint_b_ai = build_mint_account_interface(mint_b_interface); + let mint_b_ai = AccountInterface::from(mint_b_interface); let specs: Vec> = vec![AccountSpec::Mint(mint_a_ai), AccountSpec::Mint(mint_b_ai)]; diff --git 
a/sdk-tests/pinocchio-light-program-test/tests/test_create_zero_copy_record.rs b/sdk-tests/pinocchio-light-program-test/tests/test_create_zero_copy_record.rs index 955c1bb0c0..209c8ba025 100644 --- a/sdk-tests/pinocchio-light-program-test/tests/test_create_zero_copy_record.rs +++ b/sdk-tests/pinocchio-light-program-test/tests/test_create_zero_copy_record.rs @@ -1,8 +1,8 @@ mod shared; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Rpc}; @@ -77,9 +77,11 @@ async fn test_create_zero_copy_record_derive() { // PHASE 3: Decompress via create_load_instructions let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get ZeroCopyRecord interface"); + .expect("failed to get ZeroCopyRecord interface") + .value + .expect("ZeroCopyRecord interface should exist"); assert!(account_interface.is_cold(), "ZeroCopyRecord should be cold"); let zc_data: ZeroCopyRecord = diff --git a/sdk-tests/pinocchio-manual-test/tests/account_loader.rs b/sdk-tests/pinocchio-manual-test/tests/account_loader.rs index a72630b068..e9418a57df 100644 --- a/sdk-tests/pinocchio-manual-test/tests/account_loader.rs +++ b/sdk-tests/pinocchio-manual-test/tests/account_loader.rs @@ -7,8 +7,8 @@ mod shared; use light_account_pinocchio::{CompressionState, IntoVariant, LightDiscriminator}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use 
light_program_test::{program_test::TestRpc, Indexer, Rpc}; @@ -116,9 +116,11 @@ async fn test_zero_copy_create_compress_decompress() { // PHASE 4: Decompress account let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" diff --git a/sdk-tests/pinocchio-manual-test/tests/test.rs b/sdk-tests/pinocchio-manual-test/tests/test.rs index e652b91fdc..8a977caaeb 100644 --- a/sdk-tests/pinocchio-manual-test/tests/test.rs +++ b/sdk-tests/pinocchio-manual-test/tests/test.rs @@ -6,8 +6,8 @@ mod shared; use light_account_pinocchio::{CompressionState, IntoVariant}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{program_test::TestRpc, Indexer, Rpc}; @@ -112,9 +112,11 @@ async fn test_create_compress_decompress() { // PHASE 4: Decompress account let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)" diff --git a/sdk-tests/single-account-loader-test/tests/test.rs b/sdk-tests/single-account-loader-test/tests/test.rs index 1ff1efcf7c..49e51e544c 100644 --- a/sdk-tests/single-account-loader-test/tests/test.rs +++ b/sdk-tests/single-account-loader-test/tests/test.rs @@ -3,8 +3,8 @@ use anchor_lang::{InstructionData, 
ToAccountMetas}; use light_account::{derive_rent_sponsor_pda, IntoVariant}; use light_client::interface::{ - create_load_instructions, get_create_accounts_proof, AccountInterfaceExt, AccountSpec, - CreateAccountsProofInput, InitializeRentFreeConfig, PdaSpec, + create_load_instructions, get_create_accounts_proof, AccountSpec, CreateAccountsProofInput, + InitializeRentFreeConfig, PdaSpec, }; use light_compressible::rent::SLOTS_PER_EPOCH; use light_program_test::{ @@ -240,9 +240,11 @@ async fn test_zero_copy_record_full_lifecycle() { // PHASE 4: Decompress account let account_interface = rpc - .get_account_interface(&record_pda, &program_id) + .get_account_interface(&record_pda, None) .await - .expect("failed to get account interface"); + .expect("failed to get account interface") + .value + .expect("account interface should exist"); assert!( account_interface.is_cold(), "Account should be cold (compressed)"