diff --git a/.github/workflows/programs.yml b/.github/workflows/programs.yml
index c84da7dfe3..9cce8303e0 100644
--- a/.github/workflows/programs.yml
+++ b/.github/workflows/programs.yml
@@ -7,7 +7,7 @@ on:
- "program-tests/**"
- "program-libs/**"
- "prover/client/**"
- - ".github/workflows/light-system-programs-tests.yml"
+ - ".github/workflows/programs.yml"
pull_request:
branches:
- "*"
@@ -16,7 +16,7 @@ on:
- "program-tests/**"
- "program-libs/**"
- "prover/client/**"
- - ".github/workflows/light-system-programs-tests.yml"
+ - ".github/workflows/programs.yml"
types:
- opened
- synchronize
@@ -24,6 +24,8 @@ on:
- ready_for_review
name: programs
+permissions:
+ contents: read
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -31,7 +33,7 @@ concurrency:
jobs:
system-programs:
- name: programs
+ name: ${{ matrix.test-group }}
if: github.event.pull_request.draft == false
runs-on: warp-ubuntu-latest-x64-4x
timeout-minutes: 90
@@ -52,27 +54,16 @@ jobs:
strategy:
matrix:
- include:
- - program: account-compression-and-registry
- sub-tests: '["cargo-test-sbf -p account-compression-test", "cargo-test-sbf -p registry-test"]'
- - program: light-system-program-address
- sub-tests: '["cargo-test-sbf -p system-test -- test_with_address", "cargo-test-sbf -p e2e-test", "cargo-test-sbf -p compressed-token-test --test light_token"]'
- - program: light-system-program-compression
- sub-tests: '["cargo-test-sbf -p system-test -- test_with_compression", "cargo-test-sbf -p system-test --test test_re_init_cpi_account"]'
- - program: compressed-token-and-e2e
- sub-tests: '["cargo test -p light-compressed-token", "cargo-test-sbf -p compressed-token-test --test v1", "cargo-test-sbf -p compressed-token-test --test mint"]'
- - program: compressed-token-batched-tree
- sub-tests: '["cargo-test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree"]'
- - program: system-cpi-test
- sub-tests:
- '["cargo-test-sbf -p system-cpi-test", "cargo test -p light-system-program-pinocchio",
- "cargo-test-sbf -p system-cpi-v2-test -- --skip functional_ --skip event::parse", "cargo-test-sbf -p system-cpi-v2-test -- event::parse",
- "cargo-test-sbf -p compressed-token-test --test transfer2"
- ]'
- - program: system-cpi-test-v2-functional-read-only
- sub-tests: '["cargo-test-sbf -p system-cpi-v2-test -- functional_read_only"]'
- - program: system-cpi-test-v2-functional-account-infos
- sub-tests: '["cargo-test-sbf -p system-cpi-v2-test -- functional_account_infos"]'
+ test-group:
+ - account-compression-and-registry
+ - system-address
+ - system-compression
+ - compressed-token-and-e2e
+ - compressed-token-batched-tree
+ - system-cpi
+ - system-cpi-v2-functional-read-only
+ - system-cpi-v2-functional-account-infos
+
steps:
- name: Checkout sources
uses: actions/checkout@v6
@@ -87,34 +78,7 @@ jobs:
run: |
just cli build
- - name: ${{ matrix.program }}
+ - name: Run tests
+ working-directory: program-tests
run: |
-
- IFS=',' read -r -a sub_tests <<< "${{ join(fromJSON(matrix['sub-tests']), ', ') }}"
- for subtest in "${sub_tests[@]}"
- do
- echo "$subtest"
-
- # Retry logic for flaky batched-tree test
- if [[ "$subtest" == *"test_transfer_with_photon_and_batched_tree"* ]]; then
- echo "Running flaky test with retry logic (max 3 attempts)..."
- attempt=1
- max_attempts=3
- until RUSTFLAGS="-D warnings" eval "$subtest"; do
- attempt=$((attempt + 1))
- if [ $attempt -gt $max_attempts ]; then
- echo "Test failed after $max_attempts attempts"
- exit 1
- fi
- echo "Attempt $attempt/$max_attempts failed, retrying..."
- sleep 5
- done
- echo "Test passed on attempt $attempt"
- else
- RUSTFLAGS="-D warnings" eval "$subtest"
- if [ "$subtest" == "cargo-test-sbf -p e2e-test" ]; then
- just programs build-compressed-token-small
- RUSTFLAGS="-D warnings" eval "$subtest -- --test test_10_all"
- fi
- fi
- done
+ just ci-${{ matrix.test-group }}
diff --git a/.mise.toml b/.mise.toml
new file mode 100644
index 0000000000..c5492f4b9e
--- /dev/null
+++ b/.mise.toml
@@ -0,0 +1,4 @@
+# Disable mise's Go management for this project.
+# We use our own Go installation via devenv.sh.
+[settings]
+disable_tools = ["go"]
diff --git a/Cargo.lock b/Cargo.lock
index 0ca14e0aaa..25d90546f4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3858,6 +3858,7 @@ dependencies = [
"borsh 0.10.4",
"light-compressed-account",
"light-hasher",
+ "light-token-interface",
"light-zero-copy",
"rand 0.8.5",
"thiserror 2.0.17",
diff --git a/Cargo.toml b/Cargo.toml
index 7c54409b9d..3dd5881ce7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -147,6 +147,7 @@ pinocchio-pubkey = { version = "0.3.0" }
pinocchio-system = { version = "0.3.0" }
bs58 = "^0.5.1"
sha2 = "0.10"
+hex = "0.4"
litesvm = "0.7"
# Anchor
anchor-lang = { version = "0.31.1" }
diff --git a/cli/src/commands/test-validator/index.ts b/cli/src/commands/test-validator/index.ts
index e61796d0c1..9a33d58c86 100644
--- a/cli/src/commands/test-validator/index.ts
+++ b/cli/src/commands/test-validator/index.ts
@@ -43,6 +43,22 @@ class SetupCommand extends Command {
"Runs a test validator without starting a new prover service.",
default: false,
}),
+ forester: Flags.boolean({
+ description:
+ "Start the forester service for auto-compression of compressible accounts.",
+ default: false,
+ }),
+ "forester-port": Flags.integer({
+ description: "Port for the forester API server.",
+ required: false,
+ default: 8080,
+ }),
+ "compressible-pda-program": Flags.string({
+ description:
+ "Compressible PDA programs to track. Format: 'program_id:discriminator_base58'. Can be specified multiple times.",
+ required: false,
+ multiple: true,
+ }),
"skip-system-accounts": Flags.boolean({
description:
"Runs a test validator without initialized light system accounts.",
@@ -147,6 +163,13 @@ class SetupCommand extends Command {
default: true,
allowNo: true,
}),
+ "account-dir": Flags.string({
+ description:
+ "Additional directory containing account JSON files to preload. Can be specified multiple times.",
+ required: false,
+ multiple: true,
+ summary: "Usage: --account-dir <path>",
+ }),
};
validatePrograms(
@@ -216,6 +239,7 @@ class SetupCommand extends Command {
await stopTestEnv({
indexer: !flags["skip-indexer"],
prover: !flags["skip-prover"],
+ forester: flags.forester,
});
this.log("\nTest validator stopped successfully \x1b[32m✔\x1b[0m");
} else {
@@ -268,6 +292,9 @@ class SetupCommand extends Command {
indexerPort: flags["indexer-port"],
proverPort: flags["prover-port"],
prover: !flags["skip-prover"],
+ forester: flags.forester,
+ foresterPort: flags["forester-port"],
+ compressiblePdaPrograms: flags["compressible-pda-program"],
skipSystemAccounts: flags["skip-system-accounts"],
geyserConfig: flags["geyser-config"],
validatorArgs: flags["validator-args"],
@@ -279,6 +306,7 @@ class SetupCommand extends Command {
verbose: flags.verbose,
skipReset: flags["skip-reset"],
useSurfpool: flags["use-surfpool"],
+ additionalAccountDirs: flags["account-dir"],
});
this.log("\nSetup tasks completed successfully \x1b[32m✔\x1b[0m");
}
diff --git a/cli/src/utils/constants.ts b/cli/src/utils/constants.ts
index db2547d45e..7ff7e03730 100644
--- a/cli/src/utils/constants.ts
+++ b/cli/src/utils/constants.ts
@@ -19,6 +19,7 @@ export const SOLANA_VALIDATOR_PROCESS_NAME = "solana-test-validator";
export const SURFPOOL_PROCESS_NAME = "surfpool";
export const LIGHT_PROVER_PROCESS_NAME = "light-prover";
export const INDEXER_PROCESS_NAME = "photon";
+export const FORESTER_PROCESS_NAME = "forester";
export const SURFPOOL_VERSION = "1.0.1";
export const SURFPOOL_RELEASE_TAG = "v1.0.1-light";
diff --git a/cli/src/utils/initTestEnv.ts b/cli/src/utils/initTestEnv.ts
index 71722b6cff..7dc57d2a92 100644
--- a/cli/src/utils/initTestEnv.ts
+++ b/cli/src/utils/initTestEnv.ts
@@ -24,6 +24,11 @@ import {
} from "./process";
import { killProver, startProver } from "./processProverServer";
import { killIndexer, startIndexer } from "./processPhotonIndexer";
+import {
+ killForester,
+ startForester,
+ getPayerForForester,
+} from "./processForester";
import { Connection, PublicKey } from "@solana/web3.js";
import { execSync } from "child_process";
@@ -101,8 +106,10 @@ async function getProgramOwnedAccounts(
export async function stopTestEnv(options: {
indexer: boolean;
prover: boolean;
+ forester?: boolean;
}) {
const processesToKill = [
+ { name: "forester", condition: options.forester ?? false, killFunction: killForester },
{ name: "photon", condition: options.indexer, killFunction: killIndexer },
{ name: "prover", condition: options.prover, killFunction: killProver },
{
@@ -135,9 +142,11 @@ export async function initTestEnv({
skipSystemAccounts,
indexer = true,
prover = true,
+ forester = false,
rpcPort = 8899,
indexerPort = 8784,
proverPort = 3001,
+ foresterPort = 8080,
gossipHost = "127.0.0.1",
checkPhotonVersion = true,
photonDatabaseUrl,
@@ -148,6 +157,8 @@ export async function initTestEnv({
verbose,
skipReset,
useSurfpool,
+ compressiblePdaPrograms,
+ additionalAccountDirs,
}: {
additionalPrograms?: { address: string; path: string }[];
upgradeablePrograms?: {
@@ -158,9 +169,11 @@ export async function initTestEnv({
skipSystemAccounts?: boolean;
indexer: boolean;
prover: boolean;
+ forester?: boolean;
rpcPort?: number;
indexerPort?: number;
proverPort?: number;
+ foresterPort?: number;
gossipHost?: string;
checkPhotonVersion?: boolean;
photonDatabaseUrl?: string;
@@ -171,6 +184,8 @@ export async function initTestEnv({
verbose?: boolean;
skipReset?: boolean;
useSurfpool?: boolean;
+ compressiblePdaPrograms?: string[];
+ additionalAccountDirs?: string[];
}) {
if (useSurfpool) {
// For surfpool we can await startTestValidator because spawnBinary returns
@@ -189,6 +204,7 @@ export async function initTestEnv({
verbose,
skipReset,
useSurfpool,
+ additionalAccountDirs,
});
// Surfpool only supports JSON-RPC POST, not GET /health.
await confirmRpcReadiness(`http://127.0.0.1:${rpcPort}`);
@@ -207,6 +223,7 @@ export async function initTestEnv({
verbose,
skipReset,
useSurfpool,
+ additionalAccountDirs,
});
await waitForServers([{ port: rpcPort, path: "/health" }]);
await confirmServerStability(`http://127.0.0.1:${rpcPort}/health`);
@@ -250,6 +267,27 @@
      startSlot,
    );
  }
+
+  if (forester) {
+    if (!indexer || !prover) {
+      throw new Error("Forester requires both indexer and prover to be running");
+    }
+    try {
+      const payer = getPayerForForester();
+      await startForester({
+        rpcUrl: `http://127.0.0.1:${rpcPort}`,
+        wsRpcUrl: `ws://127.0.0.1:${rpcPort + 1}`,
+        indexerUrl: `http://127.0.0.1:${indexerPort}`,
+        proverUrl: `http://127.0.0.1:${proverPort}`,
+        payer,
+        foresterPort,
+        compressiblePdaPrograms,
+      });
+    } catch (error) {
+      console.error("Failed to start forester:", error);
+      throw error;
+    }
+  }
}
export async function initTestEnvIfNeeded({
@@ -448,6 +507,7 @@ export async function getSurfpoolArgs({
rpcPort,
gossipHost,
downloadBinaries = true,
+ additionalAccountDirs,
}: {
additionalPrograms?: { address: string; path: string }[];
upgradeablePrograms?: {
@@ -459,6 +519,7 @@ export async function getSurfpoolArgs({
rpcPort?: number;
gossipHost?: string;
downloadBinaries?: boolean;
+ additionalAccountDirs?: string[];
}): Promise<string[]> {
const dirPath = programsDirPath();
@@ -508,6 +569,13 @@ export async function getSurfpoolArgs({
args.push("--account-dir", accountsPath);
}
+ // Load additional account directories
+ if (additionalAccountDirs) {
+ for (const accountDir of additionalAccountDirs) {
+ args.push("--account-dir", path.resolve(accountDir));
+ }
+ }
+
return args;
}
@@ -603,6 +671,7 @@ export async function startTestValidator({
verbose,
skipReset,
useSurfpool,
+ additionalAccountDirs,
}: {
additionalPrograms?: { address: string; path: string }[];
upgradeablePrograms?: {
@@ -620,6 +689,7 @@ export async function startTestValidator({
verbose?: boolean;
skipReset?: boolean;
useSurfpool?: boolean;
+ additionalAccountDirs?: string[];
}) {
if (useSurfpool) {
const command = await ensureSurfpoolBinary();
@@ -629,6 +699,7 @@ export async function startTestValidator({
skipSystemAccounts,
rpcPort,
gossipHost,
+ additionalAccountDirs,
});
await killTestValidator(rpcPort);
diff --git a/cli/src/utils/processForester.ts b/cli/src/utils/processForester.ts
new file mode 100644
index 0000000000..9bbc255e72
--- /dev/null
+++ b/cli/src/utils/processForester.ts
@@ -0,0 +1,107 @@
+import which from "which";
+import { killProcess, spawnBinary, waitForServers } from "./process";
+import { FORESTER_PROCESS_NAME } from "./constants";
+import { exec } from "node:child_process";
+import * as util from "node:util";
+import { exit } from "node:process";
+import * as fs from "fs";
+import * as path from "path";
+
+const execAsync = util.promisify(exec);
+
+async function isForesterInstalled(): Promise<boolean> {
+ try {
+ const resolvedOrNull = which.sync("forester", { nothrow: true });
+ return resolvedOrNull !== null;
+ } catch (error) {
+ return false;
+ }
+}
+
+function getForesterInstallMessage(): string {
+ return `\nForester not found. Please install it by running: "cargo install --git https://github.com/Lightprotocol/light-protocol forester --locked --force"`;
+}
+
+export interface ForesterConfig {
+ rpcUrl: string;
+ wsRpcUrl: string;
+ indexerUrl: string;
+ proverUrl: string;
+ payer: string;
+ foresterPort: number;
+ compressiblePdaPrograms?: string[];
+}
+
+/**
+ * Starts the forester service for auto-compression of compressible accounts.
+ *
+ * @param config - Forester configuration
+ */
+export async function startForester(config: ForesterConfig) {
+ await killForester();
+
+ if (!(await isForesterInstalled())) {
+ console.log(getForesterInstallMessage());
+ return exit(1);
+ }
+
+ console.log("Starting forester...");
+
+ const args: string[] = [
+ "start",
+ "--rpc-url",
+ config.rpcUrl,
+ "--ws-rpc-url",
+ config.wsRpcUrl,
+ "--indexer-url",
+ config.indexerUrl,
+ "--prover-url",
+ config.proverUrl,
+ "--payer",
+ config.payer,
+ "--api-server-port",
+ config.foresterPort.toString(),
+ "--enable-compressible",
+ ];
+
+ // Add compressible PDA programs if specified
+ if (config.compressiblePdaPrograms && config.compressiblePdaPrograms.length > 0) {
+ for (const program of config.compressiblePdaPrograms) {
+ args.push("--compressible-pda-program", program);
+ }
+ }
+
+ spawnBinary(FORESTER_PROCESS_NAME, args);
+ await waitForServers([{ port: config.foresterPort, path: "/health" }]);
+ console.log("Forester started successfully!");
+}
+
+export async function killForester() {
+ await killProcess(FORESTER_PROCESS_NAME);
+}
+
+/**
+ * Gets the payer keypair as a JSON array string for forester.
+ * Reads from ~/.config/solana/id.json or SOLANA_PAYER environment variable.
+ *
+ * @returns JSON array string of the keypair bytes
+ */
+export function getPayerForForester(): string {
+ // Check for SOLANA_PAYER environment variable first
+ if (process.env.SOLANA_PAYER) {
+ return process.env.SOLANA_PAYER;
+ }
+
+ // Default to standard Solana keypair location
+ const homeDir = process.env.HOME || process.env.USERPROFILE || "";
+ const keypairPath = path.join(homeDir, ".config", "solana", "id.json");
+
+ if (fs.existsSync(keypairPath)) {
+ const keypairData = fs.readFileSync(keypairPath, "utf-8");
+ return keypairData.trim();
+ }
+
+ throw new Error(
+ "No payer keypair found. Set SOLANA_PAYER environment variable or create ~/.config/solana/id.json",
+ );
+}
diff --git a/forester/Cargo.toml b/forester/Cargo.toml
index 3b0cc6aef2..21902f6436 100644
--- a/forester/Cargo.toml
+++ b/forester/Cargo.toml
@@ -45,7 +45,7 @@ futures = { workspace = true }
thiserror = { workspace = true }
borsh = { workspace = true }
bs58 = { workspace = true }
-hex = "0.4"
+hex = { workspace = true }
env_logger = { workspace = true }
async-trait = { workspace = true }
tracing = { workspace = true }
diff --git a/forester/justfile b/forester/justfile
index 430267c08f..ad7798ecae 100644
--- a/forester/justfile
+++ b/forester/justfile
@@ -35,3 +35,8 @@ test-compressible-mint: build-compressible-test-deps
test-compressible-ctoken: build-compressible-test-deps
RUST_LOG=forester=debug,light_client=debug \
cargo test --package forester --test test_compressible_ctoken -- --nocapture
+
+# Test for indexer interface scenarios (creates test data for photon)
+test-indexer-interface: build-test-deps
+ RUST_LOG=forester=debug,light_client=debug \
+ cargo test --package forester --test test_indexer_interface -- --nocapture
diff --git a/forester/src/compressible/bootstrap_helpers.rs b/forester/src/compressible/bootstrap_helpers.rs
index c358bacbfc..8ad43ec638 100644
--- a/forester/src/compressible/bootstrap_helpers.rs
+++ b/forester/src/compressible/bootstrap_helpers.rs
@@ -5,12 +5,18 @@
//! - Account field extraction from JSON responses
//! - Standard and V2 API patterns
-use std::time::Duration;
+use std::{
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+ time::Duration,
+};
use serde_json::json;
use solana_sdk::pubkey::Pubkey;
-use tokio::time::timeout;
-use tracing::debug;
+use tokio::{sync::oneshot, time::timeout};
+use tracing::{debug, info};
use super::config::{DEFAULT_PAGE_SIZE, DEFAULT_PAGINATION_DELAY_MS};
use crate::Result;
@@ -344,3 +350,127 @@ where
Ok((page_count, total_fetched, total_inserted))
}
+
+/// Result of a bootstrap operation
+#[derive(Debug, Clone)]
+pub struct BootstrapResult {
+ /// Number of pages fetched (1 for standard API)
+ pub pages: usize,
+ /// Total number of accounts fetched from RPC
+ pub fetched: usize,
+ /// Number of accounts successfully inserted/processed
+ pub inserted: usize,
+}
+
+/// High-level bootstrap runner that handles common scaffolding.
+///
+/// This helper encapsulates:
+/// - Shutdown flag setup and listener spawning
+/// - HTTP client creation
+/// - Automatic selection between standard and V2 APIs based on localhost detection
+/// - Consistent logging with the provided label
+///
+/// # Arguments
+/// * `rpc_url` - The RPC endpoint URL
+/// * `program_id` - The program ID to fetch accounts from
+/// * `filters` - Optional memcmp/dataSize filters for the query
+/// * `shutdown_rx` - Optional shutdown receiver for graceful cancellation
+/// * `process_fn` - Closure called for each fetched account; returns true if successfully processed
+/// * `label` - Label for log messages (e.g., "Mint", "CToken", "PDA")
+///
+/// # Returns
+/// A `BootstrapResult` containing page count, fetched count, and inserted count.
+pub async fn run_bootstrap<F>(
+    rpc_url: &str,
+    program_id: &Pubkey,
+    filters: Option<Vec<serde_json::Value>>,
+    shutdown_rx: Option<oneshot::Receiver<()>>,
+    process_fn: F,
+    label: &str,
+) -> Result<BootstrapResult>
+where
+ F: FnMut(RawAccountData) -> bool,
+{
+ info!("Starting bootstrap of {} accounts", label);
+
+ // Set up shutdown flag and listener task
+ let shutdown_flag = Arc::new(AtomicBool::new(false));
+
+ // Spawn shutdown listener and keep handle for cleanup
+ let shutdown_listener_handle = shutdown_rx.map(|rx| {
+ let shutdown_flag_clone = shutdown_flag.clone();
+ tokio::spawn(async move {
+ let _ = rx.await;
+ shutdown_flag_clone.store(true, Ordering::SeqCst);
+ })
+ });
+
+ let client = reqwest::Client::new();
+
+ info!(
+ "Bootstrapping {} accounts from program {}",
+ label, program_id
+ );
+
+ let result = if is_localhost(rpc_url) {
+ debug!("Detected localhost, using standard getProgramAccounts");
+ let api_result = bootstrap_standard_api(
+ &client,
+ rpc_url,
+ program_id,
+ filters,
+ Some(&shutdown_flag),
+ process_fn,
+ )
+ .await;
+
+ // Abort shutdown listener before returning (success or error)
+ if let Some(handle) = shutdown_listener_handle {
+ handle.abort();
+ }
+
+ let (fetched, inserted) = api_result?;
+
+ info!(
+ "{} bootstrap complete: {} fetched, {} inserted",
+ label, fetched, inserted
+ );
+
+ BootstrapResult {
+ pages: 1,
+ fetched,
+ inserted,
+ }
+ } else {
+ debug!("Using getProgramAccountsV2 with pagination");
+ let api_result = bootstrap_v2_api(
+ &client,
+ rpc_url,
+ program_id,
+ filters,
+ Some(&shutdown_flag),
+ process_fn,
+ )
+ .await;
+
+ // Abort shutdown listener before returning (success or error)
+ if let Some(handle) = shutdown_listener_handle {
+ handle.abort();
+ }
+
+ let (pages, fetched, inserted) = api_result?;
+
+ info!(
+ "{} bootstrap complete: {} pages, {} fetched, {} inserted",
+ label, pages, fetched, inserted
+ );
+
+ BootstrapResult {
+ pages,
+ fetched,
+ inserted,
+ }
+ };
+
+ Ok(result)
+}
diff --git a/forester/src/compressible/config.rs b/forester/src/compressible/config.rs
index 46b65e35b9..14668317a2 100644
--- a/forester/src/compressible/config.rs
+++ b/forester/src/compressible/config.rs
@@ -34,7 +34,7 @@ pub const DEFAULT_PAGINATION_DELAY_MS: u64 = 100;
/// Configuration for a compressible PDA program.
///
-/// Can be specified via CLI (using `program_id:discriminator_base58` format)
+/// Can be specified via CLI `--compressible-pda-program` (using `program_id:discriminator_base58` format)
/// or via config file using the serialized struct format.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PdaProgramConfig {
@@ -146,7 +146,7 @@ pub struct CompressibleConfig {
#[serde(default = "default_max_concurrent_batches")]
pub max_concurrent_batches: usize,
/// Compressible PDA programs to track and compress.
- /// Can be specified in config file or via CLI `--pda-program` flags.
+ /// Can be specified in config file or via CLI `--compressible-pda-program` flags.
/// CLI values are merged with config file values.
#[serde(default)]
pub pda_programs: Vec<PdaProgramConfig>,
diff --git a/forester/src/compressible/ctoken/state.rs b/forester/src/compressible/ctoken/state.rs
index eaf0272fac..5dbc5b9961 100644
--- a/forester/src/compressible/ctoken/state.rs
+++ b/forester/src/compressible/ctoken/state.rs
@@ -76,6 +76,7 @@ impl CTokenAccountTracker {
/// Update tracker with an already-deserialized Token.
/// Use this to avoid double deserialization when the Token is already available.
+ /// Skips mint accounts (only tracks actual token accounts).
pub fn update_from_token(
&self,
pubkey: Pubkey,
@@ -83,6 +84,12 @@ impl CTokenAccountTracker {
lamports: u64,
account_size: usize,
) -> Result<()> {
+ // Skip mint accounts - only track actual token accounts
+ if !ctoken.is_token_account() {
+ debug!("Skipping non-token account {}", pubkey);
+ return Ok(());
+ }
+
let compressible_slot = match calculate_compressible_slot(&ctoken, lamports, account_size) {
Ok(slot) => slot,
Err(e) => {
diff --git a/forester/src/compressible/mint/bootstrap.rs b/forester/src/compressible/mint/bootstrap.rs
index b20aebfe9d..104c8dd00c 100644
--- a/forester/src/compressible/mint/bootstrap.rs
+++ b/forester/src/compressible/mint/bootstrap.rs
@@ -1,4 +1,4 @@
-use std::{sync::Arc, time::Duration};
+use std::sync::Arc;
use tokio::sync::oneshot;
use tracing::{debug, info};
@@ -6,9 +6,7 @@ use tracing::{debug, info};
use super::state::MintAccountTracker;
use crate::{
compressible::{
- bootstrap_helpers::{
- bootstrap_standard_api, bootstrap_v2_api, is_localhost, RawAccountData,
- },
+ bootstrap_helpers::{run_bootstrap, RawAccountData},
config::{ACCOUNT_TYPE_OFFSET, MINT_ACCOUNT_TYPE_FILTER},
traits::CompressibleTracker,
},
@@ -21,31 +19,18 @@ pub async fn bootstrap_mint_accounts(
tracker: Arc<MintAccountTracker>,
shutdown_rx: Option<oneshot::Receiver<()>>,
) -> Result<()> {
- info!("Starting bootstrap of decompressed Mint accounts");
-
- // Set up shutdown flag
- let shutdown_flag = Arc::new(std::sync::atomic::AtomicBool::new(false));
-
- if let Some(rx) = shutdown_rx {
- let shutdown_flag_clone = shutdown_flag.clone();
- tokio::spawn(async move {
- let _ = rx.await;
- shutdown_flag_clone.store(true, std::sync::atomic::Ordering::SeqCst);
- });
- }
-
- let client = reqwest::Client::builder()
- .timeout(Duration::from_secs(30))
- .build()?;
-
// Light Token Program ID
let program_id =
solana_sdk::pubkey::Pubkey::new_from_array(light_token_interface::LIGHT_TOKEN_PROGRAM_ID);
- info!(
- "Bootstrapping decompressed Mint accounts from program {}",
- program_id
- );
+ // Filter for decompressed Mint accounts (account_type = 1)
+ let filters = Some(vec![serde_json::json!({
+ "memcmp": {
+ "offset": ACCOUNT_TYPE_OFFSET,
+ "bytes": MINT_ACCOUNT_TYPE_FILTER,
+ "encoding": "base58"
+ }
+ })]);
// Process function that updates tracker
let process_account = |raw_data: RawAccountData| -> bool {
@@ -58,50 +43,22 @@ pub async fn bootstrap_mint_accounts(
true
};
- // Filter for decompressed Mint accounts (account_type = 1)
- let filters = Some(vec![serde_json::json!({
- "memcmp": {
- "offset": ACCOUNT_TYPE_OFFSET,
- "bytes": MINT_ACCOUNT_TYPE_FILTER,
- "encoding": "base58"
- }
- })]);
-
- if is_localhost(&rpc_url) {
- let (total_fetched, total_inserted) = bootstrap_standard_api(
- &client,
- &rpc_url,
- &program_id,
- filters,
- Some(&shutdown_flag),
- process_account,
- )
- .await?;
-
- info!(
- "Mint bootstrap complete: {} fetched, {} decompressed mints tracked",
- total_fetched, total_inserted
- );
- } else {
- let (page_count, total_fetched, total_inserted) = bootstrap_v2_api(
- &client,
- &rpc_url,
- &program_id,
- filters,
- Some(&shutdown_flag),
- process_account,
- )
- .await?;
-
- info!(
- "Mint bootstrap finished: {} pages, {} fetched, {} decompressed mints tracked",
- page_count, total_fetched, total_inserted
- );
- }
+ let result = run_bootstrap(
+ &rpc_url,
+ &program_id,
+ filters,
+ shutdown_rx,
+ process_account,
+ "Mint",
+ )
+ .await?;
info!(
- "Mint bootstrap finished: {} total mints tracked",
- tracker.len()
+ "Mint bootstrap finished: {} total mints tracked (fetched: {}, inserted: {}, pages: {})",
+ tracker.len(),
+ result.fetched,
+ result.inserted,
+ result.pages
);
Ok(())
diff --git a/forester/src/compressible/mint/state.rs b/forester/src/compressible/mint/state.rs
index db19e6dd10..4ddebb4847 100644
--- a/forester/src/compressible/mint/state.rs
+++ b/forester/src/compressible/mint/state.rs
@@ -19,12 +19,13 @@ fn calculate_compressible_slot(mint: &Mint, lamports: u64, account_size: usize)
let rent_exemption = get_rent_exemption_lamports(account_size as u64)
.map_err(|e| anyhow::anyhow!("Failed to get rent exemption: {:?}", e))?;
let compression_info = &mint.compression;
+ let config = &compression_info.rent_config;
let last_funded_epoch = get_last_funded_epoch(
account_size as u64,
lamports,
compression_info.last_claimed_slot,
- &compression_info.rent_config,
+ config,
rent_exemption,
);
diff --git a/forester/src/compressible/pda/compressor.rs b/forester/src/compressible/pda/compressor.rs
index 45c64c7fae..3b5664f857 100644
--- a/forester/src/compressible/pda/compressor.rs
+++ b/forester/src/compressible/pda/compressor.rs
@@ -310,14 +310,17 @@ impl PdaCompressor {
"Batched compress_accounts_idempotent tx confirmed: {}",
signature
);
+ Ok(signature)
} else {
tracing::warn!(
"compress_accounts_idempotent tx not confirmed: {} - accounts kept in tracker for retry",
signature
);
+ Err(anyhow::anyhow!(
+ "Batch transaction not confirmed: {}",
+ signature
+ ))
}
-
- Ok(signature)
}
/// Compress a single PDA account using cached config
diff --git a/forester/tests/e2e_test.rs b/forester/tests/e2e_test.rs
index c4db026a4d..98ca8763c4 100644
--- a/forester/tests/e2e_test.rs
+++ b/forester/tests/e2e_test.rs
@@ -277,6 +277,7 @@ async fn e2e_test() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
spawn_prover().await;
diff --git a/forester/tests/legacy/address_v2_test.rs b/forester/tests/legacy/address_v2_test.rs
index 71ee957010..c7ec9db781 100644
--- a/forester/tests/legacy/address_v2_test.rs
+++ b/forester/tests/legacy/address_v2_test.rs
@@ -63,6 +63,7 @@ async fn test_create_v2_address() {
upgradeable_programs: vec![],
limit_ledger_size: Some(500000),
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/legacy/batched_address_test.rs b/forester/tests/legacy/batched_address_test.rs
index fc6c0af838..aa71314226 100644
--- a/forester/tests/legacy/batched_address_test.rs
+++ b/forester/tests/legacy/batched_address_test.rs
@@ -44,6 +44,7 @@ async fn test_address_batched() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
let tree_params = InitAddressTreeAccountsInstructionData::test_default();
diff --git a/forester/tests/legacy/batched_state_async_indexer_test.rs b/forester/tests/legacy/batched_state_async_indexer_test.rs
index 9e94fd8079..ac719ea9aa 100644
--- a/forester/tests/legacy/batched_state_async_indexer_test.rs
+++ b/forester/tests/legacy/batched_state_async_indexer_test.rs
@@ -84,6 +84,7 @@ async fn test_state_indexer_async_batched() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
spawn_prover().await;
diff --git a/forester/tests/legacy/batched_state_indexer_test.rs b/forester/tests/legacy/batched_state_indexer_test.rs
index 4eb6a5b02d..1bea68ba1e 100644
--- a/forester/tests/legacy/batched_state_indexer_test.rs
+++ b/forester/tests/legacy/batched_state_indexer_test.rs
@@ -45,6 +45,7 @@ async fn test_state_indexer_batched() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/legacy/batched_state_test.rs b/forester/tests/legacy/batched_state_test.rs
index 134ecc67ec..5cb0cbbb3e 100644
--- a/forester/tests/legacy/batched_state_test.rs
+++ b/forester/tests/legacy/batched_state_test.rs
@@ -49,6 +49,7 @@ async fn test_state_batched() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/legacy/e2e_test.rs b/forester/tests/legacy/e2e_test.rs
index b413894361..80734c0483 100644
--- a/forester/tests/legacy/e2e_test.rs
+++ b/forester/tests/legacy/e2e_test.rs
@@ -41,6 +41,7 @@ async fn test_epoch_monitor_with_2_foresters() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
let forester_keypair1 = Keypair::new();
@@ -389,6 +390,7 @@ async fn test_epoch_double_registration() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/legacy/e2e_v1_test.rs b/forester/tests/legacy/e2e_v1_test.rs
index 4687dc33f6..88b88af86c 100644
--- a/forester/tests/legacy/e2e_v1_test.rs
+++ b/forester/tests/legacy/e2e_v1_test.rs
@@ -42,6 +42,7 @@ async fn test_e2e_v1() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
let forester_keypair1 = Keypair::new();
@@ -386,6 +387,7 @@ async fn test_epoch_double_registration() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/test_batch_append_spent.rs b/forester/tests/test_batch_append_spent.rs
index bc5a71b94b..547acf0193 100644
--- a/forester/tests/test_batch_append_spent.rs
+++ b/forester/tests/test_batch_append_spent.rs
@@ -52,6 +52,7 @@ async fn test_batch_sequence() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
}))
.await;
diff --git a/forester/tests/test_compressible_ctoken.rs b/forester/tests/test_compressible_ctoken.rs
index dd2bea5b91..bde04205de 100644
--- a/forester/tests/test_compressible_ctoken.rs
+++ b/forester/tests/test_compressible_ctoken.rs
@@ -90,12 +90,15 @@ async fn register_forester(
// Calculate epoch info
let current_slot = rpc.get_slot().await?;
let current_epoch = protocol_config.get_current_epoch(current_slot);
- println!("current_epoch {:?}", current_epoch);
let phases = get_epoch_phases(&protocol_config, current_epoch);
+
+ println!(
+ "Current slot: {}, current_epoch: {}, phases: {:?}",
+ current_slot, current_epoch, phases
+ );
+
let register_phase_start = phases.registration.start;
let active_phase_start = phases.active.start;
- println!("phases {:?}", phases);
- println!("current_slot {}", current_slot);
// Warp to registration phase
if rpc.get_slot().await? < register_phase_start {
@@ -104,10 +107,12 @@ async fn register_forester(
.expect("warp_to_slot to registration phase");
}
- // Register for epoch 0
- let epoch = 0u64;
- let register_epoch_ix =
- create_register_forester_epoch_pda_instruction(&forester_pubkey, &forester_pubkey, epoch);
+ // Register for the current epoch
+ let register_epoch_ix = create_register_forester_epoch_pda_instruction(
+ &forester_pubkey,
+ &forester_pubkey,
+ current_epoch,
+ );
let (blockhash, _) = rpc.get_latest_blockhash().await?;
let tx = Transaction::new_signed_with_payer(
@@ -118,12 +123,7 @@ async fn register_forester(
);
rpc.process_transaction(tx).await?;
- println!("Registered for epoch {}", epoch);
-
- println!(
- "Waiting for active phase (current slot: {}, active phase starts at: {})...",
- current_slot, active_phase_start
- );
+ println!("Registered for epoch {}", current_epoch);
// Warp to active phase
if rpc.get_slot().await? < active_phase_start {
@@ -132,11 +132,11 @@ async fn register_forester(
.expect("warp_to_slot to active phase");
}
- println!("Active phase reached");
+ println!("Active phase reached for epoch {}", current_epoch);
// Finalize registration
let finalize_ix =
- create_finalize_registration_instruction(&forester_pubkey, &forester_pubkey, epoch);
+ create_finalize_registration_instruction(&forester_pubkey, &forester_pubkey, current_epoch);
let (blockhash, _) = rpc.get_latest_blockhash().await?;
let tx = Transaction::new_signed_with_payer(
@@ -164,7 +164,7 @@ async fn register_forester(
use light_registry::protocol_config::state::EpochState;
let epoch_struct = Epoch {
- epoch,
+ epoch: current_epoch,
epoch_pda: solana_sdk::pubkey::Pubkey::default(),
forester_epoch_pda: solana_sdk::pubkey::Pubkey::default(),
phases,
@@ -199,6 +199,7 @@ async fn test_compressible_ctoken_compression() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
let mut rpc = LightClient::new(LightClientConfig::local())
@@ -371,6 +372,7 @@ async fn test_compressible_ctoken_bootstrap() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -386,6 +388,22 @@ async fn test_compressible_ctoken_bootstrap() {
.await
.expect("Failed to airdrop lamports");
+ // Count pre-existing compressible token accounts
+ let program_id = Pubkey::new_from_array(light_token_interface::LIGHT_TOKEN_PROGRAM_ID);
+ let pre_existing = rpc
+ .get_program_accounts(&program_id)
+ .await
+ .expect("Failed to get program accounts")
+ .into_iter()
+ .filter(|(_, account)| {
+ <light_token_interface::state::Token>::try_from_slice(
+ &account.data,
+ )
+ .map(|t| t.is_token_account())
+ .unwrap_or(false)
+ })
+ .count();
+
// Create mint
let mint_seed = Keypair::new();
let address_tree = rpc.get_address_tree_v2().tree;
@@ -428,7 +446,7 @@ async fn test_compressible_ctoken_bootstrap() {
// Run bootstrap test with localhost
run_bootstrap_test(
"http://localhost:8899".to_string(),
- 3,
+ pre_existing + 3,
Some((created_pubkeys, mint)),
)
.await;
@@ -473,7 +491,7 @@ async fn run_bootstrap_test(
});
if expected_count > 0 {
- // Wait for bootstrap to find expected number of accounts (with timeout)
+ // Wait for bootstrap to find at least expected number of accounts (with timeout)
let start = tokio::time::Instant::now();
let timeout = Duration::from_secs(60);
@@ -485,12 +503,12 @@ async fn run_bootstrap_test(
sleep(Duration::from_millis(500)).await;
}
- // Assert bootstrap picked up all accounts
assert_eq!(
tracker.len(),
expected_count,
- "Bootstrap should have found all {} accounts",
- expected_count
+ "Bootstrap should have found exactly {} accounts, found {}",
+ expected_count,
+ tracker.len()
);
} else {
// Mainnet test: wait a bit for bootstrap to run
@@ -504,14 +522,13 @@ async fn run_bootstrap_test(
if let Some((expected_pubkeys, expected_mint)) = expected_data {
// Verify specific accounts (localhost test)
- // Verify all created accounts are in tracker
+ // Verify all created accounts are in tracker and have correct data
for pubkey in &expected_pubkeys {
- let found = accounts.iter().any(|acc| acc.pubkey == *pubkey);
- assert!(found, "Bootstrap should have found account {}", pubkey);
- }
+ let account_state = accounts
+ .iter()
+ .find(|acc| acc.pubkey == *pubkey)
+ .unwrap_or_else(|| panic!("Bootstrap should have found account {}", pubkey));
- // Verify account data is correct
- for account_state in &accounts {
println!(
"Verifying account {}: mint={:?}, lamports={}",
account_state.pubkey, account_state.account.mint, account_state.lamports
diff --git a/forester/tests/test_compressible_mint.rs b/forester/tests/test_compressible_mint.rs
index cb16391cc1..0e36226fca 100644
--- a/forester/tests/test_compressible_mint.rs
+++ b/forester/tests/test_compressible_mint.rs
@@ -6,7 +6,7 @@ use forester::compressible::{
traits::CompressibleTracker,
AccountSubscriber, SubscriptionConfig,
};
-use forester_utils::{rpc_pool::SolanaRpcPoolBuilder, utils::wait_for_indexer};
+use forester_utils::rpc_pool::SolanaRpcPoolBuilder;
use light_client::{
indexer::{AddressWithTree, Indexer},
local_test_validator::{spawn_validator, LightValidatorConfig},
@@ -23,13 +23,48 @@ use tokio::{
time::sleep,
};
-/// Helper to create a compressed mint with decompression
+/// Build an expected Mint for assertion comparison.
+///
+/// Takes known values from test setup plus runtime values extracted from the on-chain account.
+fn build_expected_mint(
+ mint_authority: &Pubkey,
+ decimals: u8,
+ mint_pda: &Pubkey,
+ mint_signer: &[u8; 32],
+ bump: u8,
+ version: u8,
+ compression: light_compressible::compression_info::CompressionInfo,
+) -> Mint {
+ Mint {
+ base: BaseMint {
+ mint_authority: Some((*mint_authority).into()),
+ supply: 0,
+ decimals,
+ is_initialized: true,
+ freeze_authority: None,
+ },
+ metadata: MintMetadata {
+ version,
+ mint_decompressed: true,
+ mint: (*mint_pda).into(),
+ mint_signer: *mint_signer,
+ bump,
+ },
+ reserved: [0u8; 16],
+ account_type: ACCOUNT_TYPE_MINT,
+ compression,
+ extensions: None,
+ }
+}
+
+/// Helper to create a compressed mint with decompression.
+/// Returns (mint_pda, compression_address, mint_seed, bump).
async fn create_decompressed_mint(
rpc: &mut (impl Rpc + Indexer),
payer: &Keypair,
mint_authority: Pubkey,
decimals: u8,
-) -> (Pubkey, [u8; 32], Keypair) {
+) -> (Pubkey, [u8; 32], Keypair, u8) {
let mint_seed = Keypair::new();
let address_tree = rpc.get_address_tree_v2();
let output_queue = rpc.get_random_state_tree_info().unwrap().queue;
@@ -84,7 +119,7 @@ async fn create_decompressed_mint(
.await
.expect("CreateMint should succeed");
- (mint_pda, compression_address, mint_seed)
+ (mint_pda, compression_address, mint_seed, bump)
}
/// Test that Mint bootstrap discovers decompressed mints
@@ -107,6 +142,7 @@ async fn test_compressible_mint_bootstrap() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -124,19 +160,20 @@ async fn test_compressible_mint_bootstrap() {
.await
.expect("Failed to airdrop lamports");
- // Wait for indexer to be ready before making validity proof requests
- wait_for_indexer(&rpc)
+ // Advance slot so the indexer is ready for validity proof requests
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
.await
- .expect("Failed to wait for indexer");
+ .expect("warp_to_slot");
// Create a decompressed mint
- let (mint_pda, compression_address, mint_seed) =
+ let (mint_pda, compression_address, mint_seed, bump) =
create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await;
println!("Created decompressed mint at: {}", mint_pda);
println!("Compression address: {:?}", compression_address);
- // Verify mint exists on-chain
+ // Verify mint exists on-chain and matches expected structure
let mint_account = rpc.get_account(mint_pda).await.unwrap();
assert!(mint_account.is_some(), "Mint should exist after creation");
@@ -144,41 +181,24 @@ async fn test_compressible_mint_bootstrap() {
let mint_data = mint_account.unwrap();
let mint = Mint::deserialize(&mut &mint_data.data[..]).expect("Failed to deserialize Mint");
- // Extract runtime-specific values from deserialized mint
- let compression = mint.compression;
- let metadata_version = mint.metadata.version;
-
- // Derive the bump from mint_seed
- let (_, bump) = find_mint_address(&mint_seed.pubkey());
-
- // Build expected Mint
- let expected_mint = Mint {
- base: BaseMint {
- mint_authority: Some(payer.pubkey().to_bytes().into()),
- supply: 0,
- decimals: 9,
- is_initialized: true,
- freeze_authority: None,
- },
- metadata: MintMetadata {
- version: metadata_version,
- mint_decompressed: true,
- mint: mint_pda.to_bytes().into(),
- mint_signer: mint_seed.pubkey().to_bytes(),
- bump,
- },
- reserved: [0u8; 16],
- account_type: ACCOUNT_TYPE_MINT,
- compression,
- extensions: None,
- };
+ // Build expected mint using known values plus runtime compression info
+ let expected_mint = build_expected_mint(
+ &payer.pubkey(),
+ 9,
+ &mint_pda,
+ &mint_seed.pubkey().to_bytes(),
+ bump,
+ mint.metadata.version,
+ mint.compression,
+ );
- assert_eq!(mint, expected_mint, "Mint should match expected state");
+ assert_eq!(mint, expected_mint, "Mint should match expected structure");
- // Wait for indexer
- wait_for_indexer(&rpc)
+ // Advance slot so the indexer processes the mint creation
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
.await
- .expect("Failed to wait for indexer");
+ .expect("warp_to_slot");
// Create tracker and run bootstrap
let tracker = Arc::new(MintAccountTracker::new());
@@ -263,6 +283,7 @@ async fn test_compressible_mint_compression() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -280,13 +301,14 @@ async fn test_compressible_mint_compression() {
.await
.expect("Failed to airdrop lamports");
- // Wait for indexer to be ready before making validity proof requests
- wait_for_indexer(&rpc)
+ // Advance slot so the indexer is ready for validity proof requests
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
.await
- .expect("Failed to wait for indexer");
+ .expect("warp_to_slot");
// Create a decompressed mint
- let (mint_pda, compression_address, mint_seed) =
+ let (mint_pda, compression_address, mint_seed, bump) =
create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await;
println!("Created decompressed mint at: {}", mint_pda);
@@ -303,9 +325,6 @@ async fn test_compressible_mint_compression() {
let compression = mint.compression;
let metadata_version = mint.metadata.version;
- // Derive the bump from mint_seed
- let (_, bump) = find_mint_address(&mint_seed.pubkey());
-
// Build expected Mint
let expected_mint = Mint {
base: BaseMint {
@@ -330,10 +349,11 @@ async fn test_compressible_mint_compression() {
assert_eq!(mint, expected_mint, "Mint should match expected state");
- // Wait for indexer after mint creation
- wait_for_indexer(&rpc)
+ // Advance slot so the indexer processes the mint creation
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
.await
- .expect("Failed to wait for indexer");
+ .expect("warp_to_slot");
// Create tracker and add the mint manually
let tracker = Arc::new(MintAccountTracker::new());
@@ -366,55 +386,61 @@ async fn test_compressible_mint_compression() {
let ready_accounts = tracker.get_ready_to_compress(current_slot);
println!("Ready to compress: {} mints", ready_accounts.len());
- if !ready_accounts.is_empty() {
- // Create compressor and compress
- let compressor =
- MintCompressor::new(rpc_pool.clone(), tracker.clone(), payer.insecure_clone());
-
- println!("Compressing Mint...");
- let compress_result = compressor.compress_batch(&ready_accounts).await;
-
- let signature = compress_result.expect("Compression should succeed");
- println!("Compression transaction sent: {}", signature);
-
- // Wait for account to be closed
- let start = tokio::time::Instant::now();
- let timeout = Duration::from_secs(30);
- let mut account_closed = false;
-
- while start.elapsed() < timeout {
- let mint_after = rpc
- .get_account(mint_pda)
- .await
- .expect("Failed to query mint account");
- if mint_after.is_none() {
- account_closed = true;
- println!("Mint account closed successfully!");
- break;
- }
- sleep(Duration::from_millis(500)).await;
- }
+ assert!(
+ !ready_accounts.is_empty(),
+ "Mint should be ready to compress with rent_payment=0"
+ );
+
+ // Create compressor and compress
+ let compressor = MintCompressor::new(rpc_pool.clone(), tracker.clone(), payer.insecure_clone());
+
+ println!("Compressing Mint...");
+ let compress_result = compressor.compress_batch(&ready_accounts).await;
+
+ let signature = compress_result.expect("Compression should succeed");
+ println!("Compression transaction sent: {}", signature);
- assert!(
- account_closed,
- "Mint account should be closed after compression"
- );
+ // Wait for account to be closed
+ let start = tokio::time::Instant::now();
+ let timeout = Duration::from_secs(30);
+ let mut account_closed = false;
- // Verify compressed mint still exists in the merkle tree
- let compressed_after = rpc
- .get_compressed_account(compression_address, None)
+ while start.elapsed() < timeout {
+ let mint_after = rpc
+ .get_account(mint_pda)
.await
- .unwrap()
- .value;
- assert!(
- compressed_after.is_some(),
- "Compressed mint should still exist after compression"
- );
-
- println!("Mint compression test completed successfully!");
- } else {
- panic!("Mint should be ready to compress with rent_payment=0");
+ .expect("Failed to query mint account");
+ if mint_after.is_none() || mint_after.as_ref().map(|a| a.lamports) == Some(0) {
+ account_closed = true;
+ println!("Mint account closed successfully!");
+ break;
+ }
+ sleep(Duration::from_millis(500)).await;
}
+
+ assert!(
+ account_closed,
+ "Mint account should be closed after compression"
+ );
+
+ // Advance slot so the indexer processes the compression transaction
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
+ .await
+ .expect("warp_to_slot");
+
+ // Verify compressed mint still exists in the merkle tree
+ let compressed_after = rpc
+ .get_compressed_account(compression_address, None)
+ .await
+ .unwrap()
+ .value;
+ assert!(
+ compressed_after.is_some(),
+ "Compressed mint should still exist after compression"
+ );
+
+ println!("Mint compression test completed successfully!");
}
/// Test AccountSubscriber for Mint accounts
@@ -439,6 +465,7 @@ async fn test_compressible_mint_subscription() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -456,10 +483,11 @@ async fn test_compressible_mint_subscription() {
.await
.expect("Failed to airdrop lamports");
- // Wait for indexer to be ready
- wait_for_indexer(&rpc)
+ // Advance slot so the indexer is ready
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
.await
- .expect("Failed to wait for indexer");
+ .expect("warp_to_slot");
// Setup tracker and subscribers
let tracker = Arc::new(MintAccountTracker::new());
@@ -484,7 +512,7 @@ async fn test_compressible_mint_subscription() {
sleep(Duration::from_secs(2)).await;
// Create first decompressed mint (immediately compressible with rent_payment=0)
- let (mint_pda_1, compression_address_1, _mint_seed_1) =
+ let (mint_pda_1, compression_address_1, _mint_seed_1, _bump_1) =
create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 9).await;
println!("Created first decompressed mint at: {}", mint_pda_1);
@@ -511,7 +539,7 @@ async fn test_compressible_mint_subscription() {
println!("Tracker detected first mint via subscription");
// Create second decompressed mint
- let (mint_pda_2, _compression_address_2, _mint_seed_2) =
+ let (mint_pda_2, _compression_address_2, _mint_seed_2, _bump_2) =
create_decompressed_mint(&mut rpc, &payer, payer.pubkey(), 6).await;
println!("Created second decompressed mint at: {}", mint_pda_2);
@@ -625,6 +653,12 @@ async fn test_compressible_mint_subscription() {
"Compressed mint should still exist after compression"
);
+ // Advance slot so the indexer processes the compression transaction
+ let current_slot = rpc.get_slot().await.unwrap();
+ rpc.warp_to_slot(current_slot + 1)
+ .await
+ .expect("warp_to_slot");
+
// Shutdown subscribers
shutdown_tx
.send(())
diff --git a/forester/tests/test_compressible_pda.rs b/forester/tests/test_compressible_pda.rs
index 8faa38b820..e04f630432 100644
--- a/forester/tests/test_compressible_pda.rs
+++ b/forester/tests/test_compressible_pda.rs
@@ -16,7 +16,7 @@ use forester_utils::{
use light_client::{
indexer::Indexer,
interface::{get_create_accounts_proof, CreateAccountsProofInput, InitializeRentFreeConfig},
- local_test_validator::{spawn_validator, LightValidatorConfig},
+ local_test_validator::{spawn_validator, LightValidatorConfig, UpgradeableProgramConfig},
rpc::{LightClient, LightClientConfig, Rpc},
};
use light_compressed_account::address::derive_address;
@@ -265,13 +265,14 @@ async fn test_compressible_pda_bootstrap() {
enable_prover: true,
wait_time: 60,
sbf_programs: vec![],
- upgradeable_programs: vec![(
+ upgradeable_programs: vec![UpgradeableProgramConfig::new(
CSDK_TEST_PROGRAM_ID.to_string(),
"../target/deploy/csdk_anchor_full_derived_test.so".to_string(),
payer_pubkey_string(),
)],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -457,13 +458,14 @@ async fn test_compressible_pda_compression() {
enable_prover: true,
wait_time: 60,
sbf_programs: vec![],
- upgradeable_programs: vec![(
+ upgradeable_programs: vec![UpgradeableProgramConfig::new(
CSDK_TEST_PROGRAM_ID.to_string(),
"../target/deploy/csdk_anchor_full_derived_test.so".to_string(),
payer_pubkey_string(),
)],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -693,13 +695,14 @@ async fn test_compressible_pda_subscription() {
enable_prover: true,
wait_time: 60,
sbf_programs: vec![],
- upgradeable_programs: vec![(
+ upgradeable_programs: vec![UpgradeableProgramConfig::new(
CSDK_TEST_PROGRAM_ID.to_string(),
"../target/deploy/csdk_anchor_full_derived_test.so".to_string(),
payer_pubkey_string(),
)],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
diff --git a/forester/tests/test_indexer_interface.rs b/forester/tests/test_indexer_interface.rs
new file mode 100644
index 0000000000..6918ae1fd1
--- /dev/null
+++ b/forester/tests/test_indexer_interface.rs
@@ -0,0 +1,618 @@
+/// Test scenarios for indexer interface endpoints.
+///
+/// This test creates various account types for testing the indexer's interface racing logic.
+/// After running, use `cargo xtask export-photon-test-data --test-name indexer_interface`
+/// to export transactions to the indexer's test snapshot directory.
+///
+/// Scenarios covered:
+/// 1. Light Token Mint - mint for token operations
+/// 2. Token accounts (via light-token-client MintTo) - for getTokenAccountInterface
+/// 3. Registered v2 address in batched address tree - for address tree verification
+/// 4. Compressible token accounts - on-chain accounts that can be compressed
+use std::collections::HashMap;
+
+use anchor_lang::Discriminator;
+use borsh::BorshSerialize;
+use create_address_test_program::create_invoke_cpi_instruction;
+use light_client::{
+ indexer::{photon_indexer::PhotonIndexer, AddressWithTree, Indexer},
+ local_test_validator::{spawn_validator, LightValidatorConfig},
+ rpc::{LightClient, LightClientConfig, Rpc},
+};
+use light_compressed_account::{
+ address::derive_address,
+ instruction_data::{
+ data::NewAddressParamsAssigned, with_readonly::InstructionDataInvokeCpiWithReadOnly,
+ },
+};
+use light_compressed_token::process_transfer::transfer_sdk::to_account_metas;
+use light_test_utils::{
+ actions::legacy::{
+ create_compressible_token_account,
+ instructions::mint_action::{
+ create_mint_action_instruction, MintActionParams, MintActionType,
+ },
+ CreateCompressibleTokenAccountInputs,
+ },
+ pack::pack_new_address_params_assigned,
+};
+use light_token::instruction::{
+ derive_mint_compressed_address, find_mint_address, CreateMint as CreateMintInstruction,
+ CreateMintParams,
+};
+use light_token_client::{CreateAta, CreateMint, MintTo};
+use light_token_interface::state::TokenDataVersion;
+use serial_test::serial;
+use solana_sdk::{pubkey::Pubkey, signature::Keypair, signer::Signer, transaction::Transaction};
+/// Test that creates scenarios for Photon interface testing
+///
+/// Run with: cargo test -p forester --test test_indexer_interface -- --nocapture
+/// Then export: cargo xtask export-photon-test-data --test-name indexer_interface
+#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+#[serial]
+async fn test_indexer_interface_scenarios() {
+ // Start validator with indexer, prover, and create_address_test_program
+ spawn_validator(LightValidatorConfig {
+ enable_indexer: true,
+ enable_prover: true,
+ wait_time: 0,
+ sbf_programs: vec![(
+ "FNt7byTHev1k5x2cXZLBr8TdWiC3zoP5vcnZR4P682Uy".to_string(),
+ "../target/deploy/create_address_test_program.so".to_string(),
+ )],
+ upgradeable_programs: vec![],
+ limit_ledger_size: None,
+ validator_args: vec![],
+ use_surfpool: true,
+ })
+ .await;
+
+ let mut rpc = LightClient::new(LightClientConfig::local())
+ .await
+ .expect("Failed to create LightClient");
+ rpc.get_latest_active_state_trees()
+ .await
+ .expect("Failed to get state trees");
+
+ let payer = rpc.get_payer().insecure_clone();
+ rpc.airdrop_lamports(&payer.pubkey(), 100_000_000_000)
+ .await
+ .expect("Failed to airdrop to payer");
+
+ println!("\n========== PHOTON INTERFACE TEST ==========\n");
+ println!("Payer: {}", payer.pubkey());
+
+ // ============ Scenario 1: Create Light Token Mint ============
+ println!("\n=== Creating Light Token mint ===");
+
+ let (create_mint_sig, mint_pubkey) = CreateMint {
+ decimals: 9,
+ ..Default::default()
+ }
+ .execute(&mut rpc, &payer, &payer)
+ .await
+ .expect("Failed to create Light Token mint");
+ println!(
+ "Light Token Mint: {} (sig: {})",
+ mint_pubkey, create_mint_sig
+ );
+
+ // ============ Scenario 2: Mint tokens to Bob and Charlie ============
+ println!("\n=== Minting tokens via light-token-client ===");
+
+ let bob = Keypair::new();
+ let charlie = Keypair::new();
+
+ // Create ATAs for Bob and Charlie
+ let (_, bob_ata) = CreateAta {
+ mint: mint_pubkey,
+ owner: bob.pubkey(),
+ idempotent: false,
+ }
+ .execute(&mut rpc, &payer)
+ .await
+ .expect("Failed to create Bob's ATA");
+
+ let (_, charlie_ata) = CreateAta {
+ mint: mint_pubkey,
+ owner: charlie.pubkey(),
+ idempotent: false,
+ }
+ .execute(&mut rpc, &payer)
+ .await
+ .expect("Failed to create Charlie's ATA");
+
+ // Mint tokens
+ let bob_mint_sig = MintTo {
+ mint: mint_pubkey,
+ destination: bob_ata,
+ amount: 1_000_000_000,
+ }
+ .execute(&mut rpc, &payer, &payer)
+ .await
+ .expect("Failed to mint to Bob");
+
+ let charlie_mint_sig = MintTo {
+ mint: mint_pubkey,
+ destination: charlie_ata,
+ amount: 500_000_000,
+ }
+ .execute(&mut rpc, &payer, &payer)
+ .await
+ .expect("Failed to mint to Charlie");
+
+ println!("Minted to Bob: {} (sig: {})", bob.pubkey(), bob_mint_sig);
+ println!(
+ "Minted to Charlie: {} (sig: {})",
+ charlie.pubkey(),
+ charlie_mint_sig
+ );
+
+ // ============ Scenario 3: Register v2 Address (using create_address_test_program) ============
+ println!("\n=== Registering v2 address in batched address tree ===");
+
+ // Use v2 (batched) address tree
+ let address_tree = rpc.get_address_tree_v2();
+
+ // Create a deterministic seed for the address
+ let address_seed: [u8; 32] = [42u8; 32];
+
+ // Derive address using v2 method (includes program ID)
+ let derived_address = derive_address(
+ &address_seed,
+ &address_tree.tree.to_bytes(),
+ &create_address_test_program::ID.to_bytes(),
+ );
+
+ println!("Derived v2 address: {:?}", derived_address);
+
+ // Get validity proof for the new address
+ let proof_result = rpc
+ .indexer()
+ .unwrap()
+ .get_validity_proof(
+ vec![],
+ vec![AddressWithTree {
+ address: derived_address,
+ tree: address_tree.tree,
+ }],
+ None,
+ )
+ .await
+ .unwrap();
+
+ // Build new address params
+ let new_address_params = vec![NewAddressParamsAssigned {
+ seed: address_seed,
+ address_queue_pubkey: address_tree.tree.into(), // For batched trees, queue = tree
+ address_merkle_tree_pubkey: address_tree.tree.into(),
+ address_merkle_tree_root_index: proof_result.value.get_address_root_indices()[0],
+ assigned_account_index: None,
+ }];
+
+ // Pack the address params for the instruction
+ let mut remaining_accounts = HashMap::<Pubkey, usize>::new();
+ let packed_new_address_params =
+ pack_new_address_params_assigned(&new_address_params, &mut remaining_accounts);
+
+ // Build instruction data for create_address_test_program
+ let ix_data = InstructionDataInvokeCpiWithReadOnly::new(
+ create_address_test_program::ID.into(),
+ 255,
+ proof_result.value.proof.0,
+ )
+ .mode_v1()
+ .with_with_transaction_hash(true)
+ .with_new_addresses(&packed_new_address_params);
+
+ let remaining_accounts_metas = to_account_metas(remaining_accounts);
+
+ // Create the instruction using the test program
+ let instruction = create_invoke_cpi_instruction(
+ payer.pubkey(),
+ [
+ light_system_program::instruction::InvokeCpiWithReadOnly::DISCRIMINATOR.to_vec(),
+ ix_data.try_to_vec().unwrap(),
+ ]
+ .concat(),
+ remaining_accounts_metas,
+ None,
+ );
+
+ let instructions = vec![
+ solana_sdk::compute_budget::ComputeBudgetInstruction::set_compute_unit_limit(1_000_000),
+ instruction,
+ ];
+ let address_sig = rpc
+ .create_and_send_transaction(&instructions, &payer.pubkey(), &[&payer])
+ .await
+ .unwrap();
+ println!(
+ "Registered v2 address: {} (sig: {})",
+ hex::encode(derived_address),
+ address_sig
+ );
+
+ // ============ Scenario 4: Decompressed Mint (CreateMint with rent_payment=0) ============
+ // This creates a compressed mint that is immediately decompressed to an on-chain CMint account.
+ // The compressed account only contains the 32-byte mint_pda reference (DECOMPRESSED_PDA_DISCRIMINATOR).
+ // Full mint data is on-chain in the CMint account owned by LIGHT_TOKEN_PROGRAM_ID.
+ println!("\n=== Creating decompressed mint (on-chain CMint) ===");
+
+ let decompressed_mint_seed = Keypair::new();
+ let output_queue = rpc.get_random_state_tree_info().unwrap().queue;
+
+ // Use v2 address tree for compressed mints
+ let mint_address_tree = rpc.get_address_tree_v2();
+
+ // Derive compression address for decompressed mint
+ let decompressed_mint_compression_address =
+ derive_mint_compressed_address(&decompressed_mint_seed.pubkey(), &mint_address_tree.tree);
+
+ let (decompressed_mint_pda, decompressed_mint_bump) =
+ find_mint_address(&decompressed_mint_seed.pubkey());
+
+ // Get validity proof for the address
+ let rpc_result = rpc
+ .get_validity_proof(
+ vec![],
+ vec![AddressWithTree {
+ address: decompressed_mint_compression_address,
+ tree: mint_address_tree.tree,
+ }],
+ None,
+ )
+ .await
+ .unwrap()
+ .value;
+
+ // Create decompressed mint (CreateMint always creates both compressed + on-chain CMint)
+ let decompressed_mint_params = CreateMintParams {
+ decimals: 6,
+ address_merkle_tree_root_index: rpc_result.addresses[0].root_index,
+ mint_authority: payer.pubkey(),
+ proof: rpc_result.proof.0.unwrap(),
+ compression_address: decompressed_mint_compression_address,
+ mint: decompressed_mint_pda,
+ bump: decompressed_mint_bump,
+ freeze_authority: None,
+ extensions: None,
+ rent_payment: 0, // Immediately compressible
+ write_top_up: 0,
+ };
+
+ let create_decompressed_mint_builder = CreateMintInstruction::new(
+ decompressed_mint_params,
+ decompressed_mint_seed.pubkey(),
+ payer.pubkey(),
+ mint_address_tree.tree,
+ output_queue,
+ );
+ let ix = create_decompressed_mint_builder.instruction().unwrap();
+
+ let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap();
+ let tx = Transaction::new_signed_with_payer(
+ &[ix],
+ Some(&payer.pubkey()),
+ &[&payer, &decompressed_mint_seed],
+ blockhash,
+ );
+ let decompressed_mint_sig = rpc.process_transaction(tx).await.unwrap();
+ println!(
+ "Created decompressed mint (CMint on-chain): {} (sig: {})",
+ decompressed_mint_pda, decompressed_mint_sig
+ );
+
+ // ============ Scenario 5: Fully Compressed Mint (CreateMint + CompressAndCloseMint) ============
+ // This creates a compressed mint and then compresses it, so full mint data is in the compressed DB.
+ // This is for testing getMintInterface cold path (no on-chain data needed).
+ println!("\n=== Creating fully compressed mint ===");
+
+ let compressed_mint_seed = Keypair::new();
+
+ // Derive compression address for fully compressed mint
+ let compressed_mint_compression_address =
+ derive_mint_compressed_address(&compressed_mint_seed.pubkey(), &mint_address_tree.tree);
+
+ let (compressed_mint_pda, compressed_mint_bump) =
+ find_mint_address(&compressed_mint_seed.pubkey());
+
+ // Get validity proof for the new address
+ let rpc_result = rpc
+ .get_validity_proof(
+ vec![],
+ vec![AddressWithTree {
+ address: compressed_mint_compression_address,
+ tree: mint_address_tree.tree,
+ }],
+ None,
+ )
+ .await
+ .unwrap()
+ .value;
+
+ // Create compressed mint (will be decompressed initially)
+ let compressed_mint_params = CreateMintParams {
+ decimals: 9,
+ address_merkle_tree_root_index: rpc_result.addresses[0].root_index,
+ mint_authority: payer.pubkey(),
+ proof: rpc_result.proof.0.unwrap(),
+ compression_address: compressed_mint_compression_address,
+ mint: compressed_mint_pda,
+ bump: compressed_mint_bump,
+ freeze_authority: Some(payer.pubkey()), // Add freeze authority for variety
+ extensions: None,
+ rent_payment: 0, // Immediately compressible
+ write_top_up: 0,
+ };
+
+ let create_compressed_mint_builder = CreateMintInstruction::new(
+ compressed_mint_params,
+ compressed_mint_seed.pubkey(),
+ payer.pubkey(),
+ mint_address_tree.tree,
+ output_queue,
+ );
+ let ix = create_compressed_mint_builder.instruction().unwrap();
+
+ let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap();
+ let tx = Transaction::new_signed_with_payer(
+ &[ix],
+ Some(&payer.pubkey()),
+ &[&payer, &compressed_mint_seed],
+ blockhash,
+ );
+ let create_mint_sig = rpc.process_transaction(tx).await.unwrap();
+ println!(
+ "Created mint (step 1/2): {} (sig: {})",
+ compressed_mint_pda, create_mint_sig
+ );
+
+ // Now compress and close the mint to make it fully compressed
+ println!("Compressing mint via CompressAndCloseMint...");
+
+ let compress_params = MintActionParams {
+ compressed_mint_address: compressed_mint_compression_address,
+ mint_seed: compressed_mint_seed.pubkey(),
+ authority: payer.pubkey(),
+ payer: payer.pubkey(),
+ actions: vec![MintActionType::CompressAndCloseMint { idempotent: false }],
+ new_mint: None,
+ };
+
+ let compress_ix = create_mint_action_instruction(&mut rpc, compress_params)
+ .await
+ .expect("Failed to create CompressAndCloseMint instruction");
+
+ let (blockhash, _) = rpc.get_latest_blockhash().await.unwrap();
+ let tx = Transaction::new_signed_with_payer(
+ &[compress_ix],
+ Some(&payer.pubkey()),
+ &[&payer],
+ blockhash,
+ );
+ let compress_mint_sig = rpc.process_transaction(tx).await.unwrap();
+ println!(
+ "Compressed mint (step 2/2): {} (sig: {})",
+ compressed_mint_pda, compress_mint_sig
+ );
+
+ // ============ Scenario 6: Compressible Token Account ============
+ println!("\n=== Creating compressible token account ===");
+
+ let compressible_owner = Keypair::new();
+ rpc.airdrop_lamports(&compressible_owner.pubkey(), 1_000_000_000)
+ .await
+ .expect("Failed to airdrop to compressible owner");
+
+ let compressible_token_account = create_compressible_token_account(
+ &mut rpc,
+ CreateCompressibleTokenAccountInputs {
+ owner: compressible_owner.pubkey(),
+ mint: decompressed_mint_pda,
+ num_prepaid_epochs: 2,
+ payer: &payer,
+ token_account_keypair: None,
+ lamports_per_write: Some(100),
+ token_account_version: TokenDataVersion::ShaFlat,
+ },
+ )
+ .await
+ .expect("Failed to create compressible token account");
+ println!(
+ "Created compressible token account: {}",
+ compressible_token_account
+ );
+ println!("Compressible owner: {}", compressible_owner.pubkey());
+
+ // ============ Summary ============
+ println!("\n========== ADDRESSES SUMMARY ==========\n");
+ println!("Light Token Mint: {}", mint_pubkey);
+ println!("Registered v2 Address: {}", hex::encode(derived_address));
+ println!(
+ "Decompressed Mint PDA (on-chain CMint): {}",
+ decompressed_mint_pda
+ );
+ println!(
+ "Decompressed Mint Address: {:?}",
+ decompressed_mint_compression_address
+ );
+ println!(
+ "Fully Compressed Mint PDA (in compressed DB): {}",
+ compressed_mint_pda
+ );
+ println!(
+ "Fully Compressed Mint Address: {:?}",
+ compressed_mint_compression_address
+ );
+ println!("Bob (compressed token holder): {}", bob.pubkey());
+ println!("Charlie (compressed token holder): {}", charlie.pubkey());
+ println!("Compressible owner: {}", compressible_owner.pubkey());
+ println!("Compressible token account: {}", compressible_token_account);
+
+ // ============ Test Interface Endpoints ============
+ println!("\n========== TESTING INTERFACE ENDPOINTS ==========\n");
+
+ // Create PhotonIndexer to test the interface endpoints
+ let photon_indexer = PhotonIndexer::new("http://localhost:8784".to_string(), None);
+
+ // ============ Test 1: getAccountInterface with compressible token account (on-chain) ============
+ println!("Test 1: getAccountInterface with compressible token account (on-chain)...");
+ let compressible_account_interface = photon_indexer
+ .get_account_interface(&compressible_token_account, None)
+ .await
+ .expect("getAccountInterface should not error for compressible account")
+ .value
+ .expect("Compressible token account should be found");
+
+ assert!(
+ compressible_account_interface.is_hot(),
+ "Compressible account should be hot (on-chain)"
+ );
+ assert!(
+ compressible_account_interface.cold.is_none(),
+ "On-chain account should not have cold context"
+ );
+ assert_eq!(
+ compressible_account_interface.key, compressible_token_account,
+ "Key should match the queried address"
+ );
+ assert!(
+ compressible_account_interface.account.lamports > 0,
+ "On-chain account should have lamports > 0"
+ );
+ println!(" PASSED: Compressible account resolved from on-chain");
+
+ // ============ Test 2: getTokenAccountInterface with compressible token account (on-chain) ============
+ println!("\nTest 2: getTokenAccountInterface with compressible token account (on-chain)...");
+ let compressible_token_interface = photon_indexer
+ .get_token_account_interface(&compressible_token_account, None)
+ .await
+ .expect("getTokenAccountInterface should not error")
+ .value
+ .expect("Compressible token account should be found via token interface");
+
+ assert!(
+ compressible_token_interface.account.is_hot(),
+ "Token account should be hot (on-chain)"
+ );
+ assert!(
+ compressible_token_interface.account.cold.is_none(),
+ "On-chain token account should not have cold context"
+ );
+ assert_eq!(
+ compressible_token_interface.account.key, compressible_token_account,
+ "Token account key should match"
+ );
+ assert_eq!(
+ compressible_token_interface.token.mint, decompressed_mint_pda,
+ "Token mint should match decompressed mint"
+ );
+ assert_eq!(
+ compressible_token_interface.token.owner,
+ compressible_owner.pubkey(),
+ "Token owner should match compressible owner"
+ );
+ println!(" PASSED: Token account interface resolved with correct token data");
+
+ // ============ Test 3: getMultipleAccountInterfaces batch lookup ============
+ println!("\nTest 3: getMultipleAccountInterfaces batch lookup...");
+ let batch_addresses = vec![&decompressed_mint_pda, &compressible_token_account];
+
+ let batch_response = photon_indexer
+ .get_multiple_account_interfaces(batch_addresses.clone(), None)
+ .await
+ .expect("getMultipleAccountInterfaces should not error");
+
+ assert_eq!(
+ batch_response.value.len(),
+ 2,
+ "Batch response should have exactly 2 results"
+ );
+
+ // First result: decompressed mint
+ let batch_mint = batch_response.value[0]
+ .as_ref()
+ .expect("Decompressed mint should be found in batch");
+ assert!(batch_mint.is_hot(), "Batch mint should be hot (on-chain)");
+ assert_eq!(
+ batch_mint.key, decompressed_mint_pda,
+ "Batch mint key should match"
+ );
+ assert!(
+ batch_mint.account.lamports > 0,
+ "Batch mint should have lamports > 0"
+ );
+
+ // Second result: compressible token account
+ let batch_token = batch_response.value[1]
+ .as_ref()
+ .expect("Compressible account should be found in batch");
+ assert!(
+ batch_token.is_hot(),
+ "Batch token account should be hot (on-chain)"
+ );
+ assert_eq!(
+ batch_token.key, compressible_token_account,
+ "Batch token account key should match"
+ );
+ assert!(
+ batch_token.account.lamports > 0,
+ "Batch token account should have lamports > 0"
+ );
+ println!(" PASSED: Batch lookup returned correct results");
+
+ // ============ Test 4: Verify fully compressed mint via getAccountInterface returns None ============
+ // Fully compressed mints (after CompressAndCloseMint) have full mint data in the compressed DB.
+ // Their address column contains the compression_address, not the mint_pda.
+ // Since they don't have the [255; 8] discriminator, onchain_pubkey is not set.
+ // Therefore getAccountInterface by mint_pda should return None.
+ println!("\nTest 4: getAccountInterface with fully compressed mint PDA...");
+ let compressed_via_account = photon_indexer
+ .get_account_interface(&compressed_mint_pda, None)
+ .await
+ .expect("getAccountInterface should not error");
+
+ assert!(
+ compressed_via_account.value.is_none(),
+ "Fully compressed mint should NOT be found via getAccountInterface"
+ );
+ println!(" PASSED: Fully compressed mint correctly returns None via getAccountInterface");
+
+ // ============ Test 5: Verify decompressed mint found via getAccountInterface (generic linking) ============
+ // Decompressed mints have discriminator [255; 8] + 32-byte mint_pda in data.
+ // The generic linking feature extracts this as onchain_pubkey during ingestion.
+ // Therefore getAccountInterface(mint_pda) should find it via onchain_pubkey column.
+ println!("\nTest 5: getAccountInterface with decompressed mint PDA (generic linking)...");
+ let decompressed_via_account = photon_indexer
+ .get_account_interface(&decompressed_mint_pda, None)
+ .await
+ .expect("getAccountInterface should not error");
+
+ let decompressed_account = decompressed_via_account
+ .value
+ .expect("Decompressed mint should be found via getAccountInterface (generic linking)");
+
+ // The decompressed mint should be found from on-chain (CMint account exists)
+ assert!(
+ decompressed_account.is_hot(),
+ "Decompressed mint via getAccountInterface should be hot (on-chain)"
+ );
+ assert!(
+ decompressed_account.cold.is_none(),
+ "Decompressed mint via getAccountInterface should not have cold context"
+ );
+ assert_eq!(
+ decompressed_account.key, decompressed_mint_pda,
+ "Key should match the queried mint PDA"
+ );
+ assert!(
+ decompressed_account.account.lamports > 0,
+ "Decompressed mint should have lamports > 0"
+ );
+ println!(" PASSED: Decompressed mint found via getAccountInterface with generic linking");
+
+ println!("\n========== ALL TESTS PASSED ==========");
+ println!("\nTo export transactions, run:");
+ println!("cargo xtask export-photon-test-data --test-name indexer_interface");
+}
diff --git a/js/stateless.js/tests/unit/version.test.ts b/js/stateless.js/tests/unit/version.test.ts
index 97db06c8be..a0cc7100aa 100644
--- a/js/stateless.js/tests/unit/version.test.ts
+++ b/js/stateless.js/tests/unit/version.test.ts
@@ -20,8 +20,9 @@ describe('Version System', () => {
});
it('should respect LIGHT_PROTOCOL_VERSION environment variable', () => {
+ // Default is V2 when no env var is set (see constants.ts line 31)
const expectedVersion =
- process.env.LIGHT_PROTOCOL_VERSION || VERSION.V1;
+ process.env.LIGHT_PROTOCOL_VERSION || VERSION.V2;
expect(featureFlags.version).toBe(expectedVersion);
});
diff --git a/justfile b/justfile
index cb686dc191..dbbb007cdf 100644
--- a/justfile
+++ b/justfile
@@ -32,12 +32,26 @@ build: programs::build js::build cli::build
test: program-tests::test sdk-tests::test js::test
# === Lint & Format ===
-lint: lint-rust js::lint
+lint: lint-rust lint-readmes js::lint
lint-rust:
cargo +nightly fmt --all -- --check
cargo clippy --workspace --all-features --all-targets -- -D warnings
+# Check READMEs are up-to-date with cargo-rdme
+lint-readmes:
+ #!/usr/bin/env bash
+ set -e
+ echo "Checking READMEs are up-to-date..."
+ if ! command -v cargo-rdme &> /dev/null; then
+ cargo install cargo-rdme
+ fi
+ for toml in $(find program-libs sdk-libs -name '.cargo-rdme.toml' -type f); do
+ crate_dir=$(dirname "$toml")
+ echo "Checking README in $crate_dir..."
+ (cd "$crate_dir" && cargo rdme --check --no-fail-on-warnings)
+ done
+
format:
cargo +nightly fmt --all
just js format
diff --git a/program-libs/CLAUDE.md b/program-libs/CLAUDE.md
index 81359b5151..3803b58e81 100644
--- a/program-libs/CLAUDE.md
+++ b/program-libs/CLAUDE.md
@@ -63,6 +63,7 @@ Some crates depend on external Light Protocol crates not in program-libs:
## Testing
Unit tests run with `cargo test`:
+
```bash
cargo test -p light-hasher --all-features
cargo test -p light-compressed-account --all-features
diff --git a/program-libs/compressed-account/src/constants.rs b/program-libs/compressed-account/src/constants.rs
index adea8113e9..ea14221e09 100644
--- a/program-libs/compressed-account/src/constants.rs
+++ b/program-libs/compressed-account/src/constants.rs
@@ -6,6 +6,9 @@ pub const ACCOUNT_COMPRESSION_PROGRAM_ID: [u8; 32] =
/// ID of the light-system program.
pub const LIGHT_SYSTEM_PROGRAM_ID: [u8; 32] =
pubkey_array!("SySTEM1eSU2p4BGQfQpimFEWWSC1XDFeun3Nqzz3rT7");
+/// ID of the light-registry program.
+pub const LIGHT_REGISTRY_PROGRAM_ID: [u8; 32] =
+ pubkey_array!("Lighton6oQpVkeewmo2mcPTQQp7kYHr4fWpAgJyEmDX");
#[deprecated(since = "0.9.0", note = "Use LIGHT_SYSTEM_PROGRAM_ID instead")]
pub const SYSTEM_PROGRAM_ID: [u8; 32] = LIGHT_SYSTEM_PROGRAM_ID;
pub const REGISTERED_PROGRAM_PDA: [u8; 32] =
diff --git a/program-tests/compressed-token-test/tests/v1.rs b/program-tests/compressed-token-test/tests/v1.rs
index 5b9072b0c2..81c01c82fd 100644
--- a/program-tests/compressed-token-test/tests/v1.rs
+++ b/program-tests/compressed-token-test/tests/v1.rs
@@ -4893,6 +4893,7 @@ async fn test_transfer_with_photon_and_batched_tree() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
diff --git a/program-tests/justfile b/program-tests/justfile
index 18454e4823..b63c5fcdaa 100644
--- a/program-tests/justfile
+++ b/program-tests/justfile
@@ -6,10 +6,126 @@ default:
build:
cd create-address-test-program && cargo build-sbf
-test: build
+# === Full test suite (mirrors CI) ===
+
+test: build test-account-compression test-registry test-system test-system-cpi test-system-cpi-v2 test-compressed-token test-e2e
+
+# === Individual test packages ===
+
+test-account-compression:
RUSTFLAGS="-D warnings" cargo test-sbf -p account-compression-test
+
+test-registry:
RUSTFLAGS="-D warnings" cargo test-sbf -p registry-test
- RUSTFLAGS="-D warnings" cargo test-sbf -p system-test
+
+# System program tests
+test-system: test-system-address test-system-compression test-system-re-init
+
+test-system-address:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-test -- test_with_address
+
+test-system-compression:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-test -- test_with_compression
+
+test-system-re-init:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-test --test test_re_init_cpi_account
+
+# System CPI tests (v1)
+test-system-cpi:
RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-test
- RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test
+
+# System CPI tests (v2)
+test-system-cpi-v2: test-system-cpi-v2-main test-system-cpi-v2-event-parse test-system-cpi-v2-functional
+
+test-system-cpi-v2-main:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- --skip functional_ --skip event::parse
+
+test-system-cpi-v2-event-parse:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- event::parse
+
+test-system-cpi-v2-functional: test-system-cpi-v2-functional-read-only test-system-cpi-v2-functional-account-infos
+
+test-system-cpi-v2-functional-read-only:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- functional_read_only
+
+test-system-cpi-v2-functional-account-infos:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p system-cpi-v2-test -- functional_account_infos
+
+# Compressed token tests
+test-compressed-token: test-compressed-token-unit test-compressed-token-v1 test-compressed-token-mint test-compressed-token-light-token test-compressed-token-transfer2
+
+test-compressed-token-unit:
+ RUSTFLAGS="-D warnings" cargo test -p light-compressed-token
+
+test-compressed-token-v1:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test v1
+
+test-compressed-token-mint:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test mint
+
+test-compressed-token-light-token:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test light_token
+
+test-compressed-token-transfer2:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test --test transfer2
+
+# Compressed token batched tree test (flaky, may need retries)
+test-compressed-token-batched-tree:
+ RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree
+
+# E2E tests
+test-e2e:
RUSTFLAGS="-D warnings" cargo test-sbf -p e2e-test
+
+# E2E extended tests (requires building compressed-token-small first)
+test-e2e-extended: build-compressed-token-small
+ RUSTFLAGS="-D warnings" cargo test-sbf -p e2e-test -- --test test_10_all
+
+# Pinocchio unit tests
+test-pinocchio:
+ RUSTFLAGS="-D warnings" cargo test -p light-system-program-pinocchio
+
+# === Build targets ===
+
+build-compressed-token-small:
+ pnpm --filter @lightprotocol/programs run build-compressed-token-small
+
+# === CI-equivalent grouped tests ===
+
+# Matches CI: account-compression-and-registry
+ci-account-compression-and-registry: test-account-compression test-registry
+
+# Matches CI: light-system-program-address
+ci-system-address: test-system-address test-e2e test-e2e-extended test-compressed-token-light-token
+
+# Matches CI: light-system-program-compression
+ci-system-compression: test-system-compression test-system-re-init
+
+# Matches CI: compressed-token-and-e2e
+ci-compressed-token-and-e2e: test-compressed-token-unit test-compressed-token-v1 test-compressed-token-mint
+
+# Matches CI: compressed-token-batched-tree (with retry for flaky test)
+ci-compressed-token-batched-tree:
+ #!/usr/bin/env bash
+ set -euo pipefail
+ attempt=1
+ max_attempts=3
+ until RUSTFLAGS="-D warnings" cargo test-sbf -p compressed-token-test -- test_transfer_with_photon_and_batched_tree; do
+ attempt=$((attempt + 1))
+ if [ $attempt -gt $max_attempts ]; then
+ echo "Test failed after $max_attempts attempts"
+ exit 1
+ fi
+ echo "Attempt $attempt/$max_attempts failed, retrying in 5s..."
+ sleep 5
+ done
+ echo "Test passed on attempt $attempt"
+
+# Matches CI: system-cpi-test
+ci-system-cpi: test-system-cpi test-pinocchio test-system-cpi-v2-main test-system-cpi-v2-event-parse test-compressed-token-transfer2
+
+# Matches CI: system-cpi-test-v2-functional-read-only
+ci-system-cpi-v2-functional-read-only: test-system-cpi-v2-functional-read-only
+
+# Matches CI: system-cpi-test-v2-functional-account-infos
+ci-system-cpi-v2-functional-account-infos: test-system-cpi-v2-functional-account-infos
diff --git a/program-tests/system-cpi-v2-test/tests/event.rs b/program-tests/system-cpi-v2-test/tests/event.rs
index d25554354c..9425d72144 100644
--- a/program-tests/system-cpi-v2-test/tests/event.rs
+++ b/program-tests/system-cpi-v2-test/tests/event.rs
@@ -101,6 +101,7 @@ async fn parse_batched_event_functional() {
is_compress: false,
compress_or_decompress_lamports: None,
pubkey_array: vec![env.v2_state_trees[0].output_queue.into()],
+ ata_owners: vec![],
},
address_sequence_numbers: Vec::new(),
input_sequence_numbers: Vec::new(),
@@ -227,6 +228,7 @@ async fn parse_batched_event_functional() {
})
.collect::<Vec<_>>(),
output_compressed_accounts: output_accounts.to_vec(),
+ ata_owners: vec![],
sequence_numbers: vec![MerkleTreeSequenceNumberV1 {
tree_pubkey: env.v2_state_trees[0].merkle_tree.into(),
// queue_pubkey: env.v2_state_trees[0].output_queue,
@@ -411,6 +413,7 @@ async fn parse_batched_event_functional() {
env.v2_state_trees[0].merkle_tree.into(),
env.v2_state_trees[0].output_queue.into(),
],
+ ata_owners: vec![],
},
address_sequence_numbers: vec![MerkleTreeSequenceNumber {
tree_pubkey: env.v2_address_trees[0].into(),
@@ -496,6 +499,7 @@ async fn parse_multiple_batched_events_functional() {
is_compress: false,
compress_or_decompress_lamports: None,
pubkey_array: vec![env.v2_state_trees[0].output_queue.into()],
+ ata_owners: vec![],
},
address_sequence_numbers: Vec::new(),
input_sequence_numbers: Vec::new(),
@@ -540,6 +544,7 @@ async fn generate_photon_test_data_multiple_events() {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
})
.await;
@@ -589,6 +594,7 @@ async fn generate_photon_test_data_multiple_events() {
is_compress: false,
compress_or_decompress_lamports: None,
pubkey_array: vec![env.v2_state_trees[0].output_queue.into()],
+ ata_owners: vec![],
},
address_sequence_numbers: Vec::new(),
input_sequence_numbers: Vec::new(),
diff --git a/scripts/devenv.sh b/scripts/devenv.sh
index 605bce12b7..a656a8de25 100755
--- a/scripts/devenv.sh
+++ b/scripts/devenv.sh
@@ -74,8 +74,11 @@ if [ -z "${CI:-}" ]; then
alias light="${LIGHT_PROTOCOL_TOPLEVEL}/cli/test_bin/run"
fi
-# Define GOROOT for Go.
export GOROOT="${LIGHT_PROTOCOL_TOPLEVEL}/.local/go"
+export GOTOOLCHAIN=local
+unset GOBIN
+# Disable mise entirely to prevent its hooks from overriding our paths.
+export MISE_DISABLED=1
# Ensure Rust binaries are in PATH
PATH="${CARGO_HOME}/bin:${PATH}"
diff --git a/scripts/devenv/versions.sh b/scripts/devenv/versions.sh
index c578cacb05..710ee85831 100755
--- a/scripts/devenv/versions.sh
+++ b/scripts/devenv/versions.sh
@@ -13,7 +13,7 @@ export SOLANA_VERSION="2.2.15"
export ANCHOR_VERSION="0.31.1"
export JQ_VERSION="1.8.0"
export PHOTON_VERSION="0.51.2"
-export PHOTON_COMMIT="83b46c9aef58a134edef2eb8e506c1bc6604e876"
+export PHOTON_COMMIT="9c8ce2d9a4116b643ec0cd2cfcf695339f8e1a3f"
export REDIS_VERSION="8.0.1"
export ANCHOR_TAG="anchor-v${ANCHOR_VERSION}"
diff --git a/sdk-libs/client/README.md b/sdk-libs/client/README.md
index 8c46cbc68c..42363ecba5 100644
--- a/sdk-libs/client/README.md
+++ b/sdk-libs/client/README.md
@@ -45,6 +45,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
upgradeable_programs: vec![],
limit_ledger_size: None,
use_surfpool: true,
+ validator_args: vec![],
};
spawn_validator(config).await;
diff --git a/sdk-libs/client/src/indexer/base58.rs b/sdk-libs/client/src/indexer/base58.rs
index a2b66a123f..46b3953aa3 100644
--- a/sdk-libs/client/src/indexer/base58.rs
+++ b/sdk-libs/client/src/indexer/base58.rs
@@ -38,10 +38,13 @@ pub fn decode_base58_to_fixed_array(input: &str) -> Result<[u8;
let mut buffer = [0u8; N];
let decoded_len = bs58::decode(input)
.onto(&mut buffer)
- .map_err(|_| IndexerError::InvalidResponseData)?;
+ .map_err(|e| IndexerError::base58_decode_error("base58", e))?;
if decoded_len != N {
- return Err(IndexerError::InvalidResponseData);
+ return Err(IndexerError::base58_decode_error(
+ "base58",
+ format!("expected {} bytes, got {}", N, decoded_len),
+ ));
}
Ok(buffer)
diff --git a/sdk-libs/client/src/indexer/mod.rs b/sdk-libs/client/src/indexer/mod.rs
index fa03606dfe..cc3167459c 100644
--- a/sdk-libs/client/src/indexer/mod.rs
+++ b/sdk-libs/client/src/indexer/mod.rs
@@ -14,12 +14,13 @@ pub use error::IndexerError;
pub use indexer_trait::Indexer;
pub use response::{Context, Items, ItemsWithCursor, Response};
pub use types::{
- AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs, AddressQueueData,
- AddressWithTree, CompressedAccount, CompressedTokenAccount, Hash, InputQueueData, MerkleProof,
+ AccountInterface, AccountProofInputs, Address, AddressMerkleTreeAccounts, AddressProofInputs,
+ AddressQueueData, AddressWithTree, ColdContext, ColdData, CompressedAccount,
+ CompressedTokenAccount, Hash, InputQueueData, InterfaceTreeInfo, MerkleProof,
MerkleProofWithContext, NewAddressProofWithContext, NextTreeInfo, OutputQueueData,
OwnerBalance, ProofOfLeaf, QueueElementsResult, QueueInfo, QueueInfoResult, RootIndex,
- SignatureWithMetadata, StateMerkleTreeAccounts, StateQueueData, TokenBalance, TreeInfo,
- ValidityProofWithContext,
+ SignatureWithMetadata, SolanaAccountData, StateMerkleTreeAccounts, StateQueueData,
+ TokenAccountInterface, TokenBalance, TreeInfo, ValidityProofWithContext,
};
mod options;
pub use options::*;
diff --git a/sdk-libs/client/src/indexer/options.rs b/sdk-libs/client/src/indexer/options.rs
index dbbf699fb5..87fc8f4e8a 100644
--- a/sdk-libs/client/src/indexer/options.rs
+++ b/sdk-libs/client/src/indexer/options.rs
@@ -2,7 +2,7 @@ use photon_api::models::{FilterSelector, Memcmp};
use solana_account_decoder_client_types::UiDataSliceConfig;
use solana_pubkey::Pubkey;
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, Default)]
pub struct GetCompressedTokenAccountsByOwnerOrDelegateOptions {
pub mint: Option<Pubkey>,
pub cursor: Option<String>,
diff --git a/sdk-libs/client/src/indexer/photon_indexer.rs b/sdk-libs/client/src/indexer/photon_indexer.rs
index a220c16554..c0ba68a686 100644
--- a/sdk-libs/client/src/indexer/photon_indexer.rs
+++ b/sdk-libs/client/src/indexer/photon_indexer.rs
@@ -2,6 +2,7 @@ use std::{fmt::Debug, time::Duration};
use async_trait::async_trait;
use bs58;
+use light_sdk_types::constants::STATE_MERKLE_TREE_CANOPY_DEPTH;
use photon_api::{
apis::configuration::{ApiKey, Configuration},
models::GetCompressedAccountsByOwnerPostRequestParams,
@@ -10,7 +11,8 @@ use solana_pubkey::Pubkey;
use tracing::{error, trace, warn};
use super::types::{
- CompressedAccount, CompressedTokenAccount, OwnerBalance, SignatureWithMetadata, TokenBalance,
+ AccountInterface, CompressedAccount, CompressedTokenAccount, OwnerBalance,
+ SignatureWithMetadata, TokenAccountInterface, TokenBalance,
};
use crate::indexer::{
base58::Base58Conversions,
@@ -895,8 +897,8 @@ impl Indexer for PhotonIndexer {
.value
.items
.iter()
- .map(SignatureWithMetadata::try_from)
- .collect::<Result<Vec<_>, IndexerError>>()?;
+ .map(SignatureWithMetadata::from)
+ .collect::<Vec<_>>();
Ok(Response {
context: Context {
@@ -947,8 +949,8 @@ impl Indexer for PhotonIndexer {
.value
.items
.iter()
- .map(SignatureWithMetadata::try_from)
- .collect::<Result<Vec<_>, IndexerError>>()?;
+ .map(SignatureWithMetadata::from)
+ .collect::<Vec<_>>();
let cursor = api_response.value.cursor;
@@ -1003,8 +1005,8 @@ impl Indexer for PhotonIndexer {
.value
.items
.iter()
- .map(SignatureWithMetadata::try_from)
- .collect::<Result<Vec<_>, IndexerError>>()?;
+ .map(SignatureWithMetadata::from)
+ .collect::<Vec<_>>();
let cursor = api_response.value.cursor;
@@ -1060,8 +1062,8 @@ impl Indexer for PhotonIndexer {
.value
.items
.iter()
- .map(SignatureWithMetadata::try_from)
- .collect::<Result<Vec<_>, IndexerError>>()?;
+ .map(SignatureWithMetadata::from)
+ .collect::<Vec<_>>();
let cursor = api_response.value.cursor;
@@ -1173,7 +1175,14 @@ impl Indexer for PhotonIndexer {
.iter()
.map(|x| {
let mut proof_vec = x.proof.clone();
- proof_vec.truncate(proof_vec.len() - 10); // Remove canopy
+ if proof_vec.len() < STATE_MERKLE_TREE_CANOPY_DEPTH {
+ return Err(IndexerError::InvalidParameters(format!(
+ "Merkle proof length ({}) is less than canopy depth ({})",
+ proof_vec.len(),
+ STATE_MERKLE_TREE_CANOPY_DEPTH,
+ )));
+ }
+ proof_vec.truncate(proof_vec.len() - STATE_MERKLE_TREE_CANOPY_DEPTH);
let proof = proof_vec
.iter()
@@ -1330,7 +1339,15 @@ impl Indexer for PhotonIndexer {
.map(|x: &String| Hash::from_base58(x))
.collect::<Result<Vec<_>, IndexerError>>()?;
- proof_vec.truncate(proof_vec.len() - 10); // Remove canopy
+ const ADDRESS_TREE_CANOPY_DEPTH: usize = 10;
+ if proof_vec.len() < ADDRESS_TREE_CANOPY_DEPTH {
+ return Err(IndexerError::InvalidParameters(format!(
+ "Address proof length ({}) is less than canopy depth ({})",
+ proof_vec.len(),
+ ADDRESS_TREE_CANOPY_DEPTH,
+ )));
+ }
+ proof_vec.truncate(proof_vec.len() - ADDRESS_TREE_CANOPY_DEPTH);
let mut proof_arr = [[0u8; 32]; 16];
proof_arr.copy_from_slice(&proof_vec);
@@ -1778,3 +1795,198 @@ impl Indexer for PhotonIndexer {
}
}
}
+
+// ============ Interface Methods ============
+// These methods use the Interface endpoints that race hot (on-chain) and cold (compressed) lookups
+
+impl PhotonIndexer {
+ /// Get account data from either on-chain or compressed sources.
+ /// Races both lookups and returns the result with the higher slot.
+ pub async fn get_account_interface(
+ &self,
+ address: &Pubkey,
+ config: Option<IndexerRpcConfig>,
+ ) -> Result<Response<Option<AccountInterface>>, IndexerError> {
+ let config = config.unwrap_or_default();
+ self.retry(config.retry_config, || async {
+ let request = photon_api::models::GetAccountInterfacePostRequest::new(
+ photon_api::models::GetAccountInterfacePostRequestParams::new(address.to_string()),
+ );
+
+ let result = photon_api::apis::default_api::get_account_interface_post(
+ &self.configuration,
+ request,
+ )
+ .await?;
+
+ let api_response = Self::extract_result_with_error_check(
+ "get_account_interface",
+ result.error,
+ result.result.map(|r| *r),
+ )?;
+
+ if api_response.context.slot < config.slot {
+ return Err(IndexerError::IndexerNotSyncedToSlot);
+ }
+
+ let account = match api_response.value {
+ Some(boxed) => Some(AccountInterface::try_from(boxed.as_ref())?),
+ None => None,
+ };
+
+ Ok(Response {
+ context: Context {
+ slot: api_response.context.slot,
+ },
+ value: account,
+ })
+ })
+ .await
+ }
+
+ /// Get token account data from either on-chain or compressed sources.
+ /// Races both lookups and returns the result with the higher slot.
+ pub async fn get_token_account_interface(
+ &self,
+ address: &Pubkey,
+ config: Option<IndexerRpcConfig>,
+ ) -> Result<Response<Option<TokenAccountInterface>>, IndexerError> {
+ let config = config.unwrap_or_default();
+ self.retry(config.retry_config, || async {
+ let request = photon_api::models::GetTokenAccountInterfacePostRequest::new(
+ photon_api::models::GetTokenAccountInterfacePostRequestParams::new(
+ address.to_string(),
+ ),
+ );
+
+ let result = photon_api::apis::default_api::get_token_account_interface_post(
+ &self.configuration,
+ request,
+ )
+ .await?;
+
+ let api_response = Self::extract_result_with_error_check(
+ "get_token_account_interface",
+ result.error,
+ result.result.map(|r| *r),
+ )?;
+
+ if api_response.context.slot < config.slot {
+ return Err(IndexerError::IndexerNotSyncedToSlot);
+ }
+
+ let account = match api_response.value {
+ Some(boxed) => Some(TokenAccountInterface::try_from(boxed.as_ref())?),
+ None => None,
+ };
+
+ Ok(Response {
+ context: Context {
+ slot: api_response.context.slot,
+ },
+ value: account,
+ })
+ })
+ .await
+ }
+
+ /// Get Associated Token Account data from either on-chain or compressed sources.
+ /// Derives the Light Protocol ATA address from owner+mint, then races hot/cold lookups.
+ pub async fn get_associated_token_account_interface(
+ &self,
+ owner: &Pubkey,
+ mint: &Pubkey,
+ config: Option<IndexerRpcConfig>,
+ ) -> Result<Response<Option<TokenAccountInterface>>, IndexerError> {
+ let config = config.unwrap_or_default();
+ self.retry(config.retry_config, || async {
+ let request = photon_api::models::GetAtaInterfacePostRequest::new(
+ photon_api::models::GetAtaInterfacePostRequestParams::new(
+ owner.to_string(),
+ mint.to_string(),
+ ),
+ );
+
+ let result =
+ photon_api::apis::default_api::get_ata_interface_post(&self.configuration, request)
+ .await?;
+
+ let api_response = Self::extract_result_with_error_check(
+ "get_associated_token_account_interface",
+ result.error,
+ result.result.map(|r| *r),
+ )?;
+
+ if api_response.context.slot < config.slot {
+ return Err(IndexerError::IndexerNotSyncedToSlot);
+ }
+
+ let account = match api_response.value {
+ Some(boxed) => Some(TokenAccountInterface::try_from(boxed.as_ref())?),
+ None => None,
+ };
+
+ Ok(Response {
+ context: Context {
+ slot: api_response.context.slot,
+ },
+ value: account,
+ })
+ })
+ .await
+ }
+
+ /// Get multiple account interfaces in a batch.
+ /// Returns a vector where each element corresponds to an input address.
+ pub async fn get_multiple_account_interfaces(
+ &self,
+ addresses: Vec<&Pubkey>,
+ config: Option<IndexerRpcConfig>,
+ ) -> Result<Response<Vec<Option<AccountInterface>>>, IndexerError> {
+ let config = config.unwrap_or_default();
+ self.retry(config.retry_config, || async {
+ let address_strings: Vec<String> =
+ addresses.iter().map(|addr| addr.to_string()).collect();
+
+ let request = photon_api::models::GetMultipleAccountInterfacesPostRequest::new(
+ photon_api::models::GetMultipleAccountInterfacesPostRequestParams::new(
+ address_strings,
+ ),
+ );
+
+ let result = photon_api::apis::default_api::get_multiple_account_interfaces_post(
+ &self.configuration,
+ request,
+ )
+ .await?;
+
+ let api_response = Self::extract_result_with_error_check(
+ "get_multiple_account_interfaces",
+ result.error,
+ result.result.map(|r| *r),
+ )?;
+
+ if api_response.context.slot < config.slot {
+ return Err(IndexerError::IndexerNotSyncedToSlot);
+ }
+
+ let accounts: Result<Vec<Option<AccountInterface>>, IndexerError> = api_response
+ .value
+ .into_iter()
+ .map(|maybe_acc| {
+ maybe_acc
+ .map(|ai| AccountInterface::try_from(&ai))
+ .transpose()
+ })
+ .collect();
+
+ Ok(Response {
+ context: Context {
+ slot: api_response.context.slot,
+ },
+ value: accounts?,
+ })
+ })
+ .await
+ }
+}
diff --git a/sdk-libs/client/src/indexer/types.rs b/sdk-libs/client/src/indexer/types.rs
deleted file mode 100644
index 2cd0f6c8d8..0000000000
--- a/sdk-libs/client/src/indexer/types.rs
+++ /dev/null
@@ -1,1038 +0,0 @@
-use borsh::BorshDeserialize;
-use light_account::PackedAccounts;
-use light_compressed_account::{
- compressed_account::{
- CompressedAccount as ProgramCompressedAccount, CompressedAccountData,
- CompressedAccountWithMerkleContext,
- },
- instruction_data::compressed_proof::CompressedProof,
- TreeType,
-};
-use light_indexed_merkle_tree::array::IndexedElement;
-use light_sdk::instruction::{PackedAddressTreeInfo, PackedStateTreeInfo, ValidityProof};
-use light_token::compat::{AccountState, TokenData};
-use light_token_interface::state::ExtensionStruct;
-use num_bigint::BigUint;
-use solana_pubkey::Pubkey;
-use tracing::warn;
-
-use super::{
- base58::{decode_base58_option_to_pubkey, decode_base58_to_fixed_array},
- tree_info::QUEUE_TREE_MAPPING,
- IndexerError,
-};
-
-pub struct ProofOfLeaf {
- pub leaf: [u8; 32],
- pub proof: Vec<[u8; 32]>,
-}
-
-pub type Address = [u8; 32];
-pub type Hash = [u8; 32];
-
-#[derive(Debug, Clone, PartialEq)]
-pub struct QueueInfo {
- pub tree: Pubkey,
- pub queue: Pubkey,
- pub queue_type: u8,
- pub queue_size: u64,
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct QueueInfoResult {
- pub queues: Vec<QueueInfo>,
- pub slot: u64,
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct OutputQueueData {
- pub leaf_indices: Vec<u64>,
- pub account_hashes: Vec<[u8; 32]>,
- pub old_leaves: Vec<[u8; 32]>,
- pub first_queue_index: u64,
- /// The tree's next_index - where new leaves will be appended
- pub next_index: u64,
- /// Pre-computed hash chains per ZKP batch (from on-chain)
- pub leaves_hash_chains: Vec<[u8; 32]>,
-}
-
-/// V2 Input Queue Data
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct InputQueueData {
- pub leaf_indices: Vec<u64>,
- pub account_hashes: Vec<[u8; 32]>,
- pub current_leaves: Vec<[u8; 32]>,
- pub tx_hashes: Vec<[u8; 32]>,
- /// Pre-computed nullifiers from indexer
- pub nullifiers: Vec<[u8; 32]>,
- pub first_queue_index: u64,
- /// Pre-computed hash chains per ZKP batch (from on-chain)
- pub leaves_hash_chains: Vec<[u8; 32]>,
-}
-
-/// State queue data with shared tree nodes for output and input queues
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct StateQueueData {
- /// Shared deduplicated tree nodes for state queues (output + input)
- /// node_index encoding: (level << 56) | position
- pub nodes: Vec<u64>,
- pub node_hashes: Vec<[u8; 32]>,
- /// Initial root for the state tree (shared by output and input queues)
- pub initial_root: [u8; 32],
- /// Sequence number of the root
- pub root_seq: u64,
- /// Output queue data (if requested)
- pub output_queue: Option<OutputQueueData>,
- /// Input queue data (if requested)
- pub input_queue: Option<InputQueueData>,
-}
-
-/// V2 Address Queue Data with deduplicated nodes
-/// Proofs are reconstructed from `nodes`/`node_hashes` using `low_element_indices`
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct AddressQueueData {
- pub addresses: Vec<[u8; 32]>,
- pub low_element_values: Vec<[u8; 32]>,
- pub low_element_next_values: Vec<[u8; 32]>,
- pub low_element_indices: Vec<u64>,
- pub low_element_next_indices: Vec<u64>,
- /// Deduplicated node indices - encoding: (level << 56) | position
- pub nodes: Vec<u64>,
- /// Hashes corresponding to each node index
- pub node_hashes: Vec<[u8; 32]>,
- pub initial_root: [u8; 32],
- pub leaves_hash_chains: Vec<[u8; 32]>,
- pub subtrees: Vec<[u8; 32]>,
- pub start_index: u64,
- pub root_seq: u64,
-}
-
-impl AddressQueueData {
- /// Reconstruct a merkle proof for a given low_element_index from the deduplicated nodes.
- /// The tree_height is needed to know how many levels to traverse.
- pub fn reconstruct_proof(
- &self,
- address_idx: usize,
- tree_height: u8,
- ) -> Result<Vec<[u8; 32]>, IndexerError> {
- let leaf_index = self.low_element_indices[address_idx];
- let mut proof = Vec::with_capacity(tree_height as usize);
- let mut pos = leaf_index;
-
- for level in 0..tree_height {
- let sibling_pos = if pos.is_multiple_of(2) {
- pos + 1
- } else {
- pos - 1
- };
- let sibling_idx = Self::encode_node_index(level, sibling_pos);
-
- if let Some(hash_idx) = self.nodes.iter().position(|&n| n == sibling_idx) {
- proof.push(self.node_hashes[hash_idx]);
- } else {
- return Err(IndexerError::MissingResult {
- context: "reconstruct_proof".to_string(),
- message: format!(
- "Missing proof node at level {} position {} (encoded: {})",
- level, sibling_pos, sibling_idx
- ),
- });
- }
- pos /= 2;
- }
-
- Ok(proof)
- }
-
- /// Reconstruct all proofs for all addresses
- pub fn reconstruct_all_proofs(
- &self,
- tree_height: u8,
- ) -> Result<Vec<Vec<[u8; 32]>>, IndexerError> {
- (0..self.addresses.len())
- .map(|i| self.reconstruct_proof(i, tree_height))
- .collect()
- }
-
- /// Encode node index: (level << 56) | position
- #[inline]
- fn encode_node_index(level: u8, position: u64) -> u64 {
- ((level as u64) << 56) | position
- }
-}
-
-/// V2 Queue Elements Result with deduplicated node data
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct QueueElementsResult {
- pub state_queue: Option,
- pub address_queue: Option,
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct MerkleProofWithContext {
- pub proof: Vec<[u8; 32]>,
- pub root: [u8; 32],
- pub leaf_index: u64,
- pub leaf: [u8; 32],
- pub merkle_tree: [u8; 32],
- pub root_seq: u64,
- pub tx_hash: Option<[u8; 32]>,
- pub account_hash: [u8; 32],
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
-pub struct MerkleProof {
- pub hash: [u8; 32],
- pub leaf_index: u64,
- pub merkle_tree: Pubkey,
- pub proof: Vec<[u8; 32]>,
- pub root_seq: u64,
- pub root: [u8; 32],
-}
-
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub struct AddressWithTree {
- pub address: Address,
- pub tree: Pubkey,
-}
-
-#[derive(Clone, Default, Debug, PartialEq)]
-pub struct NewAddressProofWithContext {
- pub merkle_tree: Pubkey,
- pub root: [u8; 32],
- pub root_seq: u64,
- pub low_address_index: u64,
- pub low_address_value: [u8; 32],
- pub low_address_next_index: u64,
- pub low_address_next_value: [u8; 32],
- pub low_address_proof: Vec<[u8; 32]>,
- pub new_low_element: Option<IndexedElement<usize>>,
- pub new_element: Option<IndexedElement<usize>>,
- pub new_element_next_value: Option<BigUint>,
-}
-
-#[derive(Debug, Default, Clone, PartialEq)]
-pub struct ValidityProofWithContext {
- pub proof: ValidityProof,
- pub accounts: Vec<AccountProofInputs>,
- pub addresses: Vec<AddressProofInputs>,
-}
-
-// TODO: add get_public_inputs
-// -> to make it easier to use light-verifier with get_validity_proof()
-impl ValidityProofWithContext {
- pub fn get_root_indices(&self) -> Vec