diff --git a/.cargo/config.toml b/.cargo/config.toml index 67febe85..cd47f858 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,5 @@ [alias] -surfpool-install = "install --path crates/cli --locked --force --features supervisor_ui --features version_check" -surfpool-install-dev = "install --path crates/cli --locked --force --features supervisor_ui" +surfpool-install = "install --path crates/cli --locked --force --features supervisor_ui --features version_check --features sqlite" +surfpool-install-dev = "install --path crates/cli --locked --force --features supervisor_ui --features sqlite" # useful for local builds that point to local txtx crates - prevents conflicts with the supervisor_ui feature surfpool-install-minimal = "install --path crates/cli --locked --force" diff --git a/Cargo.lock b/Cargo.lock index 35aa5de9..770eebb0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12285,16 +12285,21 @@ dependencies = [ "solana-slot-hashes 3.0.0", "solana-system-interface 2.0.0", "solana-sysvar 3.0.0", + "solana-sysvar-id 3.0.0", "solana-transaction", + "solana-transaction-context", "solana-transaction-error 3.0.0", "solana-transaction-status", "solana-version", "spl-associated-token-account-interface", "spl-token-2022-interface", "spl-token-interface", + "surfpool-db", "surfpool-subgraph", "surfpool-types", + "tempfile", "test-case", + "thiserror 2.0.16", "tokio", "txtx-addon-kit", "txtx-addon-network-svm", diff --git a/Cargo.toml b/Cargo.toml index c24135b4..9dec37c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -139,6 +139,7 @@ solana-signer = { version = "3.0.0", default-features = false } solana-slot-hashes = { version = "3.0.0", default-features = false } solana-system-interface = { version = "2.0.0", default-features = false } solana-sysvar = { version = "3.0.0", default-features = false } +solana-sysvar-id = { version = "3.0.0", default-features = false } solana-transaction = { version = "3.0.0", default-features = false } solana-transaction-context = 
{ version = "3.0.0", default-features = false } solana-transaction-error = { version = "3.0.0", default-features = false } @@ -147,7 +148,9 @@ solana-version = { version = "3.0.0", default-features = false } spl-associated-token-account-interface = { version = "2.0.0", default-features = false } spl-token-2022-interface = { version = "2.0.0", default-features = false } spl-token-interface = { version = "2.0.0", default-features = false } +tempfile = "3.23.0" test-case = "^3.3.1" +thiserror = "2.0" tokio = { version = "1.43.0", default-features = false } tokio-tungstenite = { version = "=0.20.1", default-features = false } toml = { version = "0.8.23", default-features = false } diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index a93192ef..76a1a262 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -73,8 +73,8 @@ cli = ["clap/derive", "clap/env", "clap_complete", "toml", "ctrlc", "hiro-system supervisor_ui = ["txtx-supervisor-ui/crates_build"] explorer = [] geyser_plugin = ["surfpool-core/geyser_plugin"] -sqlite = ["surfpool-gql/sqlite"] -postgres = ["surfpool-gql/postgres"] +sqlite = ["surfpool-gql/sqlite", "surfpool-core/sqlite"] +postgres = ["surfpool-gql/postgres", "surfpool-core/postgres"] version_check = [] subgraph = ["surfpool-core/subgraph"] diff --git a/crates/cli/src/cli/mod.rs b/crates/cli/src/cli/mod.rs index ca457368..f8e7aaaa 100644 --- a/crates/cli/src/cli/mod.rs +++ b/crates/cli/src/cli/mod.rs @@ -245,6 +245,12 @@ pub struct StartSimnet { /// A set of inputs to use for the runbook (eg. surfpool start --runbook-input myInputs.json) #[arg(long = "runbook-input", short = 'i')] pub runbook_input: Vec, + /// Surfnet database connection URL for persistent Surfnets. For an in-memory sqlite database, use ":memory:". For an on-disk sqlite database, use a filename ending in '.sqlite'. + #[arg(long = "db")] + pub db: Option, + /// Unique identifier for this surfnet instance. 
Used to isolate database storage when multiple surfnets share the same database. Defaults to 0. + #[arg(long = "surfnet-id", default_value_t = 0)] + pub surfnet_id: u32, } fn parse_svm_feature(s: &str) -> Result { @@ -396,6 +402,7 @@ impl StartSimnet { }, feature_config: self.feature_config(), skip_signature_verification: false, + surfnet_id: self.surfnet_id, } } diff --git a/crates/cli/src/cli/simnet/mod.rs b/crates/cli/src/cli/simnet/mod.rs index 2c695525..7fb8abde 100644 --- a/crates/cli/src/cli/simnet/mod.rs +++ b/crates/cli/src/cli/simnet/mod.rs @@ -60,7 +60,9 @@ pub async fn handle_start_local_surfnet_command( } // We start the simnet as soon as possible, as it needs to be ready for deployments - let (mut surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (mut surfnet_svm, simnet_events_rx, geyser_events_rx) = + SurfnetSvm::new_with_db(cmd.db.as_deref(), cmd.surfnet_id) + .map_err(|e| format!("Failed to initialize Surfnet SVM: {}", e))?; // Apply feature configuration from CLI flags let feature_config = cmd.feature_config(); @@ -137,7 +139,7 @@ pub async fn handle_start_local_surfnet_command( let config_copy = config.clone(); let simnet_events_tx_for_thread = simnet_events_tx.clone(); - let _handle = hiro_system_kit::thread_named("simnet") + let simnet_handle = hiro_system_kit::thread_named("simnet") .spawn(move || { let future = start_local_surfnet( surfnet_svm, @@ -226,6 +228,9 @@ pub async fn handle_start_local_surfnet_command( ) .await; + // Wait for the simnet thread to finish cleanup (including Drop/checkpoint) + let _ = simnet_handle.join(); + Ok(()) } @@ -273,6 +278,7 @@ async fn start_service( if let Some(explorer_handle) = explorer_handle { let _ = explorer_handle.stop(true).await; } + Ok(()) } @@ -286,8 +292,11 @@ fn log_events( ) -> Result<(), String> { let mut deployment_completed = false; let do_stop_loop = runloop_terminator.clone(); + let terminate_tx = simnet_commands_tx.clone(); ctrlc::set_handler(move || { 
do_stop_loop.store(true, Ordering::Relaxed); + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = terminate_tx.send(SimnetCommand::Terminate(None)); }) .expect("Error setting Ctrl-C handler"); diff --git a/crates/cli/src/tui/simnet.rs b/crates/cli/src/tui/simnet.rs index 27d1e2c1..911a18f3 100644 --- a/crates/cli/src/tui/simnet.rs +++ b/crates/cli/src/tui/simnet.rs @@ -705,10 +705,16 @@ fn run_app(terminal: &mut Terminal, mut app: App) -> io::Result<( if key_event.kind == KeyEventKind::Press { use KeyCode::*; if key_event.modifiers == KeyModifiers::CONTROL && key_event.code == Char('c') { + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = app.simnet_commands_tx.send(SimnetCommand::Terminate(None)); return Ok(()); } match key_event.code { - Char('q') | Esc => return Ok(()), + Char('q') | Esc => { + // Send terminate command to allow graceful shutdown (Drop to run) + let _ = app.simnet_commands_tx.send(SimnetCommand::Terminate(None)); + return Ok(()); + } Down => app.next(), Up => app.previous(), Char('f') | Char('j') => { diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index ca47710e..3218a140 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -77,16 +77,20 @@ solana-signer = { workspace = true } solana-slot-hashes = { workspace = true } solana-system-interface = { workspace = true } solana-sysvar = { workspace = true } +solana-sysvar-id = { workspace = true } solana-transaction = { workspace = true } +solana-transaction-context = { workspace = true } solana-transaction-error = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } spl-associated-token-account-interface = { workspace = true } spl-token-interface = { workspace = true } spl-token-2022-interface = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } uuid = { workspace = true } +surfpool-db = { workspace = true } surfpool-subgraph = { 
workspace = true, optional = true } surfpool-types = { workspace = true } @@ -99,8 +103,12 @@ txtx-addon-network-svm = { workspace = true } [dev-dependencies] test-case = { workspace = true } env_logger = "*" +tempfile = { workspace = true } [features] +default = ["sqlite"] +sqlite = ["surfpool-db/sqlite"] +postgres = ["surfpool-db/postgres"] ignore_tests_ci = [] geyser_plugin = ["solana-geyser-plugin-manager"] subgraph = ["surfpool-subgraph"] diff --git a/crates/core/src/error.rs b/crates/core/src/error.rs index 370670ab..60de4047 100644 --- a/crates/core/src/error.rs +++ b/crates/core/src/error.rs @@ -2,6 +2,7 @@ use std::{fmt::Display, future::Future, pin::Pin}; use crossbeam_channel::TrySendError; use jsonrpc_core::{Error, Result}; +use litesvm::error::LiteSVMError; use serde::Serialize; use serde_json::json; use solana_client::{client_error::ClientError, rpc_request::TokenAccountsFilter}; @@ -9,6 +10,8 @@ use solana_clock::Slot; use solana_pubkey::Pubkey; use solana_transaction_status::EncodeError; +use crate::storage::StorageError; + pub type SurfpoolResult = std::result::Result; #[derive(Debug, Clone)] @@ -447,3 +450,19 @@ impl SurfpoolError { Self(error) } } + +impl From for SurfpoolError { + fn from(e: StorageError) -> Self { + let mut error = Error::internal_error(); + error.data = Some(json!(format!("Storage error: {}", e.to_string()))); + SurfpoolError(error) + } +} + +impl From for SurfpoolError { + fn from(e: LiteSVMError) -> Self { + let mut error = Error::internal_error(); + error.data = Some(json!(format!("LiteSVM error: {}", e.to_string()))); + SurfpoolError(error) + } +} diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index dfc81b56..9f1d3ebd 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -15,6 +15,7 @@ pub mod helpers; pub mod rpc; pub mod runloops; pub mod scenarios; +pub mod storage; pub mod surfnet; pub mod types; diff --git a/crates/core/src/rpc/accounts_data.rs b/crates/core/src/rpc/accounts_data.rs index 
9d7cb88f..eafb6451 100644 --- a/crates/core/src/rpc/accounts_data.rs +++ b/crates/core/src/rpc/accounts_data.rs @@ -473,12 +473,13 @@ impl AccountsData for SurfpoolAccountsDataRpc { // get the info we need and free up lock before validation let (current_slot, block_exists) = meta .with_svm_reader(|svm_reader| { - ( - svm_reader.get_latest_absolute_slot(), - svm_reader.blocks.contains_key(&block), - ) + svm_reader + .blocks + .contains_key(&block) + .map_err(SurfpoolError::from) + .map(|exists| (svm_reader.get_latest_absolute_slot(), exists)) }) - .map_err(Into::::into)?; + .map_err(Into::::into)??; // block is valid if it exists in our block history or it's not too far in the future if !block_exists && block > current_slot { @@ -816,17 +817,20 @@ mod tests { setup.context.svm_locker.with_svm_writer(|svm_writer| { use crate::surfnet::BlockHeader; - svm_writer.blocks.insert( - test_slot, - BlockHeader { - hash: SyntheticBlockhash::new(test_slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(test_slot - 1).to_string(), - parent_slot: test_slot - 1, - block_time: chrono::Utc::now().timestamp_millis(), - block_height: test_slot, - signatures: vec![], - }, - ); + svm_writer + .blocks + .store( + test_slot, + BlockHeader { + hash: SyntheticBlockhash::new(test_slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(test_slot - 1).to_string(), + parent_slot: test_slot - 1, + block_time: chrono::Utc::now().timestamp_millis(), + block_height: test_slot, + signatures: vec![], + }, + ) + .unwrap(); }); let result = setup @@ -1078,6 +1082,7 @@ mod tests { .context .svm_locker .airdrop(&fee_payer.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Airdrop 1 SOL to recipient for rent exemption @@ -1085,6 +1090,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint @@ -1291,6 +1297,7 @@ mod tests { .context .svm_locker .airdrop(&fee_payer.pubkey(), 1_000_000_000) + 
.unwrap() .unwrap(); // Airdrop 1 SOL to recipient for rent exemption @@ -1298,6 +1305,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint diff --git a/crates/core/src/rpc/bank_data.rs b/crates/core/src/rpc/bank_data.rs index 14746c1d..40205dfd 100644 --- a/crates/core/src/rpc/bank_data.rs +++ b/crates/core/src/rpc/bank_data.rs @@ -422,9 +422,14 @@ impl BankData for SurfpoolBankDataRpc { } fn get_inflation_rate(&self, meta: Self::Metadata) -> Result { - meta.with_svm_reader(|svm_reader| { - let inflation_activation_slot = - svm_reader.blocks.keys().min().copied().unwrap_or_default(); + meta.with_svm_reader(|svm_reader| -> RpcInflationRate { + let inflation_activation_slot = svm_reader + .blocks + .keys() + .unwrap_or_default() + .into_iter() + .min() + .unwrap_or_default(); let epoch_schedule = svm_reader.inner.get_sysvar::(); let inflation_start_slot = epoch_schedule.get_first_slot_in_epoch( epoch_schedule diff --git a/crates/core/src/rpc/full.rs b/crates/core/src/rpc/full.rs index a5053b7f..2154dc86 100644 --- a/crates/core/src/rpc/full.rs +++ b/crates/core/src/rpc/full.rs @@ -1531,7 +1531,7 @@ impl Full for SurfpoolFullRpc { }; let svm_locker = ctx.svm_locker; let res = svm_locker - .airdrop(&pubkey, lamports) + .airdrop(&pubkey, lamports)? 
.map_err(|err| Error::invalid_params(format!("failed to send transaction: {err:?}")))?; let _ = ctx .simnet_commands_tx @@ -1789,11 +1789,11 @@ impl Full for SurfpoolFullRpc { .await .map_err(|e| SurfpoolError::client_error(e).into()) } else { - let min_slot = svm_locker.with_svm_reader(|svm_reader| { - svm_reader.blocks.keys().min().copied().unwrap_or(0) - }); - - Ok(min_slot) + svm_locker.with_svm_reader(|svm_reader| { + Ok::<_, jsonrpc_core::Error>( + svm_reader.blocks.keys()?.into_iter().min().unwrap_or(0), + ) + }) } }) } @@ -1833,11 +1833,13 @@ impl Full for SurfpoolFullRpc { Box::pin(async move { let block_time = svm_locker.with_svm_reader(|svm_reader| { - svm_reader - .blocks - .get(&slot) - .map(|block| (block.block_time / 1_000) as UnixTimestamp) - }); + Ok::<_, jsonrpc_core::Error>( + svm_reader + .blocks + .get(&slot)? + .map(|block| (block.block_time / 1_000) as UnixTimestamp), + ) + })?; Ok(block_time) }) } @@ -1892,27 +1894,28 @@ impl Full for SurfpoolFullRpc { .map(|end| end.min(committed_latest_slot)) .unwrap_or(committed_latest_slot); - let (local_min_slot, local_slots, effective_end_slot) = - if effective_end_slot < start_slot { - (None, vec![], effective_end_slot) - } else { - svm_locker.with_svm_reader(|svm_reader| { - let local_min_slot = svm_reader.blocks.keys().min().copied(); - - let local_slots: Vec = svm_reader - .blocks - .keys() - .filter(|&&slot| { - slot >= start_slot - && slot <= effective_end_slot - && slot <= committed_latest_slot - }) - .copied() - .collect(); - - (local_min_slot, local_slots, effective_end_slot) - }) - }; + let (local_min_slot, local_slots, effective_end_slot) = if effective_end_slot + < start_slot + { + (None, vec![], effective_end_slot) + } else { + svm_locker.with_svm_reader(|svm_reader| { + let local_min_slot = svm_reader.blocks.keys()?.into_iter().min(); + + let local_slots: Vec = svm_reader + .blocks + .keys()? 
+ .into_iter() + .filter(|slot| { + *slot >= start_slot + && *slot <= effective_end_slot + && *slot <= committed_latest_slot + }) + .collect(); + + Ok::<_, jsonrpc_core::Error>((local_min_slot, local_slots, effective_end_slot)) + })? + }; if let Some(min_context_slot) = config.min_context_slot { if committed_latest_slot < min_context_slot { @@ -2014,17 +2017,17 @@ impl Full for SurfpoolFullRpc { Box::pin(async move { let committed_latest_slot = svm_locker.get_slot_for_commitment(&commitment); let (local_min_slot, local_slots) = svm_locker.with_svm_reader(|svm_reader| { - let local_min_slot = svm_reader.blocks.keys().min().copied(); + let local_min_slot = svm_reader.blocks.keys()?.into_iter().min(); let local_slots: Vec = svm_reader .blocks - .keys() - .filter(|&&slot| slot >= start_slot && slot <= committed_latest_slot) - .copied() + .keys()? + .into_iter() + .filter(|slot| *slot >= start_slot && *slot <= committed_latest_slot) .collect(); - (local_min_slot, local_slots) - }); + Ok::<_, jsonrpc_core::Error>((local_min_slot, local_slots)) + })?; if let Some(min_context_slot) = config.min_context_slot { if committed_latest_slot < min_context_slot { @@ -2138,8 +2141,15 @@ impl Full for SurfpoolFullRpc { fn get_first_available_block(&self, meta: Self::Metadata) -> Result { meta.with_svm_reader(|svm_reader| { - svm_reader.blocks.keys().min().copied().unwrap_or_default() - }) + Ok::<_, jsonrpc_core::Error>( + svm_reader + .blocks + .keys()? + .into_iter() + .min() + .unwrap_or_default(), + ) + })? .map_err(Into::into) } @@ -2289,7 +2299,7 @@ impl Full for SurfpoolFullRpc { // Get MAX_PRIORITIZATION_FEE_BLOCKS_CACHE most recent blocks let recent_headers = blocks - .into_iter() + .into_iter()? 
.sorted_by_key(|(slot, _)| std::cmp::Reverse(*slot)) .take(MAX_PRIORITIZATION_FEE_BLOCKS_CACHE) .collect::>(); @@ -2303,7 +2313,7 @@ impl Full for SurfpoolFullRpc { .iter() .filter_map(|signature| { // Check if the signature exists in the transactions map - transactions.get(signature).map(|tx| (slot, tx)) + transactions.get(&signature.to_string()).ok().flatten().map(|tx| (slot, tx)) }) .collect::>() }) @@ -2523,10 +2533,10 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - writer.transactions.insert( - sig, + writer.transactions.store( + sig.to_string(), SurfnetTransactionStatus::processed(tx_with_status_meta, mutated_accounts), - ); + ).unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Confirmed, @@ -2700,16 +2710,21 @@ mod tests { let sig = Signature::from_str(res.as_str()).unwrap(); let state_reader = setup.context.svm_locker.0.blocking_read(); assert_eq!( - state_reader.inner.get_account(&pk).unwrap().lamports, + state_reader + .inner + .get_account(&pk) + .unwrap() + .unwrap() + .lamports, lamports, "airdropped amount is incorrect" ); assert!( - state_reader.inner.get_transaction(&sig).is_some(), + state_reader.get_transaction(&sig).unwrap().is_some(), "transaction is not found in the SVM" ); assert!( - state_reader.transactions.get(&sig).is_some(), + state_reader.transactions.get(&sig.to_string()).unwrap().is_some(), "transaction is not found in the history" ); } @@ -3230,17 +3245,20 @@ mod tests { let block_height = svm_writer.chain_tip.index; let parent_slot = svm_writer.get_latest_absolute_slot(); - svm_writer.blocks.insert( - parent_slot, - BlockHeader { - hash, - previous_blockhash: previous_chain_tip.hash.clone(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height, + svm_writer + .blocks + .store( parent_slot, - signatures: Vec::new(), - }, - ); + BlockHeader { + hash, + previous_blockhash: previous_chain_tip.hash.clone(), + block_time: 
chrono::Utc::now().timestamp_millis(), + block_height, + parent_slot, + signatures: Vec::new(), + }, + ) + .unwrap(); } let res = setup @@ -3827,18 +3845,21 @@ mod tests { let slots: Vec = slots.into_iter().collect(); setup.context.svm_locker.with_svm_writer(|svm_writer| { for slot in slots.iter() { - svm_writer.blocks.insert( - *slot, - BlockHeader { - hash: SyntheticBlockhash::new(*slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(slot.saturating_sub(1)) - .to_string(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height: *slot, - parent_slot: slot.saturating_sub(1), - signatures: vec![], - }, - ); + svm_writer + .blocks + .store( + *slot, + BlockHeader { + hash: SyntheticBlockhash::new(*slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(slot.saturating_sub(1)) + .to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: *slot, + parent_slot: slot.saturating_sub(1), + signatures: vec![], + }, + ) + .unwrap(); } svm_writer.latest_epoch_info.absolute_slot = slots.into_iter().max().unwrap_or(0); }); @@ -3899,7 +3920,7 @@ mod tests { insert_test_blocks(&setup, local_slots); let local_min = setup.context.svm_locker.with_svm_reader(|svm_reader| { - let min = svm_reader.blocks.keys().min().copied(); + let min = svm_reader.blocks.keys().unwrap().into_iter().min(); min }); assert_eq!(local_min, Some(50), "Local minimum should be slot 50"); @@ -3989,9 +4010,8 @@ mod tests { }); let (local_min, latest_slot) = setup.context.svm_locker.with_svm_reader(|svm_reader| { - let min = svm_reader.blocks.keys().min().copied(); + let min = svm_reader.blocks.keys().unwrap().into_iter().min(); let latest = svm_reader.get_latest_absolute_slot(); - let _available: Vec<_> = svm_reader.blocks.keys().copied().collect(); (min, latest) }); assert_eq!(local_min, Some(100), "Local minimum should be 100"); @@ -4545,13 +4565,13 @@ mod tests { ..Default::default() }; let mutated_accounts = std::collections::HashSet::new(); - 
writer.transactions.insert( - sig, + writer.transactions.store( + sig.to_string(), SurfnetTransactionStatus::processed( tx_with_status_meta, mutated_accounts, ), - ); + ).unwrap(); status_tx .send(TransactionStatusEvent::Success( TransactionConfirmationStatus::Processed, diff --git a/crates/core/src/rpc/minimal.rs b/crates/core/src/rpc/minimal.rs index 8e67b729..8bfab80f 100644 --- a/crates/core/src/rpc/minimal.rs +++ b/crates/core/src/rpc/minimal.rs @@ -693,9 +693,8 @@ impl Minimal for SurfpoolMinimalRpc { let config = config.unwrap_or_default(); if let Some(target_slot) = config.min_context_slot { - let block_exists = meta - .with_svm_reader(|svm_reader| svm_reader.blocks.contains_key(&target_slot)) - .map_err(Into::::into)?; + let block_exists = + meta.with_svm_reader(|svm_reader| svm_reader.blocks.contains_key(&target_slot))??; if !block_exists { return Err(jsonrpc_core::Error::invalid_params(format!( @@ -707,22 +706,23 @@ impl Minimal for SurfpoolMinimalRpc { meta.with_svm_reader(|svm_reader| { if let Some(target_slot) = config.min_context_slot { - if let Some(block_header) = svm_reader.blocks.get(&target_slot) { - return block_header.block_height; + if let Some(block_header) = svm_reader.blocks.get(&target_slot)? { + return Ok(block_header.block_height); } } // default behavior: return the latest block height with commitment adjustments let latest_block_height = svm_reader.latest_epoch_info.block_height; - match config.commitment.unwrap_or_default().commitment { + let block_height = match config.commitment.unwrap_or_default().commitment { CommitmentLevel::Processed => latest_block_height, CommitmentLevel::Confirmed => latest_block_height.saturating_sub(1), CommitmentLevel::Finalized => { latest_block_height.saturating_sub(FINALIZATION_SLOT_THRESHOLD) } - } - }) + }; + Ok::(block_height) + })? 
.map_err(Into::into) } @@ -871,17 +871,20 @@ mod tests { { let mut svm_writer = setup.context.svm_locker.0.blocking_write(); for (slot, block_height) in &test_cases { - svm_writer.blocks.insert( - *slot, - crate::surfnet::BlockHeader { - hash: SyntheticBlockhash::new(*slot).to_string(), - previous_blockhash: SyntheticBlockhash::new(slot - 1).to_string(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height: *block_height, - parent_slot: slot - 1, - signatures: Vec::new(), - }, - ); + svm_writer + .blocks + .store( + *slot, + crate::surfnet::BlockHeader { + hash: SyntheticBlockhash::new(*slot).to_string(), + previous_blockhash: SyntheticBlockhash::new(slot - 1).to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: *block_height, + parent_slot: slot - 1, + signatures: Vec::new(), + }, + ) + .unwrap(); } } @@ -914,17 +917,20 @@ mod tests { { let mut svm_writer = setup.context.svm_locker.0.blocking_write(); - svm_writer.blocks.insert( - 100, - crate::surfnet::BlockHeader { - hash: SyntheticBlockhash::new(100).to_string(), - previous_blockhash: SyntheticBlockhash::new(99).to_string(), - block_time: chrono::Utc::now().timestamp_millis(), - block_height: 50, - parent_slot: 99, - signatures: Vec::new(), - }, - ); + svm_writer + .blocks + .store( + 100, + crate::surfnet::BlockHeader { + hash: SyntheticBlockhash::new(100).to_string(), + previous_blockhash: SyntheticBlockhash::new(99).to_string(), + block_time: chrono::Utc::now().timestamp_millis(), + block_height: 50, + parent_slot: 99, + signatures: Vec::new(), + }, + ) + .unwrap(); } // slot that definitely doesn't exist diff --git a/crates/core/src/rpc/surfnet_cheatcodes.rs b/crates/core/src/rpc/surfnet_cheatcodes.rs index 16eca302..80a0da5d 100644 --- a/crates/core/src/rpc/surfnet_cheatcodes.rs +++ b/crates/core/src/rpc/surfnet_cheatcodes.rs @@ -1513,7 +1513,7 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { Ok(locker) => locker, Err(e) => return Err(e.into()), }; - 
svm_locker.register_idl(idl, slot); + svm_locker.register_idl(idl, slot)?; Ok(RpcResponse { context: RpcResponseContext::new(svm_locker.get_latest_absolute_slot()), value: (), @@ -1571,21 +1571,24 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { )> = svm_locker.with_svm_reader(|svm_reader| { svm_reader .transactions - .iter() - .map(|(sig, status)| { - let (transaction_with_status_meta, _) = status.expect_processed(); - ( - sig.to_string(), - transaction_with_status_meta.slot, - transaction_with_status_meta.meta.status.clone().err(), - transaction_with_status_meta - .meta - .log_messages - .clone() - .unwrap_or_default(), - ) + .into_iter() + .map(|iter| { + iter.map(|(sig, status)| { + let (transaction_with_status_meta, _) = status.expect_processed(); + ( + sig, + transaction_with_status_meta.slot, + transaction_with_status_meta.meta.status.clone().err(), + transaction_with_status_meta + .meta + .log_messages + .clone() + .unwrap_or_default(), + ) + }) + .collect() }) - .collect() + .unwrap_or_default() }); items.sort_by(|a, b| b.1.cmp(&a.1)); @@ -1706,7 +1709,12 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { let svm_locker = meta.get_svm_locker()?; let value = svm_locker.with_svm_reader(|svm_reader| { - GetStreamedAccountsResponse::new(&svm_reader.streamed_accounts) + let accounts: Vec<_> = svm_reader + .streamed_accounts + .into_iter() + .map(|iter| iter.collect()) + .unwrap_or_default(); + GetStreamedAccountsResponse::from_iter(accounts) }); Ok(RpcResponse { @@ -1797,7 +1805,7 @@ impl SurfnetCheatcodes for SurfnetCheatcodesRpc { ) -> Result>> { let config = config.unwrap_or_default(); let svm_locker = meta.get_svm_locker()?; - let snapshot = svm_locker.export_snapshot(config); + let snapshot = svm_locker.export_snapshot(config)?; Ok(RpcResponse { context: RpcResponseContext::new(svm_locker.get_latest_absolute_slot()), value: snapshot, @@ -1866,6 +1874,7 @@ mod tests { .context .svm_locker .airdrop(&payer.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); 
// Airdrop 1 SOL to recipient for rent exemption @@ -1873,6 +1882,7 @@ mod tests { .context .svm_locker .airdrop(&recipient.pubkey(), 1_000_000_000) + .unwrap() .unwrap(); // Generate keypair to use as address of mint @@ -3056,10 +3066,9 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); - let program_account_before = client - .context - .svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_id.pubkey())); + let program_account_before = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + }); assert!( program_account_before.is_none(), "Program account should not exist initially" @@ -3085,10 +3094,9 @@ mod tests { ); // Verify program account was created - let program_account = client - .context - .svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_id.pubkey())); + let program_account = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + }); assert!( program_account.is_some(), "Program account should be created" @@ -3105,10 +3113,9 @@ mod tests { ); // Verify program data account was created - let program_data_account = client - .context - .svm_locker - .with_svm_reader(|svm_reader| svm_reader.inner.get_account(&program_data_address)); + let program_data_account = client.context.svm_locker.with_svm_reader(|svm_reader| { + svm_reader.inner.get_account(&program_data_address).unwrap() + }); assert!( program_data_account.is_some(), "Program data account should be created" @@ -3157,7 +3164,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + 
.unwrap() }); let metadata_size = @@ -3202,7 +3213,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3255,7 +3270,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3314,7 +3333,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3378,7 +3401,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3427,7 +3454,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3463,7 +3494,11 @@ mod tests { let program_data_address = 
solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3528,7 +3563,11 @@ mod tests { let program_data_address = solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3631,6 +3670,7 @@ mod tests { .inner .get_account(&program_data_address) .unwrap() + .unwrap() .lamports }); @@ -3653,6 +3693,7 @@ mod tests { .inner .get_account(&program_data_address) .unwrap() + .unwrap() .lamports }); @@ -3663,7 +3704,11 @@ mod tests { // Verify rent exemption let account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let required_lamports = client.context.svm_locker.with_svm_reader(|svm_reader| { @@ -3705,7 +3750,11 @@ mod tests { // Check program account ownership let program_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_id.pubkey()).unwrap() + svm_reader + .inner + .get_account(&program_id.pubkey()) + .unwrap() + .unwrap() }); assert_eq!( program_account.owner, @@ -3719,7 +3768,11 @@ mod tests { // Check program data account ownership let program_data_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); assert_eq!( program_data_account.owner, @@ -3776,7 +3829,11 @@ mod 
tests { // Get initial metadata let initial_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let metadata_size = @@ -3799,7 +3856,11 @@ mod tests { // Verify metadata is preserved let final_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); let final_metadata = final_account.data[..metadata_size].to_vec(); @@ -3837,7 +3898,11 @@ mod tests { solana_loader_v3_interface::get_program_data_address(&program_id.pubkey()); let first_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); // Second write (same data, same offset) @@ -3854,7 +3919,11 @@ mod tests { .unwrap(); let second_account = client.context.svm_locker.with_svm_reader(|svm_reader| { - svm_reader.inner.get_account(&program_data_address).unwrap() + svm_reader + .inner + .get_account(&program_data_address) + .unwrap() + .unwrap() }); assert_eq!( diff --git a/crates/core/src/runloops/mod.rs b/crates/core/src/runloops/mod.rs index aeea9160..af62380b 100644 --- a/crates/core/src/runloops/mod.rs +++ b/crates/core/src/runloops/mod.rs @@ -320,7 +320,8 @@ pub async fn start_block_production_runloop( } } SimnetCommand::Terminate(_) => { - let _ = svm_locker.simnet_events_tx().send(SimnetEvent::Aborted("Terminated due to inactivity.".to_string())); + // Explicitly shutdown storage to trigger WAL checkpoint before exiting + svm_locker.shutdown(); break; } SimnetCommand::StartRunbookExecution(runbook_id) => { diff --git a/crates/core/src/storage/hash_map.rs b/crates/core/src/storage/hash_map.rs new file mode 100644 index 
00000000..54b62ce3 --- /dev/null +++ b/crates/core/src/storage/hash_map.rs @@ -0,0 +1,43 @@ +use serde::{Deserialize, Serialize}; +pub use std::collections::HashMap; +use std::hash::Hash; + +impl super::Storage for HashMap +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static + std::cmp::Eq + Hash, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> super::StorageResult<()> { + self.insert(key, value); + Ok(()) + } + + fn clear(&mut self) -> super::StorageResult<()> { + self.clear(); + Ok(()) + } + + fn get(&self, key: &K) -> super::StorageResult> { + Ok(self.get(key).cloned()) + } + + fn take(&mut self, key: &K) -> super::StorageResult> { + Ok(self.remove(key)) + } + + fn keys(&self) -> super::StorageResult> { + Ok(self.keys().cloned().collect()) + } + + fn into_iter(&self) -> super::StorageResult + '_>> { + Ok(Box::new(self.clone().into_iter())) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn contains_key(&self, key: &K) -> super::StorageResult { + Ok(self.contains_key(key)) + } +} diff --git a/crates/core/src/storage/mod.rs b/crates/core/src/storage/mod.rs new file mode 100644 index 00000000..a33a5d77 --- /dev/null +++ b/crates/core/src/storage/mod.rs @@ -0,0 +1,356 @@ +mod hash_map; +#[cfg(feature = "postgres")] +mod postgres; +#[cfg(feature = "sqlite")] +mod sqlite; +pub use hash_map::HashMap as StorageHashMap; +#[cfg(feature = "postgres")] +pub use postgres::PostgresStorage; +#[cfg(feature = "sqlite")] +pub use sqlite::SqliteStorage; + +use crate::error::SurfpoolError; + +pub fn new_kv_store( + database_url: &Option<&str>, + table_name: &str, + surfnet_id: u32, +) -> StorageResult>> +where + K: serde::Serialize + + serde::de::DeserializeOwned + + Send + + Sync + + 'static + + Clone + + Eq + + std::hash::Hash, + V: serde::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static + Clone, +{ + match database_url { + Some(url) => { + 
#[cfg(feature = "postgres")] + if url.starts_with("postgres://") || url.starts_with("postgresql://") { + let storage = PostgresStorage::connect(url, table_name, surfnet_id)?; + Ok(Box::new(storage)) + } else { + #[cfg(feature = "sqlite")] + { + let storage = SqliteStorage::connect(url, table_name, surfnet_id)?; + Ok(Box::new(storage)) + } + #[cfg(not(feature = "sqlite"))] + { + Err(StorageError::InvalidPostgresUrl(url.to_string())) + } + } + + #[cfg(not(feature = "postgres"))] + if url.starts_with("postgres://") || url.starts_with("postgresql://") { + Err(StorageError::PostgresNotEnabled) + } else { + #[cfg(feature = "sqlite")] + { + let storage = SqliteStorage::connect( + database_url.unwrap_or(":memory:"), + table_name, + surfnet_id, + )?; + Ok(Box::new(storage)) + } + #[cfg(not(feature = "sqlite"))] + { + Err(StorageError::SqliteNotEnabled) + } + } + } + _ => { + let storage = StorageHashMap::new(); + Ok(Box::new(storage)) + } + } +} + +#[derive(Debug, thiserror::Error)] +pub enum StorageError { + #[error("Sqlite storage is not enabled in this build")] + SqliteNotEnabled, + #[error("Postgres storage is not enabled in this build")] + PostgresNotEnabled, + #[error("Invalid Postgres database URL: {0}")] + InvalidPostgresUrl(String), + #[error("Failed to get pooled connection for '{0}' database: {1}")] + PooledConnectionError(String, #[source] surfpool_db::diesel::r2d2::PoolError), + #[error("Failed to serialize key for '{0}' database: {1}")] + SerializeKeyError(String, serde_json::Error), + #[error("Failed to serialize value for '{0}' database: {1}")] + SerializeValueError(String, serde_json::Error), + #[error("Failed to deserialize value in '{0}' database: {1}")] + DeserializeValueError(String, serde_json::Error), + #[error("Failed to acquire lock for database")] + LockError, + #[error("Query failed for table '{0}' in '{1}' database: {2}")] + QueryError(String, String, #[source] QueryExecuteError), +} + +impl StorageError { + pub fn create_table( + table_name: 
&str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::CreateTableError(e), + ) + } + pub fn store( + table_name: &str, + db_type: &str, + store_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::StoreError(store_key.to_string(), e), + ) + } + pub fn get( + table_name: &str, + db_type: &str, + get_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetError(get_key.to_string(), e), + ) + } + pub fn delete( + table_name: &str, + db_type: &str, + delete_key: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::DeleteError(delete_key.to_string(), e), + ) + } + pub fn get_all_keys( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetAllKeysError(e), + ) + } + pub fn get_all_key_value_pairs( + table_name: &str, + db_type: &str, + e: surfpool_db::diesel::result::Error, + ) -> Self { + StorageError::QueryError( + table_name.to_string(), + db_type.to_string(), + QueryExecuteError::GetAllKeyValuePairsError(e), + ) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum QueryExecuteError { + #[error("Failed to create table: {0}")] + CreateTableError(#[source] surfpool_db::diesel::result::Error), + #[error("Failed to store value for key '{0}': {1}")] + StoreError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed to get value for key '{0}': {1}")] + GetError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed to delete value for key '{0}': {1}")] + 
DeleteError(String, #[source] surfpool_db::diesel::result::Error), + #[error("Failed to get all keys: {0}")] + GetAllKeysError(#[source] surfpool_db::diesel::result::Error), + #[error("Failed to get all key-value pairs: {0}")] + GetAllKeyValuePairsError(#[source] surfpool_db::diesel::result::Error), +} + +pub type StorageResult = Result; + +impl From for jsonrpc_core::Error { + fn from(err: StorageError) -> Self { + SurfpoolError::from(err).into() + } +} + +pub trait Storage: Send + Sync { + fn store(&mut self, key: K, value: V) -> StorageResult<()>; + fn clear(&mut self) -> StorageResult<()>; + fn get(&self, key: &K) -> StorageResult>; + fn take(&mut self, key: &K) -> StorageResult>; + fn keys(&self) -> StorageResult>; + fn into_iter(&self) -> StorageResult + '_>>; + fn contains_key(&self, key: &K) -> StorageResult { + Ok(self.get(key)?.is_some()) + } + + /// Explicitly shutdown the storage, performing any cleanup like WAL checkpoint. + /// This should be called before the application exits to ensure data is persisted. + /// Default implementation does nothing. 
+ fn shutdown(&self) {} + + // Enable cloning of boxed trait objects + fn clone_box(&self) -> Box>; +} + +// Implement Clone for Box> +impl Clone for Box> { + fn clone(&self) -> Self { + self.clone_box() + } +} + +// Separate trait for construction - this doesn't need to be dyn-compatible +pub trait StorageConstructor: Storage + Clone { + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult + where + Self: Sized; +} + +#[cfg(test)] +pub mod tests { + use std::collections::hash_map::RandomState; + use std::hash::{BuildHasher, Hasher}; + use std::os::unix::fs::PermissionsExt; + + use crossbeam_channel::Receiver; + use surfpool_types::SimnetEvent; + + use crate::surfnet::{GeyserEvent, svm::SurfnetSvm}; + + /// Environment variable for PostgreSQL database URL used in tests + pub const POSTGRES_TEST_URL_ENV: &str = "SURFPOOL_TEST_POSTGRES_URL"; + + /// Generates a random u32 using std's RandomState (no external dependencies) + pub fn random_surfnet_id() -> u32 { + let state = RandomState::new(); + let mut hasher = state.build_hasher(); + // Use thread name/id as string since as_u64() is unstable + hasher.write(format!("{:?}", std::thread::current().id()).as_bytes()); + hasher.write_u128( + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap() + .as_nanos(), + ); + hasher.finish() as u32 + } + + pub enum TestType { + NoDb, + InMemorySqlite, + OnDiskSqlite(String), + /// PostgreSQL with a random surfnet_id for test isolation + #[cfg(feature = "postgres")] + Postgres { + url: String, + surfnet_id: u32, + }, + } + + impl TestType { + pub fn initialize_svm(&self) -> (SurfnetSvm, Receiver, Receiver) { + match &self { + TestType::NoDb => SurfnetSvm::new(), + TestType::InMemorySqlite => SurfnetSvm::new_with_db(Some(":memory:"), 0).unwrap(), + TestType::OnDiskSqlite(db_path) => { + SurfnetSvm::new_with_db(Some(db_path.as_ref()), 0).unwrap() + } + #[cfg(feature = "postgres")] + TestType::Postgres { url, surfnet_id } => { + 
SurfnetSvm::new_with_db(Some(url.as_ref()), *surfnet_id).unwrap() + } + } + } + + pub fn sqlite() -> Self { + let database_url = crate::storage::tests::create_tmp_sqlite_storage(); + TestType::OnDiskSqlite(database_url) + } + + pub fn no_db() -> Self { + TestType::NoDb + } + + pub fn in_memory() -> Self { + TestType::InMemorySqlite + } + + /// Creates a PostgreSQL test type with a random surfnet_id for test isolation. + /// The database URL is read from the SURFPOOL_TEST_POSTGRES_URL environment variable. + /// Panics if the environment variable is not set. + #[cfg(feature = "postgres")] + pub fn postgres() -> Self { + let url = std::env::var(POSTGRES_TEST_URL_ENV).unwrap_or_else(|_| { + panic!( + "PostgreSQL test URL not set. Set the {} environment variable.", + POSTGRES_TEST_URL_ENV + ) + }); + let surfnet_id = random_surfnet_id(); + println!( + "Created PostgreSQL test connection with surfnet_id: {}", + surfnet_id + ); + TestType::Postgres { url, surfnet_id } + } + + /// Creates a PostgreSQL test type with a random surfnet_id for test isolation. + /// Returns None if the SURFPOOL_TEST_POSTGRES_URL environment variable is not set. 
+ #[cfg(feature = "postgres")] + pub fn postgres_if_available() -> Option { + std::env::var(POSTGRES_TEST_URL_ENV).ok().map(|url| { + let surfnet_id = random_surfnet_id(); + println!( + "Created PostgreSQL test connection with surfnet_id: {}", + surfnet_id + ); + TestType::Postgres { url, surfnet_id } + }) + } + } + + impl Drop for TestType { + fn drop(&mut self) { + if let TestType::OnDiskSqlite(db_path) = self { + // Delete file at db_path when TestType goes out of scope + let _ = std::fs::remove_file(db_path); + } + // Note: PostgreSQL data is isolated by surfnet_id and doesn't need cleanup + // The random surfnet_id ensures test isolation without table cleanup + } + } + + pub fn create_tmp_sqlite_storage() -> String { + // let temp_dir = tempfile::tempdir().expect("Failed to create temp dir for SqliteStorage"); + let write_permissions = std::fs::Permissions::from_mode(0o600); + let file = tempfile::Builder::new() + .permissions(write_permissions) + .suffix(".sqlite") + .tempfile() + .expect("Failed to create temp file for SqliteStorage"); + let database_url = file.path().to_path_buf(); + + // Use a simple path without creating the file beforehand + // Let SQLite create the database file itself + let database_url = database_url.to_str().unwrap().to_string(); + println!("Created temporary Sqlite database at: {}", database_url); + database_url + } +} diff --git a/crates/core/src/storage/postgres.rs b/crates/core/src/storage/postgres.rs new file mode 100644 index 00000000..730c7296 --- /dev/null +++ b/crates/core/src/storage/postgres.rs @@ -0,0 +1,366 @@ +use std::collections::HashMap; +use std::sync::{Mutex, OnceLock}; + +use log::debug; +use serde::{Deserialize, Serialize}; +use surfpool_db::diesel::{ + self, QueryableByName, RunQueryDsl, + connection::SimpleConnection, + r2d2::{ConnectionManager, Pool}, + sql_query, + sql_types::Text, +}; + +use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; + +/// Global shared connection pools 
keyed by database URL. +/// This allows multiple PostgresStorage instances to share the same pool, +/// which is essential for tests that run in parallel. +static SHARED_POOLS: OnceLock>>>> = OnceLock::new(); + +fn get_or_create_shared_pool(database_url: &str) -> StorageResult>> { + let pools = SHARED_POOLS.get_or_init(|| Mutex::new(HashMap::new())); + let mut pools_guard = pools.lock().map_err(|_| StorageError::LockError)?; + + if let Some(pool) = pools_guard.get(database_url) { + debug!("Reusing existing shared PostgreSQL connection pool for {}", database_url); + return Ok(pool.clone()); + } + + debug!("Creating new shared PostgreSQL connection pool for {}", database_url); + let manager = ConnectionManager::::new(database_url); + let pool = Pool::builder() + .max_size(10) // Limit total connections across all tests + .min_idle(Some(1)) + .build(manager) + .map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + + pools_guard.insert(database_url.to_string(), pool.clone()); + Ok(pool) +} + +#[derive(QueryableByName, Debug)] +struct KvRecord { + #[diesel(sql_type = Text)] + key: String, + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct ValueRecord { + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct KeyRecord { + #[diesel(sql_type = Text)] + key: String, +} + +#[derive(Clone)] +pub struct PostgresStorage { + pool: Pool>, + _phantom: std::marker::PhantomData<(K, V)>, + table_name: String, + surfnet_id: u32, +} + +const NAME: &str = "PostgreSQL"; + +impl PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de>, + V: Serialize + for<'de> Deserialize<'de> + Clone, +{ + fn ensure_table_exists(&self) -> StorageResult<()> { + debug!("Ensuring table '{}' exists", self.table_name); + let create_table_sql = format!( + " + CREATE TABLE IF NOT EXISTS {} ( + surfnet_id INTEGER NOT NULL, + key TEXT NOT NULL, + value TEXT NOT NULL, + created_at TIMESTAMP DEFAULT 
CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (surfnet_id, key) + ) + ", + self.table_name + ); + + debug!("Getting connection from pool for table creation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + conn.batch_execute(&create_table_sql) + .map_err(|e| StorageError::create_table(&self.table_name, NAME, e))?; + + debug!("Successfully ensured table '{}' exists", self.table_name); + Ok(()) + } + + fn serialize_key(&self, key: &K) -> StorageResult { + trace!("Serializing key for table '{}'", self.table_name); + let result = + serde_json::to_string(key).map_err(|e| StorageError::SerializeKeyError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!("Key serialized successfully: {}", serialized); + } + result + } + + fn serialize_value(&self, value: &V) -> StorageResult { + trace!("Serializing value for table '{}'", self.table_name); + let result = serde_json::to_string(value) + .map_err(|e| StorageError::SerializeValueError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!( + "Value serialized successfully, length: {} chars", + serialized.len() + ); + } + result + } + + fn deserialize_value(&self, value_str: &str) -> StorageResult { + trace!( + "Deserializing value from table '{}', input length: {} chars", + self.table_name, + value_str.len() + ); + let result = serde_json::from_str(value_str) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e)); + if result.is_ok() { + trace!("Value deserialized successfully"); + } + result + } + + fn load_value_from_db(&self, key_str: &str) -> StorageResult> { + debug!("Loading value from DB for key: {}", key_str); + let query = sql_query(format!( + "SELECT value FROM {} WHERE surfnet_id = $1 AND key = $2", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(key_str); + + trace!("Getting connection from pool for loading value"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + 
let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get(&self.table_name, NAME, key_str, e))?; + + if let Some(record) = records.into_iter().next() { + debug!("Found record for key: {}", key_str); + let value = self.deserialize_value(&record.value)?; + Ok(Some(value)) + } else { + debug!("No record found for key: {}", key_str); + Ok(None) + } + } +} + +impl Storage for PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> StorageResult<()> { + debug!("Storing value in table '{}", self.table_name); + let key_str = self.serialize_key(&key)?; + let value_str = self.serialize_value(&value)?; + + // Use PostgreSQL UPSERT syntax with ON CONFLICT + let query = sql_query(format!( + "INSERT INTO {} (surfnet_id, key, value, updated_at) VALUES ($1, $2, $3, CURRENT_TIMESTAMP) + ON CONFLICT (surfnet_id, key) DO UPDATE SET + value = EXCLUDED.value, + updated_at = CURRENT_TIMESTAMP", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str) + .bind::(&value_str); + + trace!("Getting connection from pool for store operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + query + .execute(&mut *conn) + .map_err(|e| StorageError::store(&self.table_name, NAME, &key_str, e))?; + + debug!("Value stored successfully in table '{}'", self.table_name); + Ok(()) + } + + fn get(&self, key: &K) -> StorageResult> { + debug!("Getting value from table '{}", self.table_name); + let key_str = self.serialize_key(key)?; + + self.load_value_from_db(&key_str) + } + + fn take(&mut self, key: &K) -> StorageResult> { + debug!("Taking value from table '{}'", self.table_name); + let key_str = self.serialize_key(key)?; + + // If not in cache, try to load from database + if let Some(value) = self.load_value_from_db(&key_str)? 
{ + debug!("Value found, removing from database"); + // Remove from database + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = $1 AND key = $2", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str); + + trace!("Getting connection from pool for delete operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, &key_str, e))?; + + debug!( + "Value taken and removed successfully from table '{}'", + self.table_name + ); + Ok(Some(value)) + } else { + debug!("No value found to take from table '{}'", self.table_name); + Ok(None) + } + } + + fn clear(&mut self) -> StorageResult<()> { + debug!("Clearing all data from table '{}'", self.table_name); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for clear operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, "*all*", e))?; + + debug!("Table '{}' cleared successfully", self.table_name); + Ok(()) + } + + fn keys(&self) -> StorageResult> { + debug!("Fetching all keys from table '{}'", self.table_name); + let query = sql_query(format!( + "SELECT key FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for keys operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get_all_keys(&self.table_name, NAME, e))?; + + let mut keys = Vec::new(); + for record in records { + let key: K = serde_json::from_str(&record.key) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e))?; + keys.push(key); + } + + debug!( + 
"Retrieved {} keys from table '{}'", + keys.len(), + self.table_name + ); + Ok(keys) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn into_iter(&self) -> StorageResult + '_>> { + debug!( + "Creating iterator for all key-value pairs in table '{}'", + self.table_name + ); + let query = sql_query(format!( + "SELECT key, value FROM {} WHERE surfnet_id = $1", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for into_iter operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get_all_key_value_pairs(&self.table_name, NAME, e))?; + + let iter = records.into_iter().filter_map(move |record| { + let key: K = match serde_json::from_str(&record.key) { + Ok(k) => k, + Err(e) => { + debug!("Failed to deserialize key: {}", e); + return None; + } + }; + let value: V = match serde_json::from_str(&record.value) { + Ok(v) => v, + Err(e) => { + debug!("Failed to deserialize value: {}", e); + return None; + } + }; + Some((key, value)) + }); + + debug!( + "Iterator created successfully for table '{}'", + self.table_name + ); + Ok(Box::new(iter)) + } +} + +impl StorageConstructor for PostgresStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult { + debug!( + "Connecting to PostgreSQL database: {} with table: {} and surfnet_id: {}", + database_url, table_name, surfnet_id + ); + + // Use shared connection pool to avoid exhausting connections when many + // instances connect to the same database (e.g., parallel tests) + let pool = get_or_create_shared_pool(database_url)?; + + let storage = PostgresStorage { + pool, + _phantom: std::marker::PhantomData, + table_name: table_name.to_string(), + surfnet_id, + }; + + 
storage.ensure_table_exists()?; + debug!( + "PostgreSQL storage connected successfully for table: {}", + table_name + ); + Ok(storage) + } +} diff --git a/crates/core/src/storage/sqlite.rs b/crates/core/src/storage/sqlite.rs new file mode 100644 index 00000000..23bf3f33 --- /dev/null +++ b/crates/core/src/storage/sqlite.rs @@ -0,0 +1,479 @@ +use std::collections::HashSet; +use std::sync::{Mutex, OnceLock}; + +use log::debug; +use serde::{Deserialize, Serialize}; +use surfpool_db::diesel::{ + self, QueryableByName, RunQueryDsl, + connection::SimpleConnection, + r2d2::{ConnectionManager, Pool}, + sql_query, + sql_types::Text, +}; + +use crate::storage::{Storage, StorageConstructor, StorageError, StorageResult}; + +/// Track which database files have already been checkpointed during shutdown. +/// This prevents multiple SqliteStorage instances sharing the same file from +/// conflicting when each tries to checkpoint and delete WAL files. +fn checkpointed_databases() -> &'static Mutex> { + static CHECKPOINTED: OnceLock>> = OnceLock::new(); + CHECKPOINTED.get_or_init(|| Mutex::new(HashSet::new())) +} + +#[derive(QueryableByName, Debug)] +struct KvRecord { + #[diesel(sql_type = Text)] + key: String, + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct ValueRecord { + #[diesel(sql_type = Text)] + value: String, +} + +#[derive(QueryableByName, Debug)] +struct KeyRecord { + #[diesel(sql_type = Text)] + key: String, +} + +#[derive(Clone)] +pub struct SqliteStorage { + pool: Pool>, + _phantom: std::marker::PhantomData<(K, V)>, + table_name: String, + surfnet_id: u32, + /// Whether this is a file-based database (not :memory:) + /// Used to determine if WAL checkpoint should be performed on drop + is_file_based: bool, + /// The connection string for creating direct connections during cleanup + connection_string: String, +} + +const NAME: &str = "SQLite"; + +// Checkpoint implementation that doesn't require K, V bounds +impl SqliteStorage 
{ + /// Checkpoint the WAL and truncate it to consolidate into the main database file, + /// then remove the -wal and -shm files. + /// Only runs for file-based databases (not :memory:). + /// Uses a static set to track which databases have been checkpointed to avoid + /// conflicts when multiple SqliteStorage instances share the same database file. + fn checkpoint(&self) { + if !self.is_file_based { + return; + } + + // Extract the file path from the connection string + // Connection string is like "file:/path/to/db.sqlite?mode=rwc" + let db_path = self + .connection_string + .strip_prefix("file:") + .and_then(|s| s.split('?').next()) + .unwrap_or(&self.connection_string) + .to_string(); + + // Check if this database has already been checkpointed by another storage instance + { + let mut checkpointed = checkpointed_databases().lock().unwrap(); + if checkpointed.contains(&db_path) { + debug!( + "Database {} already checkpointed, skipping for table '{}'", + db_path, self.table_name + ); + return; + } + checkpointed.insert(db_path.clone()); + } + + debug!( + "Checkpointing WAL for database '{}' (table '{}')", + db_path, self.table_name + ); + + // Use pool connection to checkpoint - this flushes WAL to main database + if let Ok(mut conn) = self.pool.get() { + if let Err(e) = conn.batch_execute("PRAGMA wal_checkpoint(TRUNCATE);") { + debug!("WAL checkpoint failed: {}", e); + return; + } + } + + // Remove the -wal and -shm files + let wal_path = format!("{}-wal", db_path); + let shm_path = format!("{}-shm", db_path); + + if std::path::Path::new(&wal_path).exists() { + if let Err(e) = std::fs::remove_file(&wal_path) { + debug!("Failed to remove WAL file {}: {}", wal_path, e); + } else { + debug!("Removed WAL file: {}", wal_path); + } + } + + if std::path::Path::new(&shm_path).exists() { + if let Err(e) = std::fs::remove_file(&shm_path) { + debug!("Failed to remove SHM file {}: {}", shm_path, e); + } else { + debug!("Removed SHM file: {}", shm_path); + } + } + } +} + 
+impl SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de>, + V: Serialize + for<'de> Deserialize<'de> + Clone, +{ + fn ensure_table_exists(&self) -> StorageResult<()> { + debug!("Ensuring table '{}' exists", self.table_name); + let create_table_sql = format!( + " + CREATE TABLE IF NOT EXISTS {} ( + surfnet_id INTEGER NOT NULL, + key TEXT NOT NULL, + value TEXT NOT NULL, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (surfnet_id, key) + ) + ", + self.table_name + ); + + debug!("Getting connection from pool for table creation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + conn.batch_execute(&create_table_sql) + .map_err(|e| StorageError::create_table(&self.table_name, NAME, e))?; + + debug!("Successfully ensured table '{}' exists", self.table_name); + Ok(()) + } + + fn serialize_key(&self, key: &K) -> StorageResult { + trace!("Serializing key for table '{}'", self.table_name); + let result = + serde_json::to_string(key).map_err(|e| StorageError::SerializeKeyError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!("Key serialized successfully: {}", serialized); + } + result + } + + fn serialize_value(&self, value: &V) -> StorageResult { + trace!("Serializing value for table '{}'", self.table_name); + let result = serde_json::to_string(value) + .map_err(|e| StorageError::SerializeValueError(NAME.into(), e)); + if let Ok(ref serialized) = result { + trace!( + "Value serialized successfully, length: {} chars", + serialized.len() + ); + } + result + } + + fn deserialize_value(&self, value_str: &str) -> StorageResult { + trace!( + "Deserializing value from table '{}', input length: {} chars", + self.table_name, + value_str.len() + ); + let result = serde_json::from_str(value_str) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e)); + if result.is_ok() { + trace!("Value deserialized successfully"); + } + result + } + + fn 
load_value_from_db(&self, key_str: &str) -> StorageResult> { + debug!("Loading value from DB for key: {}", key_str); + let query = sql_query(format!( + "SELECT value FROM {} WHERE surfnet_id = ? AND key = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(key_str); + + trace!("Getting connection from pool for loading value"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get(&self.table_name, NAME, key_str, e))?; + + if let Some(record) = records.into_iter().next() { + debug!("Found record for key: {}", key_str); + let value = self.deserialize_value(&record.value)?; + Ok(Some(value)) + } else { + debug!("No record found for key: {}", key_str); + Ok(None) + } + } +} + +impl Storage for SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn store(&mut self, key: K, value: V) -> StorageResult<()> { + debug!("Storing value in table '{}", self.table_name); + let key_str = self.serialize_key(&key)?; + let value_str = self.serialize_value(&value)?; + + // Use prepared statement with sql_query for better safety + let query = sql_query(format!( + "INSERT OR REPLACE INTO {} (surfnet_id, key, value, updated_at) VALUES (?, ?, ?, CURRENT_TIMESTAMP)", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str) + .bind::(&value_str); + + trace!("Getting connection from pool for store operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + query + .execute(&mut *conn) + .map_err(|e| StorageError::store(&self.table_name, NAME, &key_str, e))?; + + debug!("Value stored successfully in table '{}'", self.table_name); + Ok(()) + } + + fn get(&self, key: &K) -> StorageResult> { + debug!("Getting value from table '{}", self.table_name); + let key_str = self.serialize_key(key)?; + + 
self.load_value_from_db(&key_str) + } + + fn take(&mut self, key: &K) -> StorageResult> { + debug!("Taking value from table '{}'", self.table_name); + let key_str = self.serialize_key(key)?; + + // If not in cache, try to load from database + if let Some(value) = self.load_value_from_db(&key_str)? { + debug!("Value found, removing from database"); + // Remove from database + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = ? AND key = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32) + .bind::(&key_str); + + trace!("Getting connection from pool for delete operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, &key_str, e))?; + + debug!( + "Value taken and removed successfully from table '{}'", + self.table_name + ); + Ok(Some(value)) + } else { + debug!("No value found to take from table '{}'", self.table_name); + Ok(None) + } + } + + fn clear(&mut self) -> StorageResult<()> { + debug!("Clearing all data from table '{}'", self.table_name); + let delete_query = sql_query(format!( + "DELETE FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for clear operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + delete_query + .execute(&mut *conn) + .map_err(|e| StorageError::delete(&self.table_name, NAME, "*all*", e))?; + + debug!("Table '{}' cleared successfully", self.table_name); + Ok(()) + } + + fn keys(&self) -> StorageResult> { + debug!("Fetching all keys from table '{}'", self.table_name); + let query = sql_query(format!( + "SELECT key FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for keys operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + 
.load::(&mut *conn) + .map_err(|e| StorageError::get_all_keys(&self.table_name, NAME, e))?; + + let mut keys = Vec::new(); + for record in records { + let key: K = serde_json::from_str(&record.key) + .map_err(|e| StorageError::DeserializeValueError(NAME.into(), e))?; + keys.push(key); + } + + debug!( + "Retrieved {} keys from table '{}'", + keys.len(), + self.table_name + ); + Ok(keys) + } + + fn clone_box(&self) -> Box> { + Box::new(self.clone()) + } + + fn shutdown(&self) { + self.checkpoint(); + } + + fn into_iter(&self) -> StorageResult + '_>> { + debug!( + "Creating iterator for all key-value pairs in table '{}'", + self.table_name + ); + let query = sql_query(format!( + "SELECT key, value FROM {} WHERE surfnet_id = ?", + self.table_name + )) + .bind::(self.surfnet_id as i32); + + trace!("Getting connection from pool for into_iter operation"); + let mut conn = self.pool.get().map_err(|_| StorageError::LockError)?; + + let records = query + .load::(&mut *conn) + .map_err(|e| StorageError::get_all_key_value_pairs(&self.table_name, NAME, e))?; + + let iter = records.into_iter().filter_map(move |record| { + let key: K = match serde_json::from_str(&record.key) { + Ok(k) => k, + Err(e) => { + debug!("Failed to deserialize key: {}", e); + return None; + } + }; + let value: V = match serde_json::from_str(&record.value) { + Ok(v) => v, + Err(e) => { + debug!("Failed to deserialize value: {}", e); + return None; + } + }; + Some((key, value)) + }); + + debug!( + "Iterator created successfully for table '{}'", + self.table_name + ); + Ok(Box::new(iter)) + } +} + +impl StorageConstructor for SqliteStorage +where + K: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, + V: Serialize + for<'de> Deserialize<'de> + Clone + Send + Sync + 'static, +{ + fn connect(database_url: &str, table_name: &str, surfnet_id: u32) -> StorageResult { + debug!( + "Connecting to SQLite database: {} with table: {} and surfnet_id: {}", + database_url, table_name, surfnet_id + 
); + + let connection_string = if database_url == ":memory:" { + database_url.to_string() + } else if database_url.starts_with("file:") { + // Already a URI, just add mode if needed + if database_url.contains('?') { + format!("{}&mode=rwc", database_url) + } else { + format!("{}?mode=rwc", database_url) + } + } else { + // Convert plain path to file: URI format for proper parameter handling + format!("file:{}?mode=rwc", database_url) + }; + + let manager = ConnectionManager::::new(connection_string.clone()); + trace!("Creating connection pool"); + let pool = + Pool::new(manager).map_err(|e| StorageError::PooledConnectionError(NAME.into(), e))?; + + let is_file_based = database_url != ":memory:"; + let storage = SqliteStorage { + pool, + _phantom: std::marker::PhantomData, + table_name: table_name.to_string(), + surfnet_id, + is_file_based, + connection_string, + }; + + // Set SQLite pragmas for performance and reliability + { + let mut conn = storage.pool.get().map_err(|_| StorageError::LockError)?; + + // Different pragma sets for file-based vs in-memory databases + let pragmas = if database_url == ":memory:" { + // In-memory database pragmas (WAL not supported) + " + PRAGMA synchronous=OFF; + PRAGMA temp_store=MEMORY; + PRAGMA cache_size=-64000; + PRAGMA busy_timeout=5000; + " + } else { + // File-based database pragmas + " + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA temp_store=MEMORY; + PRAGMA mmap_size=268435456; + PRAGMA cache_size=-64000; + PRAGMA busy_timeout=5000; + PRAGMA wal_autocheckpoint=1000; + " + // Pragma explanations: + // - journal_mode=WAL: Write-Ahead Logging for better concurrency and crash recovery + // - synchronous=NORMAL: Safe with WAL mode, good performance/durability balance + // - temp_store=MEMORY: Store temp tables in memory for speed + // - mmap_size=268435456: 256MB memory-mapped I/O for faster reads + // - cache_size=-64000: 64MB page cache (negative = KB) + // - busy_timeout=5000: Wait 5s for locks instead of 
failing immediately + // - wal_autocheckpoint=1000: Checkpoint WAL after 1000 pages (~4MB with default page size) + }; + + conn.batch_execute(pragmas) + .map_err(|e| StorageError::create_table(table_name, NAME, e))?; + } + + storage.ensure_table_exists()?; + debug!( + "SQLite storage connected successfully for table: {}", + table_name + ); + Ok(storage) + } +} diff --git a/crates/core/src/surfnet/locker.rs b/crates/core/src/surfnet/locker.rs index 0dcc7d95..fe1b3063 100644 --- a/crates/core/src/surfnet/locker.rs +++ b/crates/core/src/surfnet/locker.rs @@ -134,6 +134,16 @@ impl Clone for SurfnetSvmLocker { /// Functions for reading and writing to the underlying SurfnetSvm instance impl SurfnetSvmLocker { + /// Explicitly shutdown the SVM, performing cleanup like WAL checkpoint for SQLite. + /// This should be called before the application exits to ensure data is persisted. + pub fn shutdown(&self) { + let read_lock = self.0.clone(); + tokio::task::block_in_place(move || { + let read_guard = read_lock.blocking_read(); + read_guard.shutdown(); + }); + } + /// Executes a read-only operation on the underlying `SurfnetSvm` by acquiring a blocking read lock. /// Accepts a closure that receives a shared reference to `SurfnetSvm` and returns a value. /// @@ -237,20 +247,19 @@ impl SurfnetSvmLocker { /// Retrieves a local account from the SVM cache, returning a contextualized result. 
pub fn get_account_local(&self, pubkey: &Pubkey) -> SvmAccessContext { self.with_contextualized_svm_reader(|svm_reader| { - match svm_reader.inner.get_account(pubkey) { - Some(account) => GetAccountResult::FoundAccount( - *pubkey, account, - // mark as not an account that should be updated in the SVM, since this is a local read and it already exists - false, - ), - None => match svm_reader.get_account_from_feature_set(pubkey) { + let result = svm_reader.inner.get_account_result(pubkey).unwrap(); + + if result.is_none() { + return match svm_reader.get_account_from_feature_set(pubkey) { Some(account) => GetAccountResult::FoundAccount( *pubkey, account, // mark as not an account that should be updated in the SVM, since this is a local read and it already exists false, ), None => GetAccountResult::None(*pubkey), - }, + }; + } else { + return result; } }) } @@ -312,22 +321,18 @@ impl SurfnetSvmLocker { let mut accounts = vec![]; for pubkey in pubkeys { - let res = match svm_reader.inner.get_account(pubkey) { - Some(account) => GetAccountResult::FoundAccount( - *pubkey, account, - // mark as not an account that should be updated in the SVM, since this is a local read and it already exists - false, - ), - None => match svm_reader.get_account_from_feature_set(pubkey) { + let mut result = svm_reader.inner.get_account_result(pubkey).unwrap(); + if result.is_none() { + result = match svm_reader.get_account_from_feature_set(pubkey) { Some(account) => GetAccountResult::FoundAccount( *pubkey, account, // mark as not an account that should be updated in the SVM, since this is a local read and it already exists false, ), None => GetAccountResult::None(*pubkey), - }, + } }; - accounts.push(res); + accounts.push(result); } accounts }) @@ -463,8 +468,8 @@ impl SurfnetSvmLocker { pub fn get_largest_accounts_local( &self, config: RpcLargestAccountsConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let res: 
Vec = self.with_svm_reader(|svm_reader| { let non_circulating_accounts: Vec<_> = svm_reader .non_circulating_accounts .iter() @@ -472,7 +477,8 @@ impl SurfnetSvmLocker { .collect(); let ordered_accounts = svm_reader - .iter_accounts() + .get_all_accounts()? + .into_iter() .sorted_by(|a, b| b.1.lamports().cmp(&a.1.lamports())) .collect::>(); let ordered_filtered_accounts = match config.filter { @@ -487,15 +493,18 @@ impl SurfnetSvmLocker { None => ordered_accounts, }; - ordered_filtered_accounts - .iter() - .take(20) - .map(|(pubkey, account)| RpcAccountBalance { - address: pubkey.to_string(), - lamports: account.lamports(), - }) - .collect() - }) + Ok::, SurfpoolError>( + ordered_filtered_accounts + .iter() + .take(20) + .map(|(pubkey, account)| RpcAccountBalance { + address: pubkey.to_string(), + lamports: account.lamports(), + }) + .collect(), + ) + })?; + Ok(self.with_contextualized_svm_reader(|_| res.to_owned())) } pub async fn get_largest_accounts_local_then_remote( @@ -564,7 +573,7 @@ impl SurfnetSvmLocker { // now that our local cache is aware of all large remote accounts, we can get the largest accounts locally // and filter according to the config - Ok(self.get_largest_accounts_local(config)) + self.get_largest_accounts_local(config) } pub async fn get_largest_accounts( @@ -572,14 +581,12 @@ impl SurfnetSvmLocker { remote_ctx: &Option<(SurfnetRemoteClient, CommitmentConfig)>, config: RpcLargestAccountsConfig, ) -> SurfpoolContextualizedResult> { - let results = if let Some((remote_client, commitment_config)) = remote_ctx { + if let Some((remote_client, commitment_config)) = remote_ctx { self.get_largest_accounts_local_then_remote(remote_client, config, *commitment_config) - .await? 
+ .await } else { self.get_largest_accounts_local(config) - }; - - Ok(results) + } } pub fn account_to_rpc_keyed_account( @@ -616,57 +623,60 @@ impl SurfnetSvmLocker { let sigs: Vec<_> = svm_reader .transactions - .iter() - .filter_map(|(sig, status)| { - let ( - TransactionWithStatusMeta { - slot, - transaction, - meta, - }, - _, - ) = status.expect_processed(); - - if *slot < config.clone().min_context_slot.unwrap_or_default() { - return None; - } - - if Some(sig.to_string()) == config_before { - before_slot = Some(*slot); - } + .into_iter() + .map(|iter| { + iter.filter_map(|(sig, status)| { + let ( + TransactionWithStatusMeta { + slot, + transaction, + meta, + }, + _, + ) = status.expect_processed(); + + if *slot < config.clone().min_context_slot.unwrap_or_default() { + return None; + } - if Some(sig.to_string()) == config_until { - until_slot = Some(*slot); - } + if Some(sig.clone()) == config_before { + before_slot = Some(*slot); + } - // Check if the pubkey is a signer + if Some(sig.clone()) == config_until { + until_slot = Some(*slot); + } - if !transaction.message.static_account_keys().contains(pubkey) { - return None; - } + // Check if the pubkey is a signer - // Determine confirmation status - let confirmation_status = match current_slot { - cs if cs == *slot => SolanaTransactionConfirmationStatus::Processed, - cs if cs < slot + FINALIZATION_SLOT_THRESHOLD => { - SolanaTransactionConfirmationStatus::Confirmed + if !transaction.message.static_account_keys().contains(pubkey) { + return None; } - _ => SolanaTransactionConfirmationStatus::Finalized, - }; - Some(RpcConfirmedTransactionStatusWithSignature { - err: match &meta.status { - Ok(_) => None, - Err(e) => Some(e.clone().into()), - }, - slot: *slot, - memo: None, - block_time: None, - confirmation_status: Some(confirmation_status), - signature: sig.to_string(), + // Determine confirmation status + let confirmation_status = match current_slot { + cs if cs == *slot => 
SolanaTransactionConfirmationStatus::Processed, + cs if cs < *slot + FINALIZATION_SLOT_THRESHOLD => { + SolanaTransactionConfirmationStatus::Confirmed + } + _ => SolanaTransactionConfirmationStatus::Finalized, + }; + + Some(RpcConfirmedTransactionStatusWithSignature { + err: match &meta.status { + Ok(_) => None, + Err(e) => Some(e.clone().into()), + }, + slot: *slot, + memo: None, + block_time: None, + confirmation_status: Some(confirmation_status), + signature: sig, + }) }) + .collect() }) - .collect(); + .unwrap_or_default(); sigs.into_iter() .filter(|sig| { @@ -752,7 +762,7 @@ impl SurfnetSvmLocker { self.with_svm_reader(|svm_reader| { let latest_absolute_slot = svm_reader.get_latest_absolute_slot(); - let Some(entry) = svm_reader.transactions.get(signature) else { + let Some(entry) = svm_reader.transactions.get(&signature.to_string())? else { return Ok(GetTransactionResult::None(*signature)); }; @@ -760,7 +770,7 @@ impl SurfnetSvmLocker { let slot = transaction_with_status_meta.slot; let block_time = svm_reader .blocks - .get(&slot) + .get(&slot)? 
.map(|b| (b.block_time / 1_000) as UnixTimestamp) .unwrap_or(0); let encoded = transaction_with_status_meta.encode( @@ -843,8 +853,8 @@ impl SurfnetSvmLocker { .await?; self.with_svm_writer(|svm_writer| { - svm_writer.write_executed_profile_result(signature, profile_result); - }); + svm_writer.write_executed_profile_result(signature, profile_result) + })?; Ok(()) } @@ -883,8 +893,8 @@ impl SurfnetSvmLocker { profile_result.key = UuidOrSignature::Uuid(uuid); self.with_svm_writer(|svm_writer| { - svm_writer.write_simulated_profile_result(uuid, tag, profile_result); - }); + svm_writer.write_simulated_profile_result(uuid, tag, profile_result) + })?; Ok(self.with_contextualized_svm_reader(|_| uuid)) } @@ -999,12 +1009,19 @@ impl SurfnetSvmLocker { let accounts_before = transaction_accounts .iter() .map(|p| svm_reader.inner.get_account(p)) - .collect::>>(); + .collect::>, SurfpoolError>>()?; let token_accounts_before = transaction_accounts .iter() .enumerate() - .filter_map(|(i, p)| svm_reader.token_accounts.get(p).cloned().map(|a| (i, a))) + .filter_map(|(i, p)| { + svm_reader + .token_accounts + .get(&p.to_string()) + .ok() + .flatten() + .map(|a| (i, a)) + }) .collect::>(); let token_programs = token_accounts_before @@ -1012,13 +1029,19 @@ impl SurfnetSvmLocker { .map(|(i, ta)| { svm_reader .get_account(&transaction_accounts[*i]) - .map(|a| a.owner) - .unwrap_or(ta.token_program_id()) + .map(|res| res.map(|a| a.owner).unwrap_or(ta.token_program_id())) }) - .collect::>() - .clone(); - (accounts_before, token_accounts_before, token_programs) - }); + .collect::, SurfpoolError>>()?; + + Ok::< + ( + Vec>, + Vec<(usize, TokenAccount)>, + Vec, + ), + SurfpoolError, + >((accounts_before, token_accounts_before, token_programs)) + })?; let loaded_addresses = tx_loaded_addresses.as_ref().map(|l| l.loaded_addresses()); @@ -1247,7 +1270,7 @@ impl SurfnetSvmLocker { pre_execution_capture: ExecutionCapture, status_tx: Sender, do_propagate: bool, - ) -> ProfileResult { + ) -> 
SurfpoolResult { let FailedTransactionMetadata { err, meta } = failed_transaction_metadata; let cus = meta.compute_units_consumed; @@ -1258,7 +1281,7 @@ impl SurfnetSvmLocker { let accounts_after = pubkeys_from_message .iter() .map(|p| self.with_svm_reader(|svm_reader| svm_reader.inner.get_account(p))) - .collect::>>(); + .collect::>>>()?; for (pubkey, (before, after)) in pubkeys_from_message .iter() @@ -1283,8 +1306,9 @@ impl SurfnetSvmLocker { .map(|(_, a)| { svm_reader .token_mints - .get(&a.mint()) - .cloned() + .get(&a.mint().to_string()) + .ok() + .flatten() .ok_or(SurfpoolError::token_mint_not_found(a.mint())) }) .collect::, SurfpoolError>>() @@ -1318,13 +1342,13 @@ impl SurfnetSvmLocker { token_programs, loaded_addresses.clone().unwrap_or_default(), ); - svm_writer.transactions.insert( - signature, + svm_writer.transactions.store( + signature.to_string(), SurfnetTransactionStatus::processed( transaction_with_status_meta, HashSet::new(), ), - ); + )?; svm_writer.transactions_queued_for_confirmation.push_back(( transaction.clone(), @@ -1350,15 +1374,16 @@ impl SurfnetSvmLocker { meta_canonical, Some(err.clone()), )); - }); + Ok::<(), SurfpoolError>(()) + })?; } - ProfileResult::new( + Ok(ProfileResult::new( pre_execution_capture, BTreeMap::new(), cus, Some(log_messages), Some(err_string), - ) + )) } #[allow(clippy::too_many_arguments)] @@ -1383,9 +1408,8 @@ impl SurfnetSvmLocker { let post_execution_capture = self.with_svm_writer(|svm_writer| { let accounts_after = pubkeys_from_message .iter() - .map(|p| svm_writer.inner.get_account(p)) + .map(|p| svm_writer.inner.get_account_no_db(p)) .collect::>>(); - let (sanitized_transaction, versioned_transaction) = if do_propagate { ( SanitizedTransaction::try_create( @@ -1441,7 +1465,11 @@ impl SurfnetSvmLocker { .zip(accounts_after.iter()) .enumerate() { - let token_account = svm_writer.token_accounts.get(pubkey).cloned(); + let token_account = svm_writer + .token_accounts + .get(&pubkey.to_string()) + .ok() + 
.flatten(); post_execution_capture.insert(*pubkey, account.clone()); if let Some(token_account) = token_account { @@ -1460,9 +1488,10 @@ impl SurfnetSvmLocker { .map(|(_, a)| { svm_writer .token_mints - .get(&a.mint()) + .get(&a.mint().to_string()) + .ok() + .flatten() .ok_or(SurfpoolError::token_mint_not_found(a.mint())) - .cloned() }) .collect::, SurfpoolError>>()?; @@ -1482,13 +1511,13 @@ impl SurfnetSvmLocker { &post_token_program_ids, loaded_addresses.clone().unwrap_or_default(), ); - svm_writer.transactions.insert( - transaction_meta.signature, + svm_writer.transactions.store( + transaction_meta.signature.to_string(), SurfnetTransactionStatus::processed( transaction_with_status_meta.clone(), mutated_account_pubkeys, ), - ); + )?; let _ = svm_writer .simnet_events_tx @@ -1590,7 +1619,7 @@ impl SurfnetSvmLocker { pre_execution_capture, status_tx.clone(), do_propagate, - ), + )?, }; Ok(res) } @@ -1710,13 +1739,19 @@ impl SurfnetSvmLocker { self.with_svm_writer(|svm_writer| { svm_writer .streamed_accounts - .insert(pubkey, include_owned_accounts); - }); + .store(pubkey.to_string(), include_owned_accounts) + })?; Ok(()) } - pub fn get_streamed_accounts(&self) -> HashMap { - self.with_svm_reader(|svm_reader| svm_reader.streamed_accounts.clone()) + pub fn get_streamed_accounts(&self) -> Vec<(String, bool)> { + self.with_svm_reader(|svm_reader| { + svm_reader + .streamed_accounts + .into_iter() + .map(|iter| iter.collect()) + .unwrap_or_default() + }) } /// Removes an account from the closed accounts set. 
@@ -1759,7 +1794,7 @@ impl SurfnetSvmLocker { self.get_token_accounts_by_owner_local_then_remote(owner, filter, remote_client, config) .await } else { - Ok(self.get_token_accounts_by_owner_local(owner, filter, config)) + self.get_token_accounts_by_owner_local(owner, filter, config) } } @@ -1768,29 +1803,50 @@ impl SurfnetSvmLocker { owner: Pubkey, filter: &TokenAccountsFilter, config: &RpcAccountInfoConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let result = self.with_contextualized_svm_reader(|svm_reader| { svm_reader .get_parsed_token_accounts_by_owner(&owner) .iter() .filter_map(|(pubkey, token_account)| { - let account = svm_reader.get_account(pubkey)?; - if match filter { - TokenAccountsFilter::Mint(mint) => token_account.mint().eq(mint), - TokenAccountsFilter::ProgramId(program_id) => account.owner.eq(program_id), - } { - Some(svm_reader.account_to_rpc_keyed_account( - pubkey, - &account, - config, - Some(token_account.mint()), - )) - } else { - None - } + svm_reader + .get_account(pubkey) + .map(|res| { + let Some(account) = res else { + return None; + }; + if match filter { + TokenAccountsFilter::Mint(mint) => token_account.mint().eq(mint), + TokenAccountsFilter::ProgramId(program_id) => { + account.owner.eq(program_id) + } + } { + Some(svm_reader.account_to_rpc_keyed_account( + pubkey, + &account, + config, + Some(token_account.mint()), + )) + } else { + None + } + }) + .transpose() }) - .collect::>() - }) + .collect::>>() + }); + let SvmAccessContext { + slot, + latest_epoch_info, + latest_blockhash, + inner: accounts, + } = result; + Ok(SvmAccessContext::new( + slot, + latest_epoch_info, + latest_blockhash, + accounts?, + )) } pub async fn get_token_accounts_by_owner_local_then_remote( @@ -1805,7 +1861,7 @@ impl SurfnetSvmLocker { latest_epoch_info, latest_blockhash, inner: local_accounts, - } = self.get_token_accounts_by_owner_local(owner, filter, config); + } = 
self.get_token_accounts_by_owner_local(owner, filter, config)?; let remote_accounts = remote_client .get_token_accounts_by_owner(owner, filter, config) @@ -1855,7 +1911,7 @@ impl SurfnetSvmLocker { ) .await } else { - Ok(self.get_token_accounts_by_delegate_local(delegate, filter, config)) + self.get_token_accounts_by_delegate_local(delegate, filter, config) } } } @@ -1867,33 +1923,53 @@ impl SurfnetSvmLocker { delegate: Pubkey, filter: &TokenAccountsFilter, config: &RpcAccountInfoConfig, - ) -> SvmAccessContext> { - self.with_contextualized_svm_reader(|svm_reader| { + ) -> SurfpoolContextualizedResult> { + let result = self.with_contextualized_svm_reader(|svm_reader| { svm_reader .get_token_accounts_by_delegate(&delegate) .iter() .filter_map(|(pubkey, token_account)| { - let account = svm_reader.get_account(pubkey)?; - let include = match filter { - TokenAccountsFilter::Mint(mint) => token_account.mint() == *mint, - TokenAccountsFilter::ProgramId(program_id) => { - account.owner == *program_id && is_supported_token_program(program_id) - } - }; - - if include { - Some(svm_reader.account_to_rpc_keyed_account( - pubkey, - &account, - config, - Some(token_account.mint()), - )) - } else { - None - } + svm_reader + .get_account(pubkey) + .map(|res| { + let Some(account) = res else { + return None; + }; + let include = match filter { + TokenAccountsFilter::Mint(mint) => token_account.mint() == *mint, + TokenAccountsFilter::ProgramId(program_id) => { + account.owner == *program_id + && is_supported_token_program(program_id) + } + }; + + if include { + Some(svm_reader.account_to_rpc_keyed_account( + pubkey, + &account, + config, + Some(token_account.mint()), + )) + } else { + None + } + }) + .transpose() }) - .collect::>() - }) + .collect::>>() + }); + let SvmAccessContext { + slot, + latest_epoch_info, + latest_blockhash, + inner: accounts, + } = result; + Ok(SvmAccessContext::new( + slot, + latest_epoch_info, + latest_blockhash, + accounts?, + )) } pub async fn 
get_token_accounts_by_delegate_local_then_remote( @@ -1908,7 +1984,7 @@ impl SurfnetSvmLocker { latest_epoch_info, latest_blockhash, inner: local_accounts, - } = self.get_token_accounts_by_delegate_local(delegate, filter, config); + } = self.get_token_accounts_by_delegate_local(delegate, filter, config)?; let remote_accounts = remote_client .get_token_accounts_by_delegate(delegate, filter, config) @@ -1948,7 +2024,9 @@ impl SurfnetSvmLocker { let token_accounts = svm_reader.get_token_accounts_by_mint(mint); // get mint information to determine decimals - let mint_decimals = if let Some(mint_account) = svm_reader.token_mints.get(mint) { + let mint_decimals = if let Some(mint_account) = + svm_reader.token_mints.get(&mint.to_string()).ok().flatten() + { mint_account.decimals() } else { 0 @@ -2289,7 +2367,7 @@ impl SurfnetSvmLocker { tag: String, config: &RpcProfileResultConfig, ) -> SurfpoolResult>> { - let tag_map = self.with_svm_reader(|svm| svm.profile_tag_map.get(&tag).cloned()); + let tag_map = self.with_svm_reader(|svm| svm.profile_tag_map.get(&tag).ok().flatten()); match tag_map { None => Ok(None), Some(uuids_or_sigs) => { @@ -2306,19 +2384,26 @@ impl SurfnetSvmLocker { } } - pub fn register_idl(&self, idl: Idl, slot: Option) { + pub fn register_idl(&self, idl: Idl, slot: Option) -> SurfpoolResult<()> { self.with_svm_writer(|svm_writer| svm_writer.register_idl(idl, slot)) } pub fn get_idl(&self, address: &Pubkey, slot: Option) -> Option { self.with_svm_reader(|svm_reader| { let query_slot = slot.unwrap_or_else(|| svm_reader.get_latest_absolute_slot()); - svm_reader.registered_idls.get(address).and_then(|heap| { - heap.iter() - .filter(|VersionedIdl(s, _)| s <= &query_slot) - .max() - .map(|VersionedIdl(_, idl)| idl.clone()) - }) + // IDLs are stored sorted by slot descending, so the first one that passes the filter is the latest + svm_reader + .registered_idls + .get(&address.to_string()) + .ok() + .flatten() + .and_then(|idl_versions| { + idl_versions + 
.iter() + .filter(|VersionedIdl(s, _)| *s <= query_slot) + .max() + .map(|VersionedIdl(_, idl)| idl.clone()) + }) }) } @@ -2581,7 +2666,7 @@ impl SurfnetSvmLocker { filters: Option>, ) -> SurfpoolContextualizedResult> { let res = self.with_svm_reader(|svm_reader| { - let res = svm_reader.get_account_owned_by(program_id); + let res = svm_reader.get_account_owned_by(program_id)?; let mut filtered = vec![]; for (pubkey, account) in &res { @@ -2681,7 +2766,7 @@ impl SurfnetSvmLocker { impl SurfnetSvmLocker { pub fn get_first_local_slot(&self) -> Option { - self.with_svm_reader(|svm_reader| svm_reader.blocks.keys().min().copied()) + self.with_svm_reader(|svm_reader| svm_reader.blocks.keys().unwrap().into_iter().min()) } pub async fn get_block( @@ -2824,7 +2909,7 @@ impl SurfnetSvmLocker { /// Executes an airdrop via the underlying SVM. #[allow(clippy::result_large_err)] - pub fn airdrop(&self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + pub fn airdrop(&self, pubkey: &Pubkey, lamports: u64) -> SurfpoolResult { self.with_svm_writer(|svm_writer| svm_writer.airdrop(pubkey, lamports)) } @@ -2911,7 +2996,7 @@ impl SurfnetSvmLocker { pub fn export_snapshot( &self, config: ExportSnapshotConfig, - ) -> BTreeMap { + ) -> SurfpoolResult> { self.with_svm_reader(|svm_reader| svm_reader.export_snapshot(config)) } @@ -3380,7 +3465,7 @@ mod tests { // Step 2: Register the IDL for this account let account_pubkey = Pubkey::from_str_const("rec5EKMGg6MxZYaMdyBfgwp4d5rB9T1VQH5pJv5LtFJ"); - svm_locker.register_idl(idl.clone(), None); + svm_locker.register_idl(idl.clone(), None).unwrap(); // Step 3: Create an account with the Pyth data let pyth_account = Account { diff --git a/crates/core/src/surfnet/mod.rs b/crates/core/src/surfnet/mod.rs index c66a199a..215b8848 100644 --- a/crates/core/src/surfnet/mod.rs +++ b/crates/core/src/surfnet/mod.rs @@ -23,6 +23,7 @@ use crate::{ pub mod locker; pub mod remote; +pub mod surfnet_lite_svm; pub mod svm; pub const 
FINALIZATION_SLOT_THRESHOLD: u64 = 31; @@ -59,7 +60,7 @@ impl BlockIdentifier { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlockHeader { pub hash: String, pub previous_blockhash: String, diff --git a/crates/core/src/surfnet/surfnet_lite_svm.rs b/crates/core/src/surfnet/surfnet_lite_svm.rs new file mode 100644 index 00000000..d4f3ec21 --- /dev/null +++ b/crates/core/src/surfnet/surfnet_lite_svm.rs @@ -0,0 +1,294 @@ +use std::collections::HashMap; + +use agave_feature_set::FeatureSet; +use itertools::Itertools; +use litesvm::{ + LiteSVM, + types::{FailedTransactionMetadata, SimulatedTransactionInfo, TransactionResult}, +}; +use solana_account::{Account, AccountSharedData}; +use solana_loader_v3_interface::get_program_data_address; +use solana_program_option::COption; +use solana_pubkey::Pubkey; +use solana_transaction::versioned::VersionedTransaction; + +use crate::{ + error::{SurfpoolError, SurfpoolResult}, + storage::{Storage, new_kv_store}, + surfnet::{GetAccountResult, locker::is_supported_token_program}, +}; + +#[derive(Clone)] +pub struct SurfnetLiteSvm { + pub svm: LiteSVM, + pub db: Option>>, +} + +impl SurfnetLiteSvm { + pub fn new() -> Self { + Self { + svm: LiteSVM::new(), + db: None, + } + } + + pub fn initialize( + mut self, + feature_set: FeatureSet, + database_url: Option<&str>, + surfnet_id: u32, + ) -> SurfpoolResult { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(&mut self); + + if let Some(db_url) = database_url { + let db: Box> = + new_kv_store(&Some(db_url), "accounts", surfnet_id)?; + self.db = Some(db); + } + + Ok(self) + } + + /// Explicitly shutdown the storage, performing cleanup like WAL checkpoint for SQLite. 
+ pub fn shutdown(&self) { + if let Some(db) = &self.db { + db.shutdown(); + } + } + + pub fn reset(&mut self, feature_set: FeatureSet) -> SurfpoolResult<()> { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + + if let Some(db) = &mut self.db { + db.clear()?; + } + Ok(()) + } + + /// Perform garbage collection by resetting the SVM state while retaining the database. + /// This is useful for cleaning up unused accounts and reducing memory usage. + /// If no database is configured, this function is a no-op. + pub fn garbage_collect(&mut self, feature_set: FeatureSet) { + // If no DB is configured, skip garbage collection + if self.db.is_none() { + return; + } + // todo: this is also resetting the log bytes limit and airdrop keypair, would be nice to avoid + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + } + + pub fn apply_feature_config(&mut self, feature_set: FeatureSet) -> &mut Self { + self.svm = LiteSVM::new() + .with_blockhash_check(false) + .with_sigverify(false) + .with_feature_set(feature_set); + + create_native_mint(self); + self + } + + pub fn set_log_bytes_limit(&mut self, limit: Option) { + self.svm.set_log_bytes_limit(limit); + } + + pub fn set_sigverify(&mut self, sigverify: bool) { + self.svm.set_sigverify(sigverify); + } + + pub fn with_blockhash_check(mut self, check: bool) -> Self { + self.svm = self.svm.with_blockhash_check(check); + self + } + + pub fn get_sysvar(&self) -> T + where + T: solana_sysvar::Sysvar + solana_sysvar_id::SysvarId + serde::de::DeserializeOwned, + { + self.svm.get_sysvar() + } + + pub fn set_sysvar(&mut self, sysvar: &T) + where + T: solana_sysvar::Sysvar + solana_sysvar_id::SysvarId + solana_sysvar::SysvarSerialize, + { + self.svm.set_sysvar(sysvar); + } + + pub fn expire_blockhash(&mut self) { + self.svm.expire_blockhash(); + 
} + + pub fn send_transaction(&mut self, tx: impl Into) -> TransactionResult { + self.svm.send_transaction(tx) + } + + pub fn minimum_balance_for_rent_exemption(&self, data_len: usize) -> u64 { + self.svm.minimum_balance_for_rent_exemption(data_len) + } + + pub fn simulate_transaction( + &self, + tx: impl Into, + ) -> Result { + self.svm.simulate_transaction(tx) + } + + pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + self.svm.airdrop(pubkey, lamports) + } + + pub fn get_account_no_db(&self, pubkey: &Pubkey) -> Option { + self.svm.get_account(pubkey) + } + + pub fn get_account(&self, pubkey: &Pubkey) -> SurfpoolResult> { + if let Some(account) = self.svm.get_account(pubkey) { + return Ok(Some(account)); + } else if let Some(db) = &self.db { + return Ok(db.get(&pubkey.to_string())?.map::(Into::into)); + } + Ok(None) + } + + pub fn get_account_result(&self, pubkey: &Pubkey) -> SurfpoolResult { + if let Some(account) = self.svm.get_account(pubkey) { + return Ok(GetAccountResult::FoundAccount( + *pubkey, account, + // mark as not an account that should be updated in the SVM, since this is a local read and it already exists + false, + )); + } else if let Some(db) = &self.db { + let mut result = None; + if let Some(account) = db.get(&pubkey.to_string())?.map::(Into::into) { + if is_supported_token_program(&account.owner) { + if let Ok(token_account) = crate::types::TokenAccount::unpack(&account.data) { + let mint = db.get(&token_account.mint().to_string())?.map(Into::into); + + result = Some(GetAccountResult::FoundTokenAccount( + (*pubkey, account.clone()), + (token_account.mint(), mint), + )); + }; + } else if account.executable { + let program_data_address = get_program_data_address(pubkey); + + let program_data = db.get(&program_data_address.to_string())?.map(Into::into); + + result = Some(GetAccountResult::FoundProgramAccount( + (*pubkey, account.clone()), + (program_data_address, program_data), + )); + } + + return 
Ok(result.unwrap_or(GetAccountResult::FoundAccount( + *pubkey, account, + // Mark this account as needing to be updated in the SVM, since we pulled it from the db + true, + ))); + } + } + Ok(GetAccountResult::None(*pubkey)) + } + + pub fn set_account(&mut self, pubkey: Pubkey, account: Account) -> SurfpoolResult<()> { + self.set_account_in_db(pubkey, account.clone().into())?; + + self.svm.set_account(pubkey, account)?; + Ok(()) + } + + pub fn delete_account(&mut self, pubkey: &Pubkey) -> SurfpoolResult<()> { + self.delete_account_in_db(pubkey)?; + + // You can't delete an account using the LiteSvm, so we set it to an empty account + // so it can be garbage collected later + self.svm + .set_account(*pubkey, Account::default()) + .map_err(|e| SurfpoolError::set_account(*pubkey, e))?; + Ok(()) + } + + pub fn set_account_in_db( + &mut self, + pubkey: Pubkey, + account: AccountSharedData, + ) -> SurfpoolResult<()> { + if let Some(db) = &mut self.db { + db.store(pubkey.to_string(), account)?; + } + Ok(()) + } + + pub fn delete_account_in_db(&mut self, pubkey: &Pubkey) -> SurfpoolResult<()> { + if let Some(db) = &mut self.db { + db.take(&pubkey.to_string())?; + } + Ok(()) + } + + /// Get all accounts from both the LiteSVM state and the database, merging them together. + /// Accounts in the LiteSVM state take precedence over those in the database. + /// The resulting accounts are sorted by Pubkey. + pub fn get_all_accounts(&self) -> SurfpoolResult> { + // In general, we trust the LiteSVM state as the most up-to-date source of truth for any given account, + // But there's a chance that the account was garbage collected, meaning it exists in the DB but not in the SVM. + // Therefore, we need to merge the two sources of accounts, prioritizing the SVM state. 
+ let mut accounts = HashMap::new(); + if let Some(db) = &self.db { + let db_accounts = db.into_iter()?; + for (key, account) in db_accounts { + let pubkey = Pubkey::from_str_const(&key); + accounts.insert(pubkey, account); + } + } + for (pubkey, account) in self.svm.accounts_db().inner.iter() { + if !accounts.contains_key(pubkey) { + accounts.insert(*pubkey, account.clone()); + } + } + Ok(accounts + .into_iter() + .sorted_by(|a, b| a.0.cmp(&b.0)) + .collect()) + } +} + +fn create_native_mint(svm: &mut SurfnetLiteSvm) { + use solana_program_pack::Pack; + use solana_sysvar::rent::Rent; + use spl_token_interface::state::Mint; + + let mut data = vec![0; Mint::LEN]; + let mint = Mint { + mint_authority: COption::None, + supply: 0, + decimals: spl_token_interface::native_mint::DECIMALS, + is_initialized: true, + freeze_authority: COption::None, + }; + Mint::pack(mint, &mut data).unwrap(); + let account = Account { + lamports: svm.get_sysvar::().minimum_balance(data.len()), + data, + owner: spl_token_interface::ID, + executable: false, + rent_epoch: 0, + }; + svm.set_account(spl_token_interface::native_mint::ID, account) + .expect("Failed to create native mint account in SVM"); +} diff --git a/crates/core/src/surfnet/svm.rs b/crates/core/src/surfnet/svm.rs index fef8584a..7a9c5a1b 100644 --- a/crates/core/src/surfnet/svm.rs +++ b/crates/core/src/surfnet/svm.rs @@ -1,6 +1,6 @@ use std::{ cmp::max, - collections::{BTreeMap, BinaryHeap, HashMap, HashSet, VecDeque}, + collections::{BTreeMap, HashMap, HashSet, VecDeque}, str::FromStr, time::SystemTime, }; @@ -27,13 +27,9 @@ use base64::{Engine, prelude::BASE64_STANDARD}; use chrono::Utc; use convert_case::Casing; use crossbeam_channel::{Receiver, Sender, unbounded}; -use litesvm::{ - LiteSVM, - types::{ - FailedTransactionMetadata, SimulatedTransactionInfo, TransactionMetadata, TransactionResult, - }, +use litesvm::types::{ + FailedTransactionMetadata, SimulatedTransactionInfo, TransactionMetadata, TransactionResult, }; -use 
litesvm_token::create_native_mint; use solana_account::{Account, AccountSharedData, ReadableAccount}; use solana_account_decoder::{ UiAccount, UiAccountData, UiAccountEncoding, UiDataSliceConfig, encode_ui_account, @@ -101,7 +97,10 @@ use crate::{ error::{SurfpoolError, SurfpoolResult}, rpc::utils::convert_transaction_metadata_from_canonical, scenarios::TemplateRegistry, - surfnet::{LogsSubscriptionData, locker::is_supported_token_program}, + storage::{Storage, new_kv_store}, + surfnet::{ + LogsSubscriptionData, locker::is_supported_token_program, surfnet_lite_svm::SurfnetLiteSvm, + }, types::{ GeyserAccountUpdate, MintAccount, SurfnetTransactionStatus, SyntheticBlockhash, TokenAccount, TransactionWithStatusMeta, @@ -215,11 +214,11 @@ pub fn get_txtx_value_json_converters() -> Vec> { /// It also exposes channels to listen for simulation events (`SimnetEvent`) and Geyser plugin events (`GeyserEvent`). #[derive(Clone)] pub struct SurfnetSvm { - pub inner: LiteSVM, + pub inner: SurfnetLiteSvm, pub remote_rpc_url: Option, pub chain_tip: BlockIdentifier, - pub blocks: HashMap, - pub transactions: HashMap, + pub blocks: Box>, + pub transactions: Box>, pub transactions_queued_for_confirmation: VecDeque<( VersionedTransaction, Sender, @@ -239,20 +238,20 @@ pub struct SurfnetSvm { pub signature_subscriptions: HashMap>, pub account_subscriptions: AccountSubscriptionData, pub slot_subscriptions: Vec>, - pub profile_tag_map: HashMap>, + pub profile_tag_map: Box>>, pub simulated_transaction_profiles: HashMap, pub executed_transaction_profiles: FifoMap, pub logs_subscriptions: Vec, pub updated_at: u64, pub slot_time: u64, pub start_time: SystemTime, - pub accounts_by_owner: HashMap>, + pub accounts_by_owner: Box>>, pub account_associated_data: HashMap, - pub token_accounts: HashMap, - pub token_mints: HashMap, - pub token_accounts_by_owner: HashMap>, - pub token_accounts_by_delegate: HashMap>, - pub token_accounts_by_mint: HashMap>, + pub token_accounts: Box>, + pub token_mints: 
Box>, + pub token_accounts_by_owner: Box>>, + pub token_accounts_by_delegate: Box>>, + pub token_accounts_by_mint: Box>>, pub total_supply: u64, pub circulating_supply: u64, pub non_circulating_supply: u64, @@ -263,16 +262,15 @@ pub struct SurfnetSvm { /// For example, when an account is updated in the same slot multiple times, /// the update with higher write_version should supersede the one with lower write_version. pub write_version: u64, - pub registered_idls: HashMap>, - // pub registered_idls: HashMap<[u8; 8], BinaryHeap>, + pub registered_idls: Box>>, pub feature_set: FeatureSet, pub instruction_profiling_enabled: bool, pub max_profiles: usize, pub runbook_executions: Vec, pub account_update_slots: HashMap, - pub streamed_accounts: HashMap, + pub streamed_accounts: Box>, pub recent_blockhashes: VecDeque<(SyntheticBlockhash, i64)>, - pub scheduled_overrides: HashMap>, + pub scheduled_overrides: Box>>, /// Tracks accounts that have been explicitly closed by the user. /// These accounts will not be fetched from mainnet even if they don't exist in the local cache. pub closed_accounts: HashSet, @@ -283,10 +281,42 @@ pub const FEATURE: Feature = Feature { }; impl SurfnetSvm { + pub fn new() -> (Self, Receiver, Receiver) { + Self::_new(None, 0).unwrap() + } + + pub fn new_with_db( + database_url: Option<&str>, + surfnet_id: u32, + ) -> SurfpoolResult<(Self, Receiver, Receiver)> { + Self::_new(database_url, surfnet_id) + } + + /// Explicitly shutdown the SVM, performing cleanup like WAL checkpoint for SQLite. + /// This should be called before the application exits to ensure data is persisted. 
+ pub fn shutdown(&self) { + self.inner.shutdown(); + self.blocks.shutdown(); + self.transactions.shutdown(); + self.token_accounts.shutdown(); + self.token_mints.shutdown(); + self.accounts_by_owner.shutdown(); + self.token_accounts_by_owner.shutdown(); + self.token_accounts_by_delegate.shutdown(); + self.token_accounts_by_mint.shutdown(); + self.streamed_accounts.shutdown(); + self.scheduled_overrides.shutdown(); + self.registered_idls.shutdown(); + self.profile_tag_map.shutdown(); + } + /// Creates a new instance of `SurfnetSvm`. /// /// Returns a tuple containing the SVM instance, a receiver for simulation events, and a receiver for Geyser plugin events. - pub fn new() -> (Self, Receiver, Receiver) { + fn _new( + database_url: Option<&str>, + surfnet_id: u32, + ) -> SurfpoolResult<(Self, Receiver, Receiver)> { let (simnet_events_tx, simnet_events_rx) = crossbeam_channel::bounded(1024); let (geyser_events_tx, geyser_events_rx) = crossbeam_channel::bounded(1024); @@ -296,32 +326,64 @@ impl SurfnetSvm { // todo: consider making this configurable via config feature_set.deactivate(&enable_extend_program_checked::id()); - let mut inner = LiteSVM::new() - .with_feature_set(feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); + let inner = + SurfnetLiteSvm::new().initialize(feature_set.clone(), database_url, surfnet_id)?; - // Add the native mint (SOL) to the SVM - create_native_mint(&mut inner); let native_mint_account = inner - .get_account(&spl_token_interface::native_mint::ID) + .get_account(&spl_token_interface::native_mint::ID)? 
.unwrap(); let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); // Load native mint into owned account and token mint indexes - let accounts_by_owner = HashMap::from([( - native_mint_account.owner, - vec![spl_token_interface::native_mint::ID], - )]); - let token_mints = - HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); + let mut accounts_by_owner_db: Box>> = + new_kv_store(&database_url, "accounts_by_owner", surfnet_id)?; + accounts_by_owner_db.store( + native_mint_account.owner.to_string(), + vec![spl_token_interface::native_mint::ID.to_string()], + )?; + let blocks_db = new_kv_store(&database_url, "blocks", surfnet_id)?; + let transactions_db = new_kv_store(&database_url, "transactions", surfnet_id)?; + let token_accounts_db = new_kv_store(&database_url, "token_accounts", surfnet_id)?; + let mut token_mints_db: Box> = + new_kv_store(&database_url, "token_mints", surfnet_id)?; + token_mints_db.store( + spl_token_interface::native_mint::ID.to_string(), + parsed_mint_account, + )?; + let token_accounts_by_owner_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_owner", surfnet_id)?; + let token_accounts_by_delegate_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_delegate", surfnet_id)?; + let token_accounts_by_mint_db: Box>> = + new_kv_store(&database_url, "token_accounts_by_mint", surfnet_id)?; + let streamed_accounts_db: Box> = + new_kv_store(&database_url, "streamed_accounts", surfnet_id)?; + let scheduled_overrides_db: Box>> = + new_kv_store(&database_url, "scheduled_overrides", surfnet_id)?; + let registered_idls_db: Box>> = + new_kv_store(&database_url, "registered_idls", surfnet_id)?; + let profile_tag_map_db: Box>> = + new_kv_store(&database_url, "profile_tag_map", surfnet_id)?; + + let chain_tip = if let Some((_, block)) = blocks_db + .into_iter() + .unwrap() + .max_by_key(|(slot, _): &(u64, BlockHeader)| *slot) + { + BlockIdentifier { + index: block.block_height, + hash: 
block.hash, + } + } else { + BlockIdentifier::zero() + }; let mut svm = Self { inner, remote_rpc_url: None, - chain_tip: BlockIdentifier::zero(), - blocks: HashMap::new(), - transactions: HashMap::new(), + chain_tip, + blocks: blocks_db, + transactions: transactions_db, perf_samples: VecDeque::new(), transactions_processed: 0, simnet_events_tx, @@ -339,20 +401,20 @@ impl SurfnetSvm { signature_subscriptions: HashMap::new(), account_subscriptions: HashMap::new(), slot_subscriptions: Vec::new(), - profile_tag_map: HashMap::new(), + profile_tag_map: profile_tag_map_db, simulated_transaction_profiles: HashMap::new(), executed_transaction_profiles: FifoMap::default(), logs_subscriptions: Vec::new(), updated_at: Utc::now().timestamp_millis() as u64, slot_time: DEFAULT_SLOT_TIME_MS, start_time: SystemTime::now(), - accounts_by_owner, + accounts_by_owner: accounts_by_owner_db, account_associated_data: HashMap::new(), - token_accounts: HashMap::new(), - token_mints, - token_accounts_by_owner: HashMap::new(), - token_accounts_by_delegate: HashMap::new(), - token_accounts_by_mint: HashMap::new(), + token_accounts: token_accounts_db, + token_mints: token_mints_db, + token_accounts_by_owner: token_accounts_by_owner_db, + token_accounts_by_delegate: token_accounts_by_delegate_db, + token_accounts_by_mint: token_accounts_by_mint_db, total_supply: 0, circulating_supply: 0, non_circulating_supply: 0, @@ -360,22 +422,22 @@ impl SurfnetSvm { genesis_config: GenesisConfig::default(), inflation: Inflation::default(), write_version: 0, - registered_idls: HashMap::new(), + registered_idls: registered_idls_db, feature_set, instruction_profiling_enabled: true, max_profiles: DEFAULT_PROFILING_MAP_CAPACITY, runbook_executions: Vec::new(), account_update_slots: HashMap::new(), - streamed_accounts: HashMap::new(), + streamed_accounts: streamed_accounts_db, recent_blockhashes: VecDeque::new(), - scheduled_overrides: HashMap::new(), + scheduled_overrides: scheduled_overrides_db, closed_accounts: 
HashSet::new(), }; // Generate the initial synthetic blockhash svm.chain_tip = svm.new_blockhash(); - (svm, simnet_events_rx, geyser_events_rx) + Ok((svm, simnet_events_rx, geyser_events_rx)) } /// Applies the SVM feature configuration to the internal feature set. @@ -400,14 +462,8 @@ impl SurfnetSvm { } } - // Rebuild LiteSVM with updated feature set - self.inner = LiteSVM::new() - .with_feature_set(self.feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); - - // Re-add the native mint - create_native_mint(&mut self.inner); + // Rebuild inner VM with updated feature set + self.inner.apply_feature_config(self.feature_set.clone()); } /// Maps an SvmFeature enum variant to its corresponding feature ID (Pubkey). @@ -517,7 +573,7 @@ impl SurfnetSvm { let registry = TemplateRegistry::new(); for (_, template) in registry.templates.into_iter() { - self.register_idl(template.idl, None); + let _ = self.register_idl(template.idl, None); } if let Some(remote_client) = remote_ctx { @@ -559,13 +615,13 @@ impl SurfnetSvm { /// # Returns /// A `TransactionResult` indicating success or failure. #[allow(clippy::result_large_err)] - pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> TransactionResult { + pub fn airdrop(&mut self, pubkey: &Pubkey, lamports: u64) -> SurfpoolResult { let res = self.inner.airdrop(pubkey, lamports); let (status_tx, _rx) = unbounded(); if let Ok(ref tx_result) = res { let airdrop_keypair = Keypair::new(); let slot = self.latest_epoch_info.absolute_slot; - let account = self.get_account(pubkey).unwrap(); + let account = self.get_account(pubkey)?.unwrap(); let mut tx = VersionedTransaction::try_new( VersionedMessage::Legacy(Message::new( @@ -586,11 +642,11 @@ impl SurfnetSvm { tx.signatures[0] = tx_result.signature; let system_lamports = self - .get_account(&system_program::id()) + .get_account(&system_program::id())? 
.map(|a| a.lamports()) .unwrap_or(1); - self.transactions.insert( - *tx.get_signature(), + self.transactions.store( + tx.get_signature().to_string(), SurfnetTransactionStatus::processed( TransactionWithStatusMeta { slot, @@ -621,7 +677,7 @@ impl SurfnetSvm { }, HashSet::from([*pubkey]), ), - ); + )?; self.notify_signature_subscribers( SignatureSubscriptionType::processed(), tx.get_signature(), @@ -636,10 +692,10 @@ impl SurfnetSvm { ); self.transactions_queued_for_confirmation .push_back((tx, status_tx.clone(), None)); - let account = self.get_account(pubkey).unwrap(); - let _ = self.set_account(pubkey, account); + let account = self.get_account(pubkey)?.unwrap(); + self.set_account(pubkey, account)?; } - res + Ok(res) } /// Airdrops a specified amount of lamports to a list of public keys. @@ -649,11 +705,20 @@ impl SurfnetSvm { /// * `addresses` - Slice of recipient public keys. pub fn airdrop_pubkeys(&mut self, lamports: u64, addresses: &[Pubkey]) { for recipient in addresses { - let _ = self.airdrop(recipient, lamports); - let _ = self.simnet_events_tx.send(SimnetEvent::info(format!( - "Genesis airdrop successful {}: {}", - recipient, lamports - ))); + match self.airdrop(recipient, lamports) { + Ok(_) => { + let _ = self.simnet_events_tx.send(SimnetEvent::info(format!( + "Genesis airdrop successful {}: {}", + recipient, lamports + ))); + } + Err(e) => { + let _ = self.simnet_events_tx.send(SimnetEvent::error(format!( + "Genesis airdrop failed {}: {}", + recipient, e + ))); + } + }; } } @@ -815,7 +880,9 @@ impl SurfnetSvm { trace!("Nonce account pubkey: {:?}", nonce_account_pubkey,); - let Some(nonce_account) = self.get_account(nonce_account_pubkey) else { + // Here we're swallowing errors in the storage - if we fail to fetch the account because of a storage error, + // we're just considering the blockhash to be invalid. 
+ let Ok(Some(nonce_account)) = self.get_account(nonce_account_pubkey) else { return false; }; trace!("Nonce account: {:?}", nonce_account); @@ -871,60 +938,95 @@ impl SurfnetSvm { pubkey: &Pubkey, account: &Account, ) -> SurfpoolResult<()> { - if account == &Account::default() { + let is_deleted_account = account == &Account::default(); + + // When this function is called after processing a transaction, the account is already updated + // in the inner SVM. However, the database hasn't been updated yet, so we need to manually update the db. + if is_deleted_account { + // This amounts to deleting the account from the db if the account is deleted in the SVM + self.inner.delete_account_in_db(pubkey)?; + } else { + // Or updating the db account to match the SVM account if not deleted + self.inner + .set_account_in_db(*pubkey, account.clone().into())?; + } + + if is_deleted_account { self.closed_accounts.insert(*pubkey); - if let Some(old_account) = self.get_account(pubkey) { - self.remove_from_indexes(pubkey, &old_account); + if let Some(old_account) = self.get_account(pubkey)? { + self.remove_from_indexes(pubkey, &old_account)?; } return Ok(()); } // only update our indexes if the account exists in the svm accounts db - if let Some(old_account) = self.get_account(pubkey) { - self.remove_from_indexes(pubkey, &old_account); + if let Some(old_account) = self.get_account(pubkey)? 
{ + self.remove_from_indexes(pubkey, &old_account)?; } // add to owner index (check for duplicates) - let owner_accounts = self.accounts_by_owner.entry(account.owner).or_default(); - if !owner_accounts.contains(pubkey) { - owner_accounts.push(*pubkey); + let owner_key = account.owner.to_string(); + let pubkey_str = pubkey.to_string(); + let mut owner_accounts = self + .accounts_by_owner + .get(&owner_key) + .ok() + .flatten() + .unwrap_or_default(); + if !owner_accounts.contains(&pubkey_str) { + owner_accounts.push(pubkey_str.clone()); + self.accounts_by_owner.store(owner_key, owner_accounts)?; } // if it's a token account, update token-specific indexes if is_supported_token_program(&account.owner) { if let Ok(token_account) = TokenAccount::unpack(&account.data) { // index by owner -> check for duplicates - let token_owner_accounts = self + let owner_key = token_account.owner().to_string(); + let mut token_owner_accounts = self .token_accounts_by_owner - .entry(token_account.owner()) - .or_default(); - - if !token_owner_accounts.contains(pubkey) { - token_owner_accounts.push(*pubkey); + .get(&owner_key) + .ok() + .flatten() + .unwrap_or_default(); + if !token_owner_accounts.contains(&pubkey_str) { + token_owner_accounts.push(pubkey_str.clone()); + self.token_accounts_by_owner + .store(owner_key, token_owner_accounts)?; } // index by mint -> check for duplicates - let mint_accounts = self + let mint_key = token_account.mint().to_string(); + let mut mint_accounts = self .token_accounts_by_mint - .entry(token_account.mint()) - .or_default(); - - if !mint_accounts.contains(pubkey) { - mint_accounts.push(*pubkey); + .get(&mint_key) + .ok() + .flatten() + .unwrap_or_default(); + if !mint_accounts.contains(&pubkey_str) { + mint_accounts.push(pubkey_str.clone()); + self.token_accounts_by_mint.store(mint_key, mint_accounts)?; } if let COption::Some(delegate) = token_account.delegate() { - let delegate_accounts = - 
self.token_accounts_by_delegate.entry(delegate).or_default(); - if !delegate_accounts.contains(pubkey) { - delegate_accounts.push(*pubkey); + let delegate_key = delegate.to_string(); + let mut delegate_accounts = self + .token_accounts_by_delegate + .get(&delegate_key) + .ok() + .flatten() + .unwrap_or_default(); + if !delegate_accounts.contains(&pubkey_str) { + delegate_accounts.push(pubkey_str); + self.token_accounts_by_delegate + .store(delegate_key, delegate_accounts)?; } } - - self.token_accounts.insert(*pubkey, token_account); + self.token_accounts + .store(pubkey.to_string(), token_account)?; } if let Ok(mint_account) = MintAccount::unpack(&account.data) { - self.token_mints.insert(*pubkey, mint_account); + self.token_mints.store(pubkey.to_string(), mint_account)?; } if let Ok(mint) = @@ -954,94 +1056,108 @@ impl SurfnetSvm { Ok(()) } - fn remove_from_indexes(&mut self, pubkey: &Pubkey, old_account: &Account) { - if let Some(accounts) = self.accounts_by_owner.get_mut(&old_account.owner) { - accounts.retain(|pk| pk != pubkey); + fn remove_from_indexes( + &mut self, + pubkey: &Pubkey, + old_account: &Account, + ) -> SurfpoolResult<()> { + let owner_key = old_account.owner.to_string(); + let pubkey_str = pubkey.to_string(); + if let Some(mut accounts) = self.accounts_by_owner.get(&owner_key).ok().flatten() { + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.accounts_by_owner.remove(&old_account.owner); + self.accounts_by_owner.take(&owner_key)?; + } else { + self.accounts_by_owner.store(owner_key, accounts)?; } } // if it was a token account, remove from token indexes if is_supported_token_program(&old_account.owner) { - if let Some(old_token_account) = self.token_accounts.remove(pubkey) { - if let Some(accounts) = self - .token_accounts_by_owner - .get_mut(&old_token_account.owner()) + if let Some(old_token_account) = self.token_accounts.take(&pubkey.to_string())? 
{ + let owner_key = old_token_account.owner().to_string(); + if let Some(mut accounts) = + self.token_accounts_by_owner.get(&owner_key).ok().flatten() { - accounts.retain(|pk| pk != pubkey); + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_owner - .remove(&old_token_account.owner()); + self.token_accounts_by_owner.take(&owner_key)?; + } else { + self.token_accounts_by_owner.store(owner_key, accounts)?; } } - if let Some(accounts) = self - .token_accounts_by_mint - .get_mut(&old_token_account.mint()) + let mint_key = old_token_account.mint().to_string(); + if let Some(mut accounts) = + self.token_accounts_by_mint.get(&mint_key).ok().flatten() { - accounts.retain(|pk| pk != pubkey); + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_mint - .remove(&old_token_account.mint()); + self.token_accounts_by_mint.take(&mint_key)?; + } else { + self.token_accounts_by_mint.store(mint_key, accounts)?; } } if let COption::Some(delegate) = old_token_account.delegate() { - if let Some(accounts) = self.token_accounts_by_delegate.get_mut(&delegate) { - accounts.retain(|pk| pk != pubkey); + let delegate_key = delegate.to_string(); + if let Some(mut accounts) = self + .token_accounts_by_delegate + .get(&delegate_key) + .ok() + .flatten() + { + accounts.retain(|pk| pk != &pubkey_str); if accounts.is_empty() { - self.token_accounts_by_delegate.remove(&delegate); + self.token_accounts_by_delegate.take(&delegate_key)?; + } else { + self.token_accounts_by_delegate + .store(delegate_key, accounts)?; } } } } } + Ok(()) } pub fn reset_network(&mut self) -> SurfpoolResult<()> { - // pub inner: LiteSVM, - let mut inner = LiteSVM::new() - .with_feature_set(self.feature_set.clone()) - .with_blockhash_check(false) - .with_sigverify(false); - - // Add the native mint (SOL) to the SVM - create_native_mint(&mut inner); - let native_mint_account = inner - .get_account(&spl_token_interface::native_mint::ID) + 
self.inner.reset(self.feature_set.clone())?; + + let native_mint_account = self + .inner + .get_account(&spl_token_interface::native_mint::ID)? .unwrap(); let parsed_mint_account = MintAccount::unpack(&native_mint_account.data).unwrap(); - // Load native mint into owned account and token mint indexes - let accounts_by_owner = HashMap::from([( - native_mint_account.owner, - vec![spl_token_interface::native_mint::ID], - )]); - let token_mints = - HashMap::from([(spl_token_interface::native_mint::ID, parsed_mint_account)]); - - self.inner = inner; - self.blocks.clear(); - self.transactions.clear(); + self.blocks.clear()?; + self.transactions.clear()?; self.transactions_queued_for_confirmation.clear(); self.transactions_queued_for_finalization.clear(); self.perf_samples.clear(); self.transactions_processed = 0; - self.profile_tag_map.clear(); + self.profile_tag_map.clear()?; self.simulated_transaction_profiles.clear(); - self.accounts_by_owner = accounts_by_owner; + self.accounts_by_owner.clear()?; + self.accounts_by_owner.store( + native_mint_account.owner.to_string(), + vec![spl_token_interface::native_mint::ID.to_string()], + )?; self.account_associated_data.clear(); - self.token_accounts.clear(); - self.token_mints = token_mints; - self.token_accounts_by_owner.clear(); - self.token_accounts_by_delegate.clear(); - self.token_accounts_by_mint.clear(); + self.token_accounts.clear()?; + self.token_mints.clear()?; + self.token_mints.store( + spl_token_interface::native_mint::ID.to_string(), + parsed_mint_account, + )?; + self.token_accounts_by_owner.clear()?; + self.token_accounts_by_delegate.clear()?; + self.token_accounts_by_mint.clear()?; self.non_circulating_accounts.clear(); - self.registered_idls.clear(); + self.registered_idls.clear()?; self.runbook_executions.clear(); - self.streamed_accounts.clear(); - self.scheduled_overrides.clear(); + self.streamed_accounts.clear()?; + self.scheduled_overrides.clear()?; Ok(()) } @@ -1050,7 +1166,7 @@ impl SurfnetSvm { 
pubkey: &Pubkey, include_owned_accounts: bool, ) -> SurfpoolResult<()> { - let Some(account) = self.get_account(pubkey) else { + let Some(account) = self.get_account(pubkey)? else { return Ok(()); }; @@ -1065,7 +1181,7 @@ impl SurfnetSvm { } } if include_owned_accounts { - let owned_accounts = self.get_account_owned_by(pubkey); + let owned_accounts = self.get_account_owned_by(pubkey)?; for (owned_pubkey, _) in owned_accounts { // Avoid infinite recursion by not cascading further self.purge_account_from_cache(&account, &owned_pubkey)?; @@ -1081,12 +1197,9 @@ impl SurfnetSvm { account: &Account, pubkey: &Pubkey, ) -> SurfpoolResult<()> { - self.remove_from_indexes(pubkey, account); + self.remove_from_indexes(pubkey, account)?; - // Set the empty account - self.inner - .set_account(*pubkey, Account::default()) - .map_err(|e| SurfpoolError::set_account(*pubkey, e))?; + self.inner.delete_account(pubkey)?; Ok(()) } @@ -1268,7 +1381,7 @@ impl SurfnetSvm { ); let Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(&signature) + self.transactions.get(&signature.to_string()).ok().flatten() else { continue; }; @@ -1317,7 +1430,7 @@ impl SurfnetSvm { error, ); let Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(signature) + self.transactions.get(&signature.to_string()).ok().flatten() else { continue; }; @@ -1386,10 +1499,18 @@ impl SurfnetSvm { if let Some((programdata_address, programdata_account)) = init_programdata_account(&account) { - if self.get_account(&programdata_address).is_none() { - if let Err(e) = - self.set_account(&programdata_address, programdata_account) - { + match self.get_account(&programdata_address) { + Ok(None) => { + if let Err(e) = + self.set_account(&programdata_address, programdata_account) + { + let _ = self + .simnet_events_tx + .send(SimnetEvent::error(e.to_string())); + } + } + Ok(Some(_)) => {} + Err(e) => { let _ = self .simnet_events_tx .send(SimnetEvent::error(e.to_string())); @@ -1407,9 
+1528,18 @@ impl SurfnetSvm { if let Some((programdata_address, programdata_account)) = init_programdata_account(&account) { - if self.get_account(&programdata_address).is_none() { - if let Err(e) = self.set_account(&programdata_address, programdata_account) - { + match self.get_account(&programdata_address) { + Ok(None) => { + if let Err(e) = + self.set_account(&programdata_address, programdata_account) + { + let _ = self + .simnet_events_tx + .send(SimnetEvent::error(e.to_string())); + } + } + Ok(Some(_)) => {} + Err(e) => { let _ = self .simnet_events_tx .send(SimnetEvent::error(e.to_string())); @@ -1453,9 +1583,13 @@ impl SurfnetSvm { } } - pub fn confirm_current_block(&mut self) -> Result<(), SurfpoolError> { + pub fn confirm_current_block(&mut self) -> SurfpoolResult<()> { let slot = self.get_latest_absolute_slot(); let previous_chain_tip = self.chain_tip.clone(); + if slot % 100 == 0 { + debug!("Clearing liteSVM cache at slot {}", slot); + self.inner.garbage_collect(self.feature_set.clone()); + } self.chain_tip = self.new_blockhash(); // Confirm processed transactions let (confirmed_signatures, all_mutated_account_keys) = self.confirm_transactions()?; @@ -1463,7 +1597,7 @@ impl SurfnetSvm { // Notify Geyser plugin of account updates for pubkey in all_mutated_account_keys { - let Some(account) = self.inner.get_account(&pubkey) else { + let Some(account) = self.inner.get_account(&pubkey)? 
else { continue; }; self.geyser_events_tx @@ -1476,7 +1610,7 @@ impl SurfnetSvm { let num_transactions = confirmed_signatures.len() as u64; self.updated_at += self.slot_time; - self.blocks.insert( + self.blocks.store( slot, BlockHeader { hash: self.chain_tip.hash.clone(), @@ -1486,7 +1620,7 @@ impl SurfnetSvm { parent_slot: slot, signatures: confirmed_signatures, }, - ); + )?; if self.perf_samples.len() > 30 { self.perf_samples.pop_back(); } @@ -1529,9 +1663,11 @@ impl SurfnetSvm { self.finalize_transactions()?; // Evict the accounts marked as streamed from cache to enforce them to be fetched again - let accounts_to_reset = self.streamed_accounts.clone(); - for (pubkey, include_owned_accounts) in accounts_to_reset.iter() { - self.reset_account(pubkey, *include_owned_accounts)?; + let accounts_to_reset: Vec<_> = self.streamed_accounts.into_iter()?.collect(); + for (pubkey_str, include_owned_accounts) in accounts_to_reset { + let pubkey = Pubkey::from_str(&pubkey_str) + .map_err(|e| SurfpoolError::invalid_pubkey(&pubkey_str, e.to_string()))?; + self.reset_account(&pubkey, include_owned_accounts)?; } Ok(()) @@ -1552,7 +1688,7 @@ impl SurfnetSvm { let current_slot = self.latest_epoch_info.absolute_slot; // Remove and get overrides for this slot - let Some(overrides) = self.scheduled_overrides.remove(¤t_slot) else { + let Some(overrides) = self.scheduled_overrides.take(¤t_slot)? else { // No overrides for this slot return Ok(()); }; @@ -1653,7 +1789,7 @@ impl SurfnetSvm { ); // Get the account from the SVM - let Some(account) = self.inner.get_account(&account_pubkey) else { + let Some(account) = self.inner.get_account(&account_pubkey)? 
else { warn!( "Account {} not found in SVM for override {}, skipping modifications", account_pubkey, override_instance.id @@ -1665,16 +1801,26 @@ impl SurfnetSvm { let owner_program_id = account.owner(); // Look up the IDL for the owner program - let Some(idl_versions) = self.registered_idls.get(owner_program_id) else { - warn!( - "No IDL registered for program {} (owner of account {}), skipping override {}", - owner_program_id, account_pubkey, override_instance.id - ); - continue; + let idl_versions = match self.registered_idls.get(&owner_program_id.to_string()) { + Ok(Some(versions)) => versions, + Ok(None) => { + warn!( + "No IDL registered for program {} (owner of account {}), skipping override {}", + owner_program_id, account_pubkey, override_instance.id + ); + continue; + } + Err(e) => { + warn!( + "Failed to get IDL for program {}: {}, skipping override {}", + owner_program_id, e, override_instance.id + ); + continue; + } }; - // Get the latest IDL version - let Some(versioned_idl) = idl_versions.peek() else { + // Get the latest IDL version (first in the sorted Vec) + let Some(versioned_idl) = idl_versions.first() else { warn!( "IDL versions empty for program {}, skipping override {}", owner_program_id, override_instance.id @@ -1968,7 +2114,7 @@ impl SurfnetSvm { slot: Slot, config: &RpcBlockConfig, ) -> SurfpoolResult> { - let Some(block) = self.blocks.get(&slot) else { + let Some(block) = self.blocks.get(&slot)? 
else { return Ok(None); }; @@ -1982,7 +2128,7 @@ impl SurfnetSvm { block .signatures .iter() - .filter_map(|sig| self.transactions.get(sig)) + .filter_map(|sig| self.transactions.get(&sig.to_string()).ok().flatten()) .map(|tx_with_meta| { let (meta, _) = tx_with_meta.expect_processed(); meta.encode( @@ -2002,7 +2148,7 @@ impl SurfnetSvm { block .signatures .iter() - .filter_map(|sig| self.transactions.get(sig)) + .filter_map(|sig| self.transactions.get(&sig.to_string()).ok().flatten()) .map(|tx_with_meta| { let (meta, _) = tx_with_meta.expect_processed(); meta.to_json_accounts( @@ -2042,6 +2188,7 @@ impl SurfnetSvm { pub fn blockhash_for_slot(&self, slot: Slot) -> Option { self.blocks .get(&slot) + .unwrap() .and_then(|header| header.hash.parse().ok()) } @@ -2054,18 +2201,26 @@ impl SurfnetSvm { /// # Returns /// /// * A vector of (account_pubkey, account) tuples for all accounts owned by the program. - pub fn get_account_owned_by(&self, program_id: &Pubkey) -> Vec<(Pubkey, Account)> { - if let Some(account_pubkeys) = self.accounts_by_owner.get(program_id) { - account_pubkeys - .iter() - .filter_map(|pubkey| { - self.get_account(pubkey) - .map(|account| (*pubkey, account.clone())) - }) - .collect() - } else { - Vec::new() - } + pub fn get_account_owned_by( + &self, + program_id: &Pubkey, + ) -> SurfpoolResult> { + let account_pubkeys = self + .accounts_by_owner + .get(&program_id.to_string()) + .ok() + .flatten() + .unwrap_or_default(); + + account_pubkeys + .iter() + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + self.get_account(&pk) + .map(|res| res.map(|account| (pk, account.clone()))) + .transpose() + }) + .collect::, SurfpoolError>>() } fn get_additional_data( @@ -2076,7 +2231,11 @@ impl SurfnetSvm { let token_mint = if let Some(mint) = token_mint { Some(mint) } else { - self.token_accounts.get(pubkey).map(|ta| ta.mint()) + self.token_accounts + .get(&pubkey.to_string()) + .ok() + .flatten() + .map(|ta| ta.mint()) }; 
token_mint.and_then(|mint| self.account_associated_data.get(&mint).cloned()) @@ -2113,10 +2272,22 @@ impl SurfnetSvm { /// /// * A vector of (account_pubkey, token_account) tuples for all token accounts delegated to the specified delegate. pub fn get_token_accounts_by_delegate(&self, delegate: &Pubkey) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_delegate.get(delegate) { + if let Some(account_pubkeys) = self + .token_accounts_by_delegate + .get(&delegate.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + self.token_accounts + .get(pk_str) + .ok() + .flatten() + .map(|ta| (pk, ta)) + }) .collect() } else { Vec::new() @@ -2136,26 +2307,48 @@ impl SurfnetSvm { &self, owner: &Pubkey, ) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_owner.get(owner) { + if let Some(account_pubkeys) = self + .token_accounts_by_owner + .get(&owner.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + self.token_accounts + .get(pk_str) + .ok() + .flatten() + .map(|ta| (pk, ta)) + }) .collect() } else { Vec::new() } } - pub fn get_token_accounts_by_owner(&self, owner: &Pubkey) -> Vec<(Pubkey, Account)> { - self.token_accounts_by_owner - .get(owner) - .map(|account_pubkeys| { - account_pubkeys - .iter() - .filter_map(|pk| self.get_account(pk).map(|account| (*pk, account.clone()))) - .collect() + pub fn get_token_accounts_by_owner( + &self, + owner: &Pubkey, + ) -> SurfpoolResult> { + let account_pubkeys = self + .token_accounts_by_owner + .get(&owner.to_string()) + .ok() + .flatten() + .unwrap_or_default(); + + account_pubkeys + .iter() + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + 
self.get_account(&pk) + .map(|res| res.map(|account| (pk, account.clone()))) + .transpose() }) - .unwrap_or_default() + .collect::, SurfpoolError>>() } /// Gets all token accounts for a specific mint (token type). @@ -2168,10 +2361,22 @@ impl SurfnetSvm { /// /// * A vector of (account_pubkey, token_account) tuples for all token accounts of the specified mint. pub fn get_token_accounts_by_mint(&self, mint: &Pubkey) -> Vec<(Pubkey, TokenAccount)> { - if let Some(account_pubkeys) = self.token_accounts_by_mint.get(mint) { + if let Some(account_pubkeys) = self + .token_accounts_by_mint + .get(&mint.to_string()) + .ok() + .flatten() + { account_pubkeys .iter() - .filter_map(|pk| self.token_accounts.get(pk).map(|ta| (*pk, *ta))) + .filter_map(|pk_str| { + let pk = Pubkey::from_str(pk_str).ok()?; + self.token_accounts + .get(pk_str) + .ok() + .flatten() + .map(|ta| (pk, ta)) + }) .collect() } else { Vec::new() @@ -2194,28 +2399,39 @@ impl SurfnetSvm { uuid: Uuid, tag: Option, profile_result: KeyedProfileResult, - ) { + ) -> SurfpoolResult<()> { self.simulated_transaction_profiles .insert(uuid, profile_result); let tag = tag.unwrap_or_else(|| uuid.to_string()); - self.profile_tag_map - .entry(tag) - .or_default() - .push(UuidOrSignature::Uuid(uuid)); + let mut tags = self + .profile_tag_map + .get(&tag) + .ok() + .flatten() + .unwrap_or_default(); + tags.push(UuidOrSignature::Uuid(uuid)); + self.profile_tag_map.store(tag, tags)?; + Ok(()) } pub fn write_executed_profile_result( &mut self, signature: Signature, profile_result: KeyedProfileResult, - ) { + ) -> SurfpoolResult<()> { self.executed_transaction_profiles .insert(signature, profile_result); - self.profile_tag_map - .entry(signature.to_string()) - .or_default() - .push(UuidOrSignature::Signature(signature)); + let tag = signature.to_string(); + let mut tags = self + .profile_tag_map + .get(&tag) + .ok() + .flatten() + .unwrap_or_default(); + tags.push(UuidOrSignature::Signature(signature)); + 
self.profile_tag_map.store(tag, tags)?; + Ok(()) } pub fn subscribe_for_logs_updates( @@ -2248,7 +2464,7 @@ impl SurfnetSvm { // Get the tx accounts including loaded addresses let transaction_accounts = if let Some(SurfnetTransactionStatus::Processed(tx_data)) = - self.transactions.get(signature) + self.transactions.get(&signature.to_string()).ok().flatten() { let (tx_meta, _) = tx_data.as_ref(); let mut accounts = match &tx_meta.transaction.message { @@ -2288,13 +2504,21 @@ impl SurfnetSvm { } } - pub fn register_idl(&mut self, idl: Idl, slot: Option) { + pub fn register_idl(&mut self, idl: Idl, slot: Option) -> SurfpoolResult<()> { let slot = slot.unwrap_or(self.latest_epoch_info.absolute_slot); let program_id = Pubkey::from_str_const(&idl.address); - self.registered_idls - .entry(program_id) - .or_default() - .push(VersionedIdl(slot, idl)); + let program_id_str = program_id.to_string(); + let mut idl_versions = self + .registered_idls + .get(&program_id_str) + .ok() + .flatten() + .unwrap_or_default(); + idl_versions.push(VersionedIdl(slot, idl)); + // Sort by slot descending so the latest IDL is first + idl_versions.sort_by(|a, b| b.0.cmp(&a.0)); + self.registered_idls.store(program_id_str, idl_versions)?; + Ok(()) } fn encode_ui_account_profile_state( @@ -2451,7 +2675,10 @@ impl SurfnetSvm { let filter_slot = self.latest_epoch_info.absolute_slot; // todo: consider if we should pass in a slot if encoding == UiAccountEncoding::JsonParsed { - if let Some(registered_idls) = self.registered_idls.get(owner_program_id) { + if let Ok(Some(registered_idls)) = + self.registered_idls.get(&owner_program_id.to_string()) + { + // IDLs are stored sorted by slot descending (most recent first) let ordered_available_idls = registered_idls .iter() // only get IDLs that are active (their slot is before the latest slot) @@ -2531,12 +2758,19 @@ impl SurfnetSvm { ) } - pub fn get_account(&self, pubkey: &Pubkey) -> Option { + pub fn get_account(&self, pubkey: &Pubkey) -> 
SurfpoolResult> { self.inner.get_account(pubkey) } - pub fn iter_accounts(&self) -> std::collections::hash_map::Iter<'_, Pubkey, AccountSharedData> { - self.inner.accounts_db().inner.iter() + pub fn get_all_accounts(&self) -> SurfpoolResult> { + self.inner.get_all_accounts() + } + + pub fn get_transaction( + &self, + signature: &Signature, + ) -> SurfpoolResult> { + Ok(self.transactions.get(&signature.to_string())?) } pub fn start_runbook_execution(&mut self, runbook_id: String) { @@ -2564,7 +2798,7 @@ impl SurfnetSvm { pub fn export_snapshot( &self, config: ExportSnapshotConfig, - ) -> BTreeMap { + ) -> SurfpoolResult> { let mut fixtures = BTreeMap::new(); let encoding = if config.include_parsed_accounts.unwrap_or_default() { UiAccountEncoding::JsonParsed @@ -2634,7 +2868,7 @@ impl SurfnetSvm { match &config.scope { ExportSnapshotScope::Network => { // Export all network accounts (current behavior) - for (pubkey, account_shared_data) in self.iter_accounts() { + for (pubkey, account_shared_data) in self.get_all_accounts()? 
{ let account = Account::from(account_shared_data.clone()); process_account(&pubkey, &account); } @@ -2662,7 +2896,7 @@ impl SurfnetSvm { } } - fixtures + Ok(fixtures) } /// Registers a scenario for execution by scheduling its overrides @@ -2695,10 +2929,15 @@ impl SurfnetSvm { absolute_slot, base_slot, scenario_relative_slot ); + let mut slot_overrides = self + .scheduled_overrides + .get(&absolute_slot) + .ok() + .flatten() + .unwrap_or_default(); + slot_overrides.push(override_instance); self.scheduled_overrides - .entry(absolute_slot) - .or_insert_with(Vec::new) - .push(override_instance); + .store(absolute_slot, slot_overrides)?; } Ok(()) @@ -2714,12 +2953,18 @@ mod tests { use solana_loader_v3_interface::get_program_data_address; use solana_program_pack::Pack; use spl_token_interface::state::{Account as TokenAccount, AccountState}; + use test_case::test_case; + + use crate::storage::tests::TestType; use super::*; - #[test] - fn test_synthetic_blockhash_generation() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_synthetic_blockhash_generation(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Test with different chain tip indices let test_cases = vec![0, 1, 42, 255, 1000, 0x12345678]; @@ -2778,9 +3023,12 @@ mod tests { println!("Generated hash: {}", hash_str); } - #[test] - fn test_blockhash_consistency_across_calls() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres 
db"))] + fn test_blockhash_consistency_across_calls(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Set a specific chain tip svm.chain_tip = BlockIdentifier::new(123, "initial_hash"); @@ -2810,9 +3058,12 @@ mod tests { } } - #[test] - fn test_token_account_indexing() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_token_account_indexing(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let delegate = Pubkey::new_unique(); @@ -2844,7 +3095,7 @@ mod tests { svm.set_account(&token_account_pubkey, account).unwrap(); // test all indexes were created correctly - assert_eq!(svm.token_accounts.len(), 1); + assert_eq!(svm.token_accounts.keys().unwrap().len(), 1); // test owner index let owner_accounts = svm.get_parsed_token_accounts_by_owner(&owner); @@ -2862,9 +3113,12 @@ mod tests { assert_eq!(mint_accounts[0].0, token_account_pubkey); } - #[test] - fn test_account_update_removes_old_indexes() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_account_update_removes_old_indexes(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let old_delegate = Pubkey::new_unique(); @@ -2932,9 +3186,12 @@ mod tests { assert_eq!(svm.get_parsed_token_accounts_by_owner(&owner).len(), 1); } - #[test] - fn 
test_non_token_accounts_not_indexed() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_non_token_accounts_not_indexed(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let system_account_pubkey = Pubkey::new_unique(); let account = Account { @@ -2948,10 +3205,10 @@ mod tests { svm.set_account(&system_account_pubkey, account).unwrap(); // should be in general registry but not token indexes - assert_eq!(svm.token_accounts.len(), 0); - assert_eq!(svm.token_accounts_by_owner.len(), 0); - assert_eq!(svm.token_accounts_by_delegate.len(), 0); - assert_eq!(svm.token_accounts_by_mint.len(), 0); + assert_eq!(svm.token_accounts.keys().unwrap().len(), 0); + assert_eq!(svm.token_accounts_by_owner.keys().unwrap().len(), 0); + assert_eq!(svm.token_accounts_by_delegate.keys().unwrap().len(), 0); + assert_eq!(svm.token_accounts_by_mint.keys().unwrap().len(), 0); } fn expect_account_update_event( @@ -2964,7 +3221,10 @@ mod tests { Ok(event) => match event { SimnetEvent::AccountUpdate(_, account_pubkey) => { assert_eq!(pubkey, &account_pubkey); - assert_eq!(svm.get_account(&pubkey).as_ref(), Some(expected_account)); + assert_eq!( + svm.get_account(&pubkey).unwrap().as_ref(), + Some(expected_account) + ); true } event => { @@ -3033,9 +3293,12 @@ mod tests { ) } - #[test] - fn test_inserting_account_updates() { - let (mut svm, events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn 
test_inserting_account_updates(test_type: TestType) { + let (mut svm, events_rx, _geyser_rx) = test_type.initialize_svm(); let pubkey = Pubkey::new_unique(); let account = Account { @@ -3048,27 +3311,27 @@ mod tests { // GetAccountResult::None should be a noop when writing account updates { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let empty_update = GetAccountResult::None(pubkey); svm.write_account_update(empty_update); - assert_eq!(svm.inner.accounts_db().clone().inner, index_before); + assert_eq!(svm.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to false should be a noop { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), false); svm.write_account_update(found_update); - assert_eq!(svm.inner.accounts_db().clone().inner, index_before); + assert_eq!(svm.get_all_accounts().unwrap(), index_before); } // GetAccountResult::FoundAccount with `DoUpdateSvm` flag to true should update the account { - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount(pubkey, account.clone(), true); svm.write_account_update(found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &pubkey, &account) { @@ -3102,7 +3365,7 @@ mod tests { rent_epoch: 0, }; - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), @@ -3126,7 +3389,7 @@ mod tests { ); } assert_eq!( - 
svm.inner.accounts_db().clone().inner.len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 2 ); } @@ -3136,14 +3399,14 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let found_program_account_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, Some(program_data_account.clone())), ); svm.write_account_update(found_program_account_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 2 ); if !expect_account_update_event( @@ -3170,7 +3433,7 @@ mod tests { let (program_address, program_account, program_data_address, program_data_account) = create_program_accounts(); - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let found_update = GetAccountResult::FoundAccount( program_data_address, program_data_account.clone(), @@ -3178,7 +3441,7 @@ mod tests { ); svm.write_account_update(found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event( @@ -3192,14 +3455,14 @@ mod tests { ); } - let index_before = svm.inner.accounts_db().clone().inner; + let index_before = svm.get_all_accounts().unwrap(); let program_account_found_update = GetAccountResult::FoundProgramAccount( (program_address, program_account.clone()), (program_data_address, None), ); svm.write_account_update(program_account_found_update); assert_eq!( - svm.inner.accounts_db().clone().inner.len(), + svm.get_all_accounts().unwrap().len(), index_before.len() + 1 ); if !expect_account_update_event(&events_rx, &svm, &program_address, &program_account) { @@ -3210,15 +3473,18 @@ mod tests { } } - #[test] - 
fn test_encode_ui_account() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_encode_ui_account(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let idl_v1: Idl = serde_json::from_slice(&include_bytes!("../tests/assets/idl_v1.json").to_vec()) .unwrap(); - svm.register_idl(idl_v1.clone(), Some(0)); + svm.register_idl(idl_v1.clone(), Some(0)).unwrap(); let account_pubkey = Pubkey::new_unique(); @@ -3313,7 +3579,7 @@ mod tests { serde_json::from_slice(&include_bytes!("../tests/assets/idl_v2.json").to_vec()) .unwrap(); - svm.register_idl(idl_v2.clone(), Some(100)); + svm.register_idl(idl_v2.clone(), Some(100)).unwrap(); // even though we have a new IDL that is more recent, we should be able to match with the old IDL { @@ -3438,18 +3704,24 @@ mod tests { } } - #[test] - fn test_profiling_map_capacity_default() { - let (svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_profiling_map_capacity_default(test_type: TestType) { + let (svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); assert_eq!( svm.executed_transaction_profiles.capacity(), DEFAULT_PROFILING_MAP_CAPACITY ); } - #[test] - fn test_profiling_map_capacity_set() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + 
#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_profiling_map_capacity_set(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); svm.set_profiling_map_capacity(10); assert_eq!(svm.executed_transaction_profiles.capacity(), 10); } @@ -3484,18 +3756,24 @@ mod tests { assert_ne!(loader_v4_id, disable_fees_id); } - #[test] - fn test_apply_feature_config_empty() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_empty(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new(); // Should not panic with empty config svm.apply_feature_config(&config); } - #[test] - fn test_apply_feature_config_enable_feature() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_enable_feature(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Disable a feature first let feature_id = enable_loader_v4::id(); @@ -3509,9 +3787,12 @@ mod tests { assert!(svm.feature_set.is_active(&feature_id)); } - #[test] - fn test_apply_feature_config_disable_feature() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + 
#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_disable_feature(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Feature should be active by default (all_enabled) let feature_id = disable_fees_sysvar::id(); @@ -3524,9 +3805,12 @@ mod tests { assert!(!svm.feature_set.is_active(&feature_id)); } - #[test] - fn test_apply_feature_config_mainnet_defaults() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_mainnet_defaults(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::default_mainnet_features(); svm.apply_feature_config(&config); @@ -3568,9 +3852,12 @@ mod tests { ); } - #[test] - fn test_apply_feature_config_mainnet_with_override() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_mainnet_with_override(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Start with mainnet defaults, but enable loader v4 let config = @@ -3589,9 +3876,12 @@ mod tests { ); } - #[test] - fn test_apply_feature_config_multiple_changes() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); 
"with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_multiple_changes(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new() .enable(SvmFeature::EnableLoaderV4) @@ -3610,14 +3900,18 @@ mod tests { assert!(!svm.feature_set.is_active(&blake3_syscall_enabled::id())); } - #[test] - fn test_apply_feature_config_preserves_native_mint() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_preserves_native_mint(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); // Native mint should exist before assert!( svm.inner .get_account(&spl_token_interface::native_mint::ID) + .unwrap() .is_some() ); @@ -3628,13 +3922,17 @@ mod tests { assert!( svm.inner .get_account(&spl_token_interface::native_mint::ID) + .unwrap() .is_some() ); } - #[test] - fn test_apply_feature_config_idempotent() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_apply_feature_config_idempotent(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let config = SvmFeatureConfig::new() .enable(SvmFeature::EnableLoaderV4) @@ -3651,9 +3949,12 @@ mod tests { // Garbage collection tests - #[test] - fn test_garbage_collected_account_tracking() { - let (mut svm, _events_rx, _geyser_rx) = 
SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_garbage_collected_account_tracking(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let owner = Pubkey::new_unique(); let account_pubkey = Pubkey::new_unique(); @@ -3668,9 +3969,9 @@ mod tests { svm.set_account(&account_pubkey, account.clone()).unwrap(); - assert!(svm.get_account(&account_pubkey).is_some()); + assert!(svm.get_account(&account_pubkey).unwrap().is_some()); assert!(!svm.closed_accounts.contains(&account_pubkey)); - assert_eq!(svm.get_account_owned_by(&owner).len(), 1); + assert_eq!(svm.get_account_owned_by(&owner).unwrap().len(), 1); let empty_account = Account::default(); svm.update_account_registries(&account_pubkey, &empty_account) @@ -3678,15 +3979,18 @@ mod tests { assert!(svm.closed_accounts.contains(&account_pubkey)); - assert_eq!(svm.get_account_owned_by(&owner).len(), 0); + assert_eq!(svm.get_account_owned_by(&owner).unwrap().len(), 0); - let owned_accounts = svm.get_account_owned_by(&owner); + let owned_accounts = svm.get_account_owned_by(&owner).unwrap(); assert!(!owned_accounts.iter().any(|(pk, _)| *pk == account_pubkey)); } - #[test] - fn test_garbage_collected_token_account_cleanup() { - let (mut svm, _events_rx, _geyser_rx) = SurfnetSvm::new(); + #[test_case(TestType::sqlite(); "with on-disk sqlite db")] + #[test_case(TestType::in_memory(); "with in-memory sqlite db")] + #[test_case(TestType::no_db(); "with no db")] + #[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] + fn test_garbage_collected_token_account_cleanup(test_type: TestType) { + let (mut svm, _events_rx, _geyser_rx) = test_type.initialize_svm(); let token_owner = Pubkey::new_unique(); let delegate = 
Pubkey::new_unique(); @@ -3716,7 +4020,10 @@ mod tests { svm.set_account(&token_account_pubkey, account).unwrap(); - assert_eq!(svm.get_token_accounts_by_owner(&token_owner).len(), 1); + assert_eq!( + svm.get_token_accounts_by_owner(&token_owner).unwrap().len(), + 1 + ); assert_eq!(svm.get_token_accounts_by_delegate(&delegate).len(), 1); assert!(!svm.closed_accounts.contains(&token_account_pubkey)); @@ -3726,8 +4033,16 @@ mod tests { assert!(svm.closed_accounts.contains(&token_account_pubkey)); - assert_eq!(svm.get_token_accounts_by_owner(&token_owner).len(), 0); + assert_eq!( + svm.get_token_accounts_by_owner(&token_owner).unwrap().len(), + 0 + ); assert_eq!(svm.get_token_accounts_by_delegate(&delegate).len(), 0); - assert!(svm.token_accounts.get(&token_account_pubkey).is_none()); + assert!( + svm.token_accounts + .get(&token_account_pubkey.to_string()) + .unwrap() + .is_none() + ); } } diff --git a/crates/core/src/tests/helpers.rs b/crates/core/src/tests/helpers.rs index a0722684..b048dfc4 100644 --- a/crates/core/src/tests/helpers.rs +++ b/crates/core/src/tests/helpers.rs @@ -2,7 +2,6 @@ use std::net::TcpListener; use crossbeam_channel::Sender; -use litesvm::LiteSVM; use solana_clock::Clock; use solana_epoch_info::EpochInfo; use solana_transaction::versioned::VersionedTransaction; @@ -83,12 +82,6 @@ where setup } - pub fn new_with_svm(rpc: T, svm: LiteSVM) -> Self { - let setup = TestSetup::new(rpc); - setup.context.svm_locker.0.blocking_write().inner = svm; - setup - } - pub fn new_with_mempool(rpc: T, simnet_commands_tx: Sender) -> Self { let mut setup = TestSetup::new(rpc); setup.context.simnet_commands_tx = simnet_commands_tx; diff --git a/crates/core/src/tests/integration.rs b/crates/core/src/tests/integration.rs index 41cc8b3c..c2940c74 100644 --- a/crates/core/src/tests/integration.rs +++ b/crates/core/src/tests/integration.rs @@ -1,4 +1,4 @@ -use std::{str::FromStr, sync::Arc, thread::sleep, time::Duration}; +use std::{str::FromStr, sync::Arc, 
time::Duration}; use base64::Engine; use crossbeam_channel::{unbounded, unbounded as crossbeam_unbounded}; @@ -53,6 +53,7 @@ use crate::{ surfnet_cheatcodes::{SurfnetCheatcodes, SurfnetCheatcodesRpc}, }, runloops::start_local_surfnet_runloop, + storage::tests::TestType, surfnet::{SignatureSubscriptionType, locker::SurfnetSvmLocker, svm::SurfnetSvm}, tests::helpers::get_free_port, types::{TimeTravelConfig, TransactionLoadedAddresses}, @@ -78,8 +79,12 @@ fn wait_for_ready_and_connected(simnet_events_rx: &crossbeam_channel::Receiver>(); let airdrop_addresses: Vec = airdrop_keypairs.iter().map(|kp| kp.pubkey()).collect(); @@ -195,7 +208,7 @@ async fn test_simnet_some_sol_transfers() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -324,8 +337,12 @@ async fn test_simnet_some_sol_transfers() { // However, we are not actually setting up a tx that will use the lookup table internally, // we are kind of just trusting that LiteSVM will do its job here. 
#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_add_alt_entries_fetching() { +async fn test_add_alt_entries_fetching(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); @@ -347,7 +364,7 @@ async fn test_add_alt_entries_fetching() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -491,8 +508,12 @@ async fn test_add_alt_entries_fetching() { // However, we are not actually setting up a tx that will use the lookup table internally, // we are kind of just trusting that LiteSVM will do its job here. 
#[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_simulate_add_alt_entries_fetching() { +async fn test_simulate_add_alt_entries_fetching(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); @@ -514,7 +535,7 @@ async fn test_simulate_add_alt_entries_fetching() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -601,9 +622,14 @@ async fn test_simulate_add_alt_entries_fetching() { "Unexpected simulation error" ); } + #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_simulate_transaction_no_signers() { +async fn test_simulate_transaction_no_signers(test_type: TestType) { let payer = Keypair::new(); let pk = payer.pubkey(); let lamports = LAMPORTS_PER_SOL; @@ -626,7 +652,7 @@ async fn test_simulate_transaction_no_signers() { ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); 
let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = Arc::new(RwLock::new(surfnet_svm)); @@ -691,9 +717,13 @@ async fn test_simulate_transaction_no_signers() { } #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_surfnet_estimate_compute_units() { - let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_surfnet_estimate_compute_units(test_type: TestType) { + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let rpc_server = crate::rpc::surfnet_cheatcodes::SurfnetCheatcodesRpc; let payer = Keypair::new(); @@ -702,6 +732,7 @@ async fn test_surfnet_estimate_compute_units() { svm_instance .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); let instruction = transfer(&payer.pubkey(), &recipient, lamports_to_send); @@ -913,7 +944,7 @@ async fn test_surfnet_estimate_compute_units() { response_no_tag_again.is_ok(), "RPC call with None tag (again) failed" ); - let rpc_response_no_tag_again_value = response_no_tag_again.unwrap().value; + let _rpc_response_no_tag_again_value = response_no_tag_again.unwrap().value; println!("Retrieving profile results for tag: {} again", tag1); let results_response_tag1_again = @@ -943,9 +974,10 @@ async fn test_surfnet_estimate_compute_units() { // Test send_transaction with cu_analysis_enabled = true // Create a new SVM instance - let (mut svm_for_send, simnet_rx_for_send, _geyser_rx_for_send) = SurfnetSvm::new(); + let (mut svm_for_send, simnet_rx_for_send, _geyser_rx_for_send) = test_type.initialize_svm(); svm_for_send .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() 
.unwrap(); let latest_blockhash_for_send = svm_for_send.latest_blockhash(); @@ -976,10 +1008,14 @@ async fn test_surfnet_estimate_compute_units() { assert!(found_cu_event, "Did not find CU estimation SimnetEvent"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_get_transaction_profile() { +async fn test_get_transaction_profile(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (mut svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); // Set up test accounts let payer = Keypair::new(); @@ -988,6 +1024,7 @@ async fn test_get_transaction_profile() { svm_instance .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); // Create a transaction to profile @@ -1191,11 +1228,14 @@ async fn test_get_transaction_profile() { println!("All get_transaction_profile tests passed successfully!"); } -#[test] -fn test_register_and_get_idl_without_slot() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_register_and_get_idl_without_slot(test_type: TestType) { let idl: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); let (simnet_cmd_tx, _simnet_cmd_rx) = 
crossbeam_unbounded::(); @@ -1242,11 +1282,14 @@ fn test_register_and_get_idl_without_slot() { println!("All IDL registration and retrieval tests passed successfully!"); } -#[test] -fn test_register_and_get_idl_with_slot() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_register_and_get_idl_with_slot(test_type: TestType) { let idl: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); @@ -1303,13 +1346,17 @@ fn test_register_and_get_idl_with_slot() { println!("All IDL registration and retrieval tests passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_register_and_get_same_idl_with_different_slots() { +async fn test_register_and_get_same_idl_with_different_slots(test_type: TestType) { let idl_v1: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v1.json")).unwrap(); let idl_v2: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v2.json")).unwrap(); let idl_v3: Idl = serde_json::from_slice(include_bytes!("./assets/idl_v3.json")).unwrap(); let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, 
_geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance); @@ -1452,10 +1499,14 @@ async fn test_register_and_get_same_idl_with_different_slots() { println!("All IDL registration and retrieval tests at different slots passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_basic() { +async fn test_profile_transaction_basic(test_type: TestType) { // Set up test environment - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -1466,6 +1517,7 @@ async fn test_profile_transaction_basic() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 2)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -1535,9 +1587,13 @@ async fn test_profile_transaction_basic() { println!("Basic transaction profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_multi_instruction_basic() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_multi_instruction_basic(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = 
SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -1545,6 +1601,7 @@ async fn test_profile_transaction_multi_instruction_basic() { svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 4)) + .unwrap() .unwrap(); // Create a multi-instruction transaction: 3 transfers to different recipients @@ -1927,10 +1984,14 @@ async fn test_profile_transaction_multi_instruction_basic() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_tag() { +async fn test_profile_transaction_with_tag(test_type: TestType) { // Set up test environment - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -1941,6 +2002,7 @@ async fn test_profile_transaction_with_tag() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 3)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2083,9 +2145,13 @@ async fn test_profile_transaction_with_tag() { println!("Tag-based transaction profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_token_transfer() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_token_transfer(test_type: 
TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2100,6 +2166,7 @@ async fn test_profile_transaction_token_transfer() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -2132,7 +2199,7 @@ async fn test_profile_transaction_token_transfer() { &spl_token_2022_interface::id(), ); println!("Source ATA: {}", source_ata); - let dest_ata = spl_associated_token_account_interface::address::get_associated_token_address_with_program_id( + let _dest_ata = spl_associated_token_account_interface::address::get_associated_token_address_with_program_id( &recipient, &mint.pubkey(), &spl_token_2022_interface::id(), @@ -2146,7 +2213,7 @@ async fn test_profile_transaction_token_transfer() { &spl_token_2022_interface::id(), ); - let create_dest_ata_ix = + let _create_dest_ata_ix = spl_associated_token_account_interface::instruction::create_associated_token_account( &payer.pubkey(), &recipient, @@ -2156,7 +2223,7 @@ async fn test_profile_transaction_token_transfer() { // Mint tokens let mint_amount = 100_00; // 100 tokens with 2 decimals - let mint_to_ix = spl_token_2022_interface::instruction::mint_to( + let _mint_to_ix = spl_token_2022_interface::instruction::mint_to( &spl_token_2022_interface::id(), &mint.pubkey(), &source_ata, @@ -2518,9 +2585,13 @@ async fn test_profile_transaction_token_transfer() { // println!("Token transfer profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_insufficient_funds() 
{ - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_insufficient_funds(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts with insufficient funds @@ -2531,6 +2602,7 @@ async fn test_profile_transaction_insufficient_funds() { svm_locker .airdrop(&payer.pubkey(), insufficient_funds) + .unwrap() .unwrap(); // Create a transfer transaction that will fail due to insufficient funds @@ -2583,9 +2655,13 @@ async fn test_profile_transaction_insufficient_funds() { println!("Insufficient funds profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_multi_instruction_failure() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_multi_instruction_failure(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2597,6 +2673,7 @@ async fn test_profile_transaction_multi_instruction_failure() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send * 3) + .unwrap() .unwrap(); // Create a multi-instruction transaction where the second instruction will fail @@ -2662,9 +2739,13 @@ async fn test_profile_transaction_multi_instruction_failure() { println!("Multi-instruction failure profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] 
+#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_encoding() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_with_encoding(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2675,6 +2756,7 @@ async fn test_profile_transaction_with_encoding() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 2)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2729,9 +2811,13 @@ async fn test_profile_transaction_with_encoding() { println!("Encoding profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_with_tag_and_retrieval() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_with_tag_and_retrieval(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2742,6 +2828,7 @@ async fn test_profile_transaction_with_tag_and_retrieval() { // Airdrop SOL to payer svm_locker .with_svm_writer(|svm| svm.airdrop(&payer.pubkey(), lamports_to_send * 3)) + .unwrap() .unwrap(); // Create a simple transfer transaction @@ -2828,9 +2915,13 @@ async fn test_profile_transaction_with_tag_and_retrieval() { println!("Tag and retrieval profiling 
test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_empty_instruction() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_empty_instruction(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2840,6 +2931,7 @@ async fn test_profile_transaction_empty_instruction() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), lamports_to_send) + .unwrap() .unwrap(); // Create a transaction with no instructions @@ -2883,9 +2975,13 @@ async fn test_profile_transaction_empty_instruction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_profile_transaction_versioned_message() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_profile_transaction_versioned_message(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Set up test accounts @@ -2896,6 +2992,7 @@ async fn test_profile_transaction_versioned_message() { // Airdrop SOL to payer svm_locker .airdrop(&payer.pubkey(), 2 * lamports_to_send) + .unwrap() .unwrap(); svm_locker.confirm_current_block(&None).await.unwrap(); @@ -2943,10 +3040,14 @@ async fn 
test_profile_transaction_versioned_message() { println!("Versioned message profiling test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_get_local_signatures_without_limit() { +async fn test_get_local_signatures_without_limit(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance.clone()); @@ -2967,6 +3068,7 @@ async fn test_get_local_signatures_without_limit() { svm_locker_for_context .airdrop(&payer.pubkey(), lamports_to_send * 2) + .unwrap() .unwrap(); svm_locker_for_context @@ -3042,11 +3144,14 @@ async fn test_get_local_signatures_without_limit() { assert!(local_signatures.len() > 0); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_get_local_signatures_with_limit() { +async fn test_get_local_signatures_with_limit(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); - + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker_for_context = SurfnetSvmLocker::new(svm_instance.clone()); let (simnet_cmd_tx, _simnet_cmd_rx) = crossbeam_unbounded::(); @@ -3066,6 +3171,7 @@ async fn test_get_local_signatures_with_limit() { svm_locker_for_context 
.airdrop(&payer.pubkey(), lamports_to_send * 10) + .unwrap() .unwrap(); svm_locker_for_context @@ -3188,6 +3294,7 @@ async fn test_get_local_signatures_with_limit() { fn boot_simnet( block_production_mode: BlockProductionMode, slot_time: Option, + test_type: TestType, ) -> ( SurfnetSvmLocker, crossbeam_channel::Sender, @@ -3210,7 +3317,7 @@ fn boot_simnet( ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -3240,10 +3347,14 @@ fn boot_simnet( (svm_locker, simnet_commands_tx, simnet_events_rx) } -#[test] -fn test_time_travel_resume_paused_clock() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_time_travel_resume_paused_clock(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; - let (svm_locker, simnet_cmd_tx, _) = boot_simnet(BlockProductionMode::Clock, Some(100)); + let (svm_locker, simnet_cmd_tx, _) = + boot_simnet(BlockProductionMode::Clock, Some(100), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3312,12 +3423,18 @@ fn test_time_travel_resume_paused_clock() { println!("Resume clock test passed successfully!"); } -#[test] -fn test_time_travel_absolute_timestamp() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn 
test_time_travel_absolute_timestamp(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let slot_time = 100; - let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(slot_time.clone())); + let (svm_locker, simnet_cmd_tx, simnet_events_rx) = boot_simnet( + BlockProductionMode::Clock, + Some(slot_time.clone()), + test_type, + ); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3391,11 +3508,14 @@ fn test_time_travel_absolute_timestamp() { println!("Time travel to absolute timestamp test passed successfully!"); } -#[test] -fn test_time_travel_absolute_slot() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_time_travel_absolute_slot(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3463,11 +3583,14 @@ fn test_time_travel_absolute_slot() { println!("Time travel to absolute slot test passed successfully!"); } -#[test] -fn test_time_travel_absolute_epoch() { +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_time_travel_absolute_epoch(test_type: TestType) { let rpc_server = SurfnetCheatcodesRpc; let (svm_locker, simnet_cmd_tx, simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, 
Some(400), test_type); let (plugin_cmd_tx, _plugin_cmd_rx) = crossbeam_unbounded::(); let runloop_context = RunloopContext { @@ -3538,16 +3661,26 @@ fn test_time_travel_absolute_epoch() { println!("Time travel to absolute epoch test passed successfully!"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ix_profiling_with_alt_tx() { +async fn test_ix_profiling_with_alt_tx(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); - svm_locker.airdrop(&p2.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); + svm_locker + .airdrop(&p2.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3817,15 +3950,21 @@ async fn test_ix_profiling_with_alt_tx() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn it_should_delete_accounts_with_no_lamports() { +async fn it_should_delete_accounts_with_no_lamports(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); - + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); - 
svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3863,15 +4002,21 @@ async fn it_should_delete_accounts_with_no_lamports() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_compute_budget_profiling() { +async fn test_compute_budget_profiling(test_type: TestType) { let (svm_locker, _simnet_cmd_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(400)); - + boot_simnet(BlockProductionMode::Clock, Some(400), test_type); let p1 = Keypair::new(); let p2 = Keypair::new(); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -3927,13 +4072,19 @@ async fn test_compute_budget_profiling() { assert_eq!(ix.compute_units_consumed, 150); } -#[test] -fn test_reset_account() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_reset_account(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); // account is created in the 
SVM + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); // account is created in the SVM.unwrap() println!("Airdropped SOL to p1"); println!( @@ -3956,9 +4107,12 @@ fn test_reset_account() { ); } -#[test] -fn test_reset_account_cascade() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_reset_account_cascade(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4005,13 +4159,20 @@ fn test_reset_account_cascade() { svm_locker.reset_account(owned, false).unwrap(); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_reset_streamed_account() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_reset_streamed_account(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let p1 = Keypair::new(); println!("P1 pubkey: {}", p1.pubkey()); - svm_locker.airdrop(&p1.pubkey(), LAMPORTS_PER_SOL).unwrap(); // account is created in the SVM + svm_locker + .airdrop(&p1.pubkey(), LAMPORTS_PER_SOL) + .unwrap() + .unwrap(); // account is created in the SVM.unwrap() println!("Airdropped SOL to p1"); let _ = svm_locker.confirm_current_block(&None).await; @@ -4028,9 +4189,13 @@ async fn 
test_reset_streamed_account() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_reset_streamed_account_cascade() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_reset_streamed_account_cascade(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4079,9 +4244,12 @@ async fn test_reset_streamed_account_cascade() { assert!(svm_locker.get_account_local(&owned).inner.is_none()); } -#[test] -fn test_reset_network() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] +fn test_reset_network(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // Create owner account and owned account @@ -4131,6 +4299,7 @@ fn test_reset_network() { fn start_surfnet( airdrop_addresses: Vec, datasource_rpc_url: Option, + test_type: TestType, ) -> Result<(String, SurfnetSvmLocker), String> { let bind_host = "127.0.0.1"; let bind_port = get_free_port().unwrap(); @@ -4153,7 +4322,7 @@ fn start_surfnet( ..SurfpoolConfig::default() }; - let (surfnet_svm, simnet_events_rx, geyser_events_rx) = SurfnetSvm::new(); + let (surfnet_svm, simnet_events_rx, geyser_events_rx) = test_type.initialize_svm(); let (simnet_commands_tx, 
simnet_commands_rx) = unbounded(); let (subgraph_commands_tx, _subgraph_commands_rx) = unbounded(); let svm_locker = SurfnetSvmLocker::new(surfnet_svm); @@ -4194,19 +4363,34 @@ fn start_surfnet( Ok((format!("http://{}:{}", bind_host, bind_port), svm_locker)) } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[cfg_attr(feature = "ignore_tests_ci", ignore = "flaky CI tests")] #[tokio::test(flavor = "multi_thread")] -async fn test_closed_accounts() { +async fn test_closed_accounts(test_type: TestType) { let keypair = Keypair::new(); let pubkey = keypair.pubkey(); + let another_test_type = match &test_type { + TestType::OnDiskSqlite(_) => TestType::sqlite(), + TestType::InMemorySqlite => TestType::in_memory(), + TestType::NoDb => TestType::no_db(), + #[cfg(feature = "postgres")] + TestType::Postgres { url, .. 
} => TestType::Postgres { + url: url.clone(), + surfnet_id: crate::storage::tests::random_surfnet_id(), + }, + }; // Start datasource surfnet first, which will only have accounts we airdrop to let (datasource_surfnet_url, _datasource_svm_locker) = - start_surfnet(vec![pubkey], None).expect("Failed to start datasource surfnet"); + start_surfnet(vec![pubkey], None, test_type).expect("Failed to start datasource surfnet"); println!("Datasource surfnet started at {}", datasource_surfnet_url); // Now start the test surfnet which forks the datasource surfnet let (surfnet_url, surfnet_svm_locker) = - start_surfnet(vec![], Some(datasource_surfnet_url)).expect("Failed to start surfnet"); + start_surfnet(vec![], Some(datasource_surfnet_url), another_test_type) + .expect("Failed to start surfnet"); println!("Surfnet started at {}", surfnet_url); let rpc_client = RpcClient::new(surfnet_url); @@ -4335,6 +4519,7 @@ async fn test_ws_signature_subscribe(subscription_type: SignatureSubscriptionTyp let lamports_to_send = 100_000; svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4388,20 +4573,27 @@ async fn test_ws_signature_subscribe(subscription_type: SignatureSubscriptionTyp ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_failed_transaction() { +async fn test_ws_signature_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, 
_simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create a test transaction that will fail (insufficient funds) let payer = Keypair::new(); let recipient = Pubkey::new_unique(); - svm_locker.airdrop(&payer.pubkey(), 10_000).unwrap(); // airdrop a very small amount + svm_locker + .airdrop(&payer.pubkey(), 10_000) + .unwrap() + .unwrap(); // airdrop a very small amount.unwrap() let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); let transfer_ix = system_instruction::transfer(&payer.pubkey(), &recipient, LAMPORTS_PER_SOL); // Try to send more than we have @@ -4444,14 +4636,18 @@ async fn test_ws_signature_subscribe_failed_transaction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_multiple_subscribers() { +async fn test_ws_signature_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create a test transaction @@ -4459,6 +4655,7 @@ async fn test_ws_signature_subscribe_multiple_subscribers() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4518,20 +4715,25 @@ async fn test_ws_signature_subscribe_multiple_subscribers() { println!("✓ Multiple subscribers all 
received notifications correctly"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_signature_subscribe_before_transaction_exists() { +async fn test_ws_signature_subscribe_before_transaction_exists(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; use crate::surfnet::SignatureSubscriptionType; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -4579,12 +4781,16 @@ async fn test_ws_signature_subscribe_before_transaction_exists() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_balance_change() { +async fn test_ws_account_subscribe_balance_change(test_type: TestType) { use crossbeam_channel::unbounded; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create and fund a new account @@ -4592,6 +4798,7 @@ async fn 
test_ws_account_subscribe_balance_change() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to payer account updates @@ -4635,13 +4842,17 @@ async fn test_ws_account_subscribe_balance_change() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_multiple_changes() { +async fn test_ws_account_subscribe_multiple_changes(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create and fund a new account @@ -4649,6 +4860,7 @@ async fn test_ws_account_subscribe_multiple_changes() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to payer account updates @@ -4697,22 +4909,28 @@ async fn test_ws_account_subscribe_multiple_changes() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_multiple_subscribers() { +async fn test_ws_account_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction 
as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); let sender = Keypair::new(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&sender.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // create multiple subscriptions to the same account @@ -4761,19 +4979,24 @@ async fn test_ws_account_subscribe_multiple_subscribers() { println!("✓ All 3 subscribers received notifications for account change"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_new_account_creation() { +async fn test_ws_account_subscribe_new_account_creation(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); let new_account = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to an account that doesn't exist yet @@ -4820,13 +5043,17 @@ async fn test_ws_account_subscribe_new_account_creation() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", 
test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_account_subscribe_account_closure() { +async fn test_ws_account_subscribe_account_closure(test_type: TestType) { use crossbeam_channel::unbounded; use solana_account_decoder::UiAccountEncoding; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let account_to_close = Keypair::new(); @@ -4835,6 +5062,7 @@ async fn test_ws_account_subscribe_account_closure() { // give the account some funds svm_locker .airdrop(&account_to_close.pubkey(), 10_000) + .unwrap() .unwrap(); // subscribe to the account @@ -4873,12 +5101,16 @@ async fn test_ws_account_subscribe_account_closure() { println!("✓ Received notification for account closure"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_basic() { +async fn test_ws_slot_subscribe_basic(test_type: TestType) { use surfpool_types::types::BlockProductionMode; let (svm_locker, _simnet_commands_tx, _simnet_events_rx) = - boot_simnet(BlockProductionMode::Clock, Some(100)); + boot_simnet(BlockProductionMode::Clock, Some(100), test_type); // subscribe to slot updates let slot_rx = svm_locker.subscribe_for_slot_updates(); @@ -4904,9 +5136,13 @@ async fn test_ws_slot_subscribe_basic() { println!("✓ Slot updates are progressing correctly"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] 
+#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_manual_advancement() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_manual_advancement(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to slot updates @@ -4935,9 +5171,13 @@ async fn test_ws_slot_subscribe_manual_advancement() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_multiple_subscribers() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_multiple_subscribers(test_type: TestType) { + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create multiple subscriptions @@ -4965,9 +5205,13 @@ async fn test_ws_slot_subscribe_multiple_subscribers() { println!("✓ All 3 subscribers received slot update notifications"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_slot_subscribe_multiple_slot_changes() { - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); +async fn test_ws_slot_subscribe_multiple_slot_changes(test_type: TestType) { + let (svm_instance, _simnet_events_rx, 
_geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let slot_rx = svm_locker.subscribe_for_slot_updates(); @@ -4992,19 +5236,24 @@ async fn test_ws_slot_subscribe_multiple_slot_changes() { } } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_all_transactions() { +async fn test_ws_logs_subscribe_all_transactions(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to all transaction logs @@ -5055,20 +5304,25 @@ async fn test_ws_logs_subscribe_all_transactions() { println!("✓ Received logs update for transaction: {}", signature); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_mentions_account() { +async fn test_ws_logs_subscribe_mentions_account(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as 
system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to logs mentioning the system program @@ -5129,14 +5383,18 @@ async fn test_ws_logs_subscribe_mentions_account() { println!("✓ Did not receive logs notification for transaction not mentioning token program"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_confirmed_commitment() { +async fn test_ws_logs_subscribe_confirmed_commitment(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to confirmed logs @@ -5148,6 +5406,7 @@ async fn test_ws_logs_subscribe_confirmed_commitment() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5189,8 +5448,12 @@ async fn test_ws_logs_subscribe_confirmed_commitment() { println!("✓ Received confirmed logs notification at slot {}", slot); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] 
+#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_finalized_commitment() { +async fn test_ws_logs_subscribe_finalized_commitment(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; @@ -5198,7 +5461,7 @@ async fn test_ws_logs_subscribe_finalized_commitment() { use crate::surfnet::FINALIZATION_SLOT_THRESHOLD; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // subscribe to finalized logs @@ -5210,6 +5473,7 @@ async fn test_ws_logs_subscribe_finalized_commitment() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5256,20 +5520,24 @@ async fn test_ws_logs_subscribe_finalized_commitment() { println!("✓ Received finalized logs notification at slot {}", slot); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_failed_transaction() { +async fn test_ws_logs_subscribe_failed_transaction(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, 
_geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create test accounts let payer = Keypair::new(); let recipient = Pubkey::new_unique(); - svm_locker.airdrop(&payer.pubkey(), 5_000).unwrap(); + svm_locker.airdrop(&payer.pubkey(), 5_000).unwrap().unwrap(); // subscribe to all logs let logs_rx = svm_locker @@ -5318,14 +5586,18 @@ async fn test_ws_logs_subscribe_failed_transaction() { ); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_multiple_subscribers() { +async fn test_ws_logs_subscribe_multiple_subscribers(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create multiple subscriptions with different commitment levels @@ -5341,6 +5613,7 @@ async fn test_ws_logs_subscribe_multiple_subscribers() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5384,14 +5657,18 @@ async fn test_ws_logs_subscribe_multiple_subscribers() { println!("✓ All subscribers received logs notifications at their respective commitment levels"); } +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] 
+#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_ws_logs_subscribe_logs_content() { +async fn test_ws_logs_subscribe_logs_content(test_type: TestType) { use crossbeam_channel::unbounded; use solana_client::rpc_config::RpcTransactionLogsFilter; use solana_commitment_config::CommitmentLevel; use solana_system_interface::instruction as system_instruction; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); // create test accounts @@ -5399,6 +5676,7 @@ async fn test_ws_logs_subscribe_logs_content() { let recipient = Pubkey::new_unique(); svm_locker .airdrop(&payer.pubkey(), LAMPORTS_PER_SOL) + .unwrap() .unwrap(); // subscribe to all logs @@ -5454,14 +5732,18 @@ async fn test_ws_logs_subscribe_logs_content() { /// Token-2022 lifecycle: /// create mint → initialize → create ATA → mint → transfer → burn → close account +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_full_lifecycle() { +async fn test_token2022_full_lifecycle(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ burn, close_account, initialize_mint2, mint_to, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = 
SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5471,9 +5753,11 @@ async fn test_token2022_full_lifecycle() { svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&recipient.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5664,14 +5948,18 @@ async fn test_token2022_full_lifecycle() { } /// Token-2022 error cases: transfer/burn > balance and close with balance. +#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_error_cases() { +async fn test_token2022_error_cases(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ burn, close_account, initialize_mint2, mint_to, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let payer = Keypair::new(); @@ -5681,9 +5969,11 @@ async fn test_token2022_error_cases() { svm_locker .airdrop(&payer.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&recipient.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -5841,14 +6131,18 @@ async fn test_token2022_error_cases() { } /// Token-2022 delegate operations: approve, delegated transfer, revoke. 
+#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_delegate_operations() { +async fn test_token2022_delegate_operations(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ approve, initialize_mint2, mint_to, revoke, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let owner = Keypair::new(); @@ -5859,9 +6153,11 @@ async fn test_token2022_delegate_operations() { svm_locker .airdrop(&owner.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); svm_locker .airdrop(&delegate.pubkey(), 1 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -6057,14 +6353,18 @@ async fn test_token2022_delegate_operations() { } /// Token-2022 freeze/thaw operations. 
+#[test_case(TestType::sqlite(); "with on-disk sqlite db")] +#[test_case(TestType::in_memory(); "with in-memory sqlite db")] +#[test_case(TestType::no_db(); "with no db")] +#[cfg_attr(feature = "postgres", test_case(TestType::postgres(); "with postgres db"))] #[tokio::test(flavor = "multi_thread")] -async fn test_token2022_freeze_thaw() { +async fn test_token2022_freeze_thaw(test_type: TestType) { use solana_system_interface::instruction as system_instruction; use spl_token_2022_interface::instruction::{ freeze_account, initialize_mint2, mint_to, thaw_account, transfer_checked, }; - let (svm_instance, _simnet_events_rx, _geyser_events_rx) = SurfnetSvm::new(); + let (svm_instance, _simnet_events_rx, _geyser_events_rx) = test_type.initialize_svm(); let svm_locker = SurfnetSvmLocker::new(svm_instance); let owner = Keypair::new(); @@ -6074,6 +6374,7 @@ async fn test_token2022_freeze_thaw() { svm_locker .airdrop(&owner.pubkey(), 10 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let recent_blockhash = svm_locker.with_svm_reader(|svm| svm.latest_blockhash()); @@ -6296,6 +6597,7 @@ fn test_nonce_accounts() { svm_locker .airdrop(&payer.pubkey(), 5 * LAMPORTS_PER_SOL) + .unwrap() .unwrap(); let nonce_rent = svm_locker.with_svm_reader(|svm_reader| { diff --git a/crates/core/src/types.rs b/crates/core/src/types.rs index c0819b37..86d6e229 100644 --- a/crates/core/src/types.rs +++ b/crates/core/src/types.rs @@ -4,6 +4,7 @@ use agave_reserved_account_keys::ReservedAccountKeys; use base64::{Engine, prelude::BASE64_STANDARD}; use chrono::Utc; use litesvm::types::TransactionMetadata; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use solana_account::Account; use solana_account_decoder::parse_token::UiTokenAmount; use solana_clock::{Epoch, Slot}; @@ -19,9 +20,11 @@ use solana_transaction::{ sanitized::SanitizedTransaction, versioned::{TransactionVersion, VersionedTransaction}, }; +use solana_transaction_context::TransactionReturnData; +use 
solana_transaction_error::TransactionError; use solana_transaction_status::{ Encodable, EncodableWithMeta, EncodeError, EncodedTransaction, - EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, + EncodedTransactionWithStatusMeta, InnerInstruction, InnerInstructions, Reward, TransactionBinaryEncoding, TransactionConfirmationStatus, TransactionStatus, TransactionStatusMeta, TransactionTokenBalance, UiAccountsList, UiLoadedAddresses, UiTransaction, UiTransactionEncoding, UiTransactionStatusMeta, @@ -37,7 +40,117 @@ use crate::{ surfnet::locker::{format_ui_amount, format_ui_amount_string}, }; -#[derive(Debug, Clone)] +/// Serializable version of TransactionTokenBalance +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializableTransactionTokenBalance { + pub account_index: u8, + pub mint: String, + pub ui_token_amount: UiTokenAmount, + pub owner: String, + pub program_id: String, +} + +impl From<TransactionTokenBalance> for SerializableTransactionTokenBalance { + fn from(ttb: TransactionTokenBalance) -> Self { + Self { + account_index: ttb.account_index, + mint: ttb.mint, + ui_token_amount: ttb.ui_token_amount, + owner: ttb.owner, + program_id: ttb.program_id, + } + } +} + +impl From<SerializableTransactionTokenBalance> for TransactionTokenBalance { + fn from(sttb: SerializableTransactionTokenBalance) -> Self { + Self { + account_index: sttb.account_index, + mint: sttb.mint, + ui_token_amount: sttb.ui_token_amount, + owner: sttb.owner, + program_id: sttb.program_id, + } + } +} + +/// Serializable version of TransactionStatusMeta +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializableTransactionStatusMeta { + pub status: Result<(), TransactionError>, + pub fee: u64, + pub pre_balances: Vec<u64>, + pub post_balances: Vec<u64>, + pub inner_instructions: Option<Vec<InnerInstructions>>, + pub log_messages: Option<Vec<String>>, + pub pre_token_balances: Option<Vec<SerializableTransactionTokenBalance>>, + pub post_token_balances: Option<Vec<SerializableTransactionTokenBalance>>, + pub rewards: Option<Vec<Reward>>, + pub loaded_addresses: LoadedAddresses, + pub return_data: Option<TransactionReturnData>, + pub compute_units_consumed: Option<u64>, + 
pub cost_units: Option<u64>, +} + +impl From<TransactionStatusMeta> for SerializableTransactionStatusMeta { + fn from(meta: TransactionStatusMeta) -> Self { + Self { + status: meta.status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: meta.inner_instructions, + log_messages: meta.log_messages, + pre_token_balances: meta + .pre_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + post_token_balances: meta + .post_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + rewards: meta.rewards, + loaded_addresses: meta.loaded_addresses, + return_data: meta.return_data, + compute_units_consumed: meta.compute_units_consumed, + cost_units: meta.cost_units, + } + } +} + +impl From<SerializableTransactionStatusMeta> for TransactionStatusMeta { + fn from(smeta: SerializableTransactionStatusMeta) -> Self { + Self { + status: smeta.status, + fee: smeta.fee, + pre_balances: smeta.pre_balances, + post_balances: smeta.post_balances, + inner_instructions: smeta.inner_instructions, + log_messages: smeta.log_messages, + pre_token_balances: smeta + .pre_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + post_token_balances: smeta + .post_token_balances + .map(|v| v.into_iter().map(Into::into).collect()), + rewards: smeta.rewards, + loaded_addresses: smeta.loaded_addresses, + return_data: smeta.return_data, + compute_units_consumed: smeta.compute_units_consumed, + cost_units: smeta.cost_units, + } + } +} + +/// Helper struct for serializing TransactionWithStatusMeta +/// Note: VersionedTransaction uses bincode internally, so we serialize it as base64-encoded bytes +#[derive(Serialize, Deserialize)] +struct SerializableTransactionWithStatusMeta { + pub slot: u64, + /// Base64-encoded bincode serialization of VersionedTransaction + pub transaction_bytes: String, + pub meta: SerializableTransactionStatusMeta, +} + +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum SurfnetTransactionStatus { Received, 
Processed(Box<(TransactionWithStatusMeta, HashSet<Pubkey>)>), @@ -63,6 +176,47 @@ pub struct TransactionWithStatusMeta { pub meta: TransactionStatusMeta, } +impl Serialize for TransactionWithStatusMeta { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + // Serialize VersionedTransaction using bincode, then base64 encode + let tx_bytes = bincode::serialize(&self.transaction) + .map_err(|e| serde::ser::Error::custom(format!("bincode error: {}", e)))?; + let tx_base64 = BASE64_STANDARD.encode(&tx_bytes); + + let helper = SerializableTransactionWithStatusMeta { + slot: self.slot, + transaction_bytes: tx_base64, + meta: self.meta.clone().into(), + }; + helper.serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for TransactionWithStatusMeta { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let helper = SerializableTransactionWithStatusMeta::deserialize(deserializer)?; + + // Decode base64 and deserialize using bincode + let tx_bytes = BASE64_STANDARD + .decode(&helper.transaction_bytes) + .map_err(|e| serde::de::Error::custom(format!("base64 decode error: {}", e)))?; + let transaction: VersionedTransaction = bincode::deserialize(&tx_bytes) + .map_err(|e| serde::de::Error::custom(format!("bincode deserialize error: {}", e)))?; + + Ok(Self { + slot: helper.slot, + transaction, + meta: helper.meta.into(), + }) + } +} + impl TransactionWithStatusMeta { pub fn into_status(&self, current_slot: u64) -> TransactionStatus { TransactionStatus { @@ -563,6 +717,29 @@ impl RemoteRpcResult { } } +/// Discriminant byte used for serializing token program variants. +/// Ensures consistent encoding between TokenAccount and MintAccount. 
+#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TokenProgramDiscriminant { + SplToken = 0, + SplToken2022 = 1, +} + +impl TokenProgramDiscriminant { + pub fn from_byte(byte: u8) -> Option<Self> { + match byte { + 0 => Some(Self::SplToken), + 1 => Some(Self::SplToken2022), + _ => None, + } + } + + pub fn as_byte(self) -> u8 { + self as u8 + } +} + #[derive(Debug, Copy, Clone, PartialEq)] pub enum TokenAccount { SplToken2022(spl_token_2022_interface::state::Account), @@ -715,12 +892,130 @@ impl TokenAccount { } } +impl Serialize for TokenAccount { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Account::LEN); + match self { + Self::SplToken2022(account) => { + bytes.push(TokenProgramDiscriminant::SplToken2022.as_byte()); + let mut dst = [0u8; spl_token_2022_interface::state::Account::LEN]; + account.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + Self::SplToken(account) => { + bytes.push(TokenProgramDiscriminant::SplToken.as_byte()); + let mut dst = [0u8; spl_token_interface::state::Account::LEN]; + account.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + } + let encoded = BASE64_STANDARD.encode(&bytes); + serializer.serialize_str(&encoded) + } +} + +impl<'de> Deserialize<'de> for TokenAccount { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let encoded = String::deserialize(deserializer)?; + let bytes = BASE64_STANDARD + .decode(&encoded) + .map_err(serde::de::Error::custom)?; + + if bytes.is_empty() { + return Err(serde::de::Error::custom("Empty TokenAccount bytes")); + } + + let discriminant = TokenProgramDiscriminant::from_byte(bytes[0]).ok_or_else(|| { + serde::de::Error::custom(format!("Unknown TokenAccount discriminant: {}", bytes[0])) + })?; + let data = &bytes[1..]; + + match discriminant { + TokenProgramDiscriminant::SplToken2022 => { + let account = 
spl_token_2022_interface::state::Account::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(TokenAccount::SplToken2022(account)) + } + TokenProgramDiscriminant::SplToken => { + let account = spl_token_interface::state::Account::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(TokenAccount::SplToken(account)) + } + } + } +} + #[derive(Debug, Clone)] pub enum MintAccount { SplToken2022(spl_token_2022_interface::state::Mint), SplToken(spl_token_interface::state::Mint), } +impl Serialize for MintAccount { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let mut bytes = Vec::with_capacity(1 + spl_token_2022_interface::state::Mint::LEN); + match self { + Self::SplToken2022(mint) => { + bytes.push(TokenProgramDiscriminant::SplToken2022.as_byte()); + let mut dst = [0u8; spl_token_2022_interface::state::Mint::LEN]; + mint.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + Self::SplToken(mint) => { + bytes.push(TokenProgramDiscriminant::SplToken.as_byte()); + let mut dst = [0u8; spl_token_interface::state::Mint::LEN]; + mint.pack_into_slice(&mut dst); + bytes.extend_from_slice(&dst); + } + } + let encoded = BASE64_STANDARD.encode(&bytes); + serializer.serialize_str(&encoded) + } +} + +impl<'de> Deserialize<'de> for MintAccount { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let encoded = String::deserialize(deserializer)?; + let bytes = BASE64_STANDARD + .decode(&encoded) + .map_err(serde::de::Error::custom)?; + + if bytes.is_empty() { + return Err(serde::de::Error::custom("Empty MintAccount bytes")); + } + + let discriminant = TokenProgramDiscriminant::from_byte(bytes[0]).ok_or_else(|| { + serde::de::Error::custom(format!("Unknown MintAccount discriminant: {}", bytes[0])) + })?; + let data = &bytes[1..]; + + match discriminant { + TokenProgramDiscriminant::SplToken2022 => { + let mint = spl_token_2022_interface::state::Mint::unpack(data) + .map_err(serde::de::Error::custom)?; + 
Ok(MintAccount::SplToken2022(mint)) + } + TokenProgramDiscriminant::SplToken => { + let mint = spl_token_interface::state::Mint::unpack(data) + .map_err(serde::de::Error::custom)?; + Ok(MintAccount::SplToken(mint)) + } + } + } +} + impl MintAccount { pub fn unpack(bytes: &[u8]) -> SurfpoolResult<Self> { if let Ok(mint) = diff --git a/crates/types/src/types.rs b/crates/types/src/types.rs index 55f577b0..ff858c51 100644 --- a/crates/types/src/types.rs +++ b/crates/types/src/types.rs @@ -549,6 +549,9 @@ pub struct SimnetConfig { pub log_bytes_limit: Option<usize>, pub feature_config: SvmFeatureConfig, pub skip_signature_verification: bool, + /// Unique identifier for this surfnet instance. Used to isolate database storage + /// when multiple surfnets share the same database. Defaults to 0. + pub surfnet_id: u32, } impl Default for SimnetConfig { @@ -566,6 +569,7 @@ log_bytes_limit: Some(10_000), feature_config: SvmFeatureConfig::default(), skip_signature_verification: false, + surfnet_id: 0, } } } @@ -894,7 +898,7 @@ pub enum DataIndexingCommand { } // Define a wrapper struct -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct VersionedIdl(pub Slot, pub Idl); // Implement ordering based on Slot @@ -1094,14 +1098,17 @@ pub struct GetStreamedAccountsResponse { accounts: Vec<StreamedAccountInfo>, } impl GetStreamedAccountsResponse { - pub fn new(streamed_accounts: &HashMap<Pubkey, bool>) -> Self { - let mut accounts = vec![]; - for (pubkey, include_owned_accounts) in streamed_accounts { - accounts.push(StreamedAccountInfo { - pubkey: pubkey.to_string(), - include_owned_accounts: *include_owned_accounts, - }); - } + pub fn from_iter<I>(streamed_accounts: I) -> Self + where + I: IntoIterator<Item = (String, bool)>, + { + let accounts = streamed_accounts + .into_iter() + .map(|(pubkey, include_owned_accounts)| StreamedAccountInfo { + pubkey, + include_owned_accounts, + }) + .collect(); Self { accounts } } }