diff --git a/.github/workflows/release-typescript-sdk.yml b/.github/workflows/release-typescript-sdk.yml index 0556b751..e3813e80 100644 --- a/.github/workflows/release-typescript-sdk.yml +++ b/.github/workflows/release-typescript-sdk.yml @@ -103,6 +103,7 @@ jobs: needs: verify runs-on: ${{ matrix.runner }} timeout-minutes: 60 + continue-on-error: true strategy: fail-fast: false matrix: @@ -131,6 +132,8 @@ jobs: - name: Setup Rust CI toolchain and tools uses: ./.github/actions/rust-ci-setup + with: + cargo-tools: cargo-make,cargo-nextest - name: Setup pnpm uses: pnpm/action-setup@v4 @@ -154,7 +157,7 @@ jobs: env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} shell: bash - run: pnpm publish --access public --no-git-checks + run: node ../../scripts/publish-package.mjs - name: Pack native runtime package if: needs.verify.outputs.publish_release != 'true' @@ -239,7 +242,7 @@ jobs: env: NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} shell: bash - run: pnpm publish --access public --no-git-checks + run: node scripts/publish-package.mjs - name: Pack TypeScript SDK if: needs.verify.outputs.publish_release != 'true' diff --git a/Cargo.lock b/Cargo.lock index 5c1e364e..0e6d61b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -832,15 +832,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" -[[package]] -name = "autotools" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef941527c41b0fc0dd48511a8154cd5fc7e29200a0ff8b7203c5d777dbc795cf" -dependencies = [ - "cc", -] - [[package]] name = "axum" version = "0.8.8" @@ -3216,14 +3207,12 @@ dependencies = [ [[package]] name = "laserstream-core-proto" version = "9.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12d5ab2767a78aea87aeee99c7e62a241319a7976711e3f02f8b33844e33c03" dependencies = [ "anyhow", "bincode", "prost", "prost-types", - 
"protobuf-src", + "protoc-bin-vendored", "solana-account", "solana-account-decoder", "solana-clock", @@ -4223,15 +4212,6 @@ dependencies = [ "prost", ] -[[package]] -name = "protobuf-src" -version = "1.1.0+21.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7ac8852baeb3cc6fb83b93646fb93c0ffe5d14bf138c945ceb4b9948ee0e3c1" -dependencies = [ - "autotools", -] - [[package]] name = "protoc-bin-vendored" version = "3.2.0" @@ -10027,8 +10007,6 @@ checksum = "21a573b5aa423fecf27e2dc9c4009db67cd2f35718c2a859d7352ee3fe0d1780" [[package]] name = "yellowstone-grpc-client" version = "12.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b250b0ed11fb0fb11b80e89599e6f5a77640bce3dafe26ffb8022cdfc6c49e" dependencies = [ "bytes", "futures", diff --git a/Cargo.toml b/Cargo.toml index 97ba0c9c..69cd0e4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,8 @@ exclude = ["crates/sof-solana-gossip"] [patch.crates-io] sof-solana-gossip = { path = "crates/sof-solana-gossip" } helius-laserstream = { path = "vendor/helius-laserstream" } +laserstream-core-proto = { path = "vendor/laserstream-core-proto" } +yellowstone-grpc-client = { path = "vendor/yellowstone-grpc-client" } [workspace.package] edition = "2024" diff --git a/crates/sof-observer/src/ingest/config.rs b/crates/sof-observer/src/ingest/config.rs index 71f404ba..ca5b3b36 100644 --- a/crates/sof-observer/src/ingest/config.rs +++ b/crates/sof-observer/src/ingest/config.rs @@ -70,18 +70,21 @@ pub(super) fn read_udp_idle_wait_ms() -> u64 { .unwrap_or(100) } +#[cfg(target_os = "linux")] pub(super) fn read_udp_busy_poll_us() -> Option { read_env_var("SOF_UDP_BUSY_POLL_US") .and_then(|value| value.parse::().ok()) .filter(|value| *value > 0) } +#[cfg(target_os = "linux")] pub(super) fn read_udp_busy_poll_budget() -> Option { read_env_var("SOF_UDP_BUSY_POLL_BUDGET") .and_then(|value| value.parse::().ok()) .filter(|value| *value > 0) } +#[cfg(target_os = "linux")] 
pub(super) fn read_udp_prefer_busy_poll() -> bool { read_bool_env("SOF_UDP_PREFER_BUSY_POLL", false) } @@ -90,6 +93,7 @@ pub(super) fn read_udp_drop_on_channel_full() -> bool { read_bool_env("SOF_UDP_DROP_ON_CHANNEL_FULL", true) } +#[cfg(target_os = "linux")] fn read_udp_track_rxq_ovfl() -> bool { read_bool_env("SOF_UDP_TRACK_RXQ_OVFL", false) } diff --git a/crates/sof-observer/src/ingest/receiver/core.rs b/crates/sof-observer/src/ingest/receiver/core.rs index 188ff697..59c2581b 100644 --- a/crates/sof-observer/src/ingest/receiver/core.rs +++ b/crates/sof-observer/src/ingest/receiver/core.rs @@ -142,6 +142,7 @@ impl RawPacketBatch { storage.packets.reserve(additional); } + #[cfg(target_os = "linux")] pub(super) fn ensure_receive_slots(&mut self, additional: usize) -> usize { let Some(storage) = self.storage.as_mut() else { return 0; @@ -156,6 +157,7 @@ impl RawPacketBatch { start_index } + #[cfg(target_os = "linux")] pub(super) fn receive_buffer_mut( &mut self, buffer_index: usize, @@ -163,6 +165,7 @@ impl RawPacketBatch { self.storage.as_mut()?.buffers.get_mut(buffer_index) } + #[cfg(target_os = "linux")] pub(super) fn push_received_metadata( &mut self, source: SocketAddr, @@ -366,6 +369,7 @@ impl ReceiverTelemetry { .store(current_unix_ms(), Ordering::Relaxed); } + #[cfg(target_os = "linux")] fn record_packets(&self, packet_count: usize) { self.packets.fetch_add( u64::try_from(packet_count).unwrap_or(u64::MAX), diff --git a/crates/sof-observer/src/runtime.rs b/crates/sof-observer/src/runtime.rs index 5f6c4a0a..fdcb81d9 100644 --- a/crates/sof-observer/src/runtime.rs +++ b/crates/sof-observer/src/runtime.rs @@ -8,7 +8,7 @@ use std::{ path::PathBuf, pin::Pin, sync::Arc, - thread::{self, available_parallelism}, + thread::available_parallelism, time::{Duration, Instant}, }; @@ -3449,7 +3449,7 @@ async fn wait_for_termination_signal() { #[cfg(unix)] { let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); - thread::spawn(move || { + std::thread::spawn(move 
|| { let mut signals = match signal_hook::iterator::Signals::new([ signal_hook::consts::signal::SIGTERM, signal_hook::consts::signal::SIGINT, @@ -3822,7 +3822,6 @@ mod tests { Arc, Mutex, atomic::{AtomicUsize, Ordering}, }, - thread, time::Instant, }; @@ -5326,7 +5325,7 @@ mod tests { if counter.load(Ordering::Relaxed) >= expected { return true; } - thread::sleep(Duration::from_millis(10)); + std::thread::sleep(Duration::from_millis(10)); } counter.load(Ordering::Relaxed) >= expected } diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index e7d8c356..ba11a41f 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -1,3 +1,6 @@ packages: - sdks/typescript - sdks/typescript/native/* + +onlyBuiltDependencies: + - "@biomejs/biome" diff --git a/sdks/typescript/scripts/build-native-host.mjs b/sdks/typescript/scripts/build-native-host.mjs index b9475528..78d518dc 100644 --- a/sdks/typescript/scripts/build-native-host.mjs +++ b/sdks/typescript/scripts/build-native-host.mjs @@ -1,13 +1,72 @@ import { spawnSync } from "node:child_process"; import { chmodSync, copyFileSync, existsSync, mkdirSync, readFileSync } from "node:fs"; -import { basename, join } from "node:path"; +import { basename, delimiter, join } from "node:path"; const scriptDirectory = import.meta.dirname; const sdkPackageDirectory = join(scriptDirectory, ".."); const workspaceDirectory = join(sdkPackageDirectory, "..", ".."); +const cargoTargetRoot = process.env.CARGO_TARGET_DIR ?? join(workspaceDirectory, "target"); +const overriddenPlatform = process.env.SOF_TS_NATIVE_PLATFORM; +const overriddenArch = process.env.SOF_TS_NATIVE_ARCH; +const overriddenTarget = process.env.SOF_TS_NATIVE_TARGET; +const cargoSubcommand = process.env.SOF_TS_NATIVE_CARGO_SUBCOMMAND; +const crossBuildCargoSubcommand = "zigbuild"; -function runtimeHostBinaryName() { - return process.platform === "win32" ? 
"sof_ts_runtime_host.exe" : "sof_ts_runtime_host"; +function packagePlatformFromDirectory(packageDirectory) { + const name = basename(packageDirectory); + const separator = name.indexOf("-"); + if (separator === -1) { + return; + } + + return { + arch: name.slice(separator + 1), + platform: name.slice(0, separator), + }; +} + +function buildPlatform(packageDirectory) { + return ( + overriddenPlatform ?? + packagePlatformFromDirectory(packageDirectory)?.platform ?? + process.platform + ); +} + +function buildArch(packageDirectory) { + return overriddenArch ?? packagePlatformFromDirectory(packageDirectory)?.arch ?? process.arch; +} + +function runtimeHostBinaryName(platform) { + return platform === "win32" ? "sof_ts_runtime_host.exe" : "sof_ts_runtime_host"; +} + +function defaultCargoTarget(platform, arch) { + if (platform === process.platform && arch === process.arch) { + return; + } + + if (platform === "darwin" && arch === "x64") { + return "x86_64-apple-darwin"; + } + if (platform === "darwin" && arch === "arm64") { + return "aarch64-apple-darwin"; + } + if (platform === "win32" && arch === "x64") { + return "x86_64-pc-windows-gnu"; + } + if (platform === "win32" && arch === "arm64") { + return "aarch64-pc-windows-gnullvm"; + } + if (platform === "linux" && arch === "arm64") { + return "aarch64-unknown-linux-gnu"; + } +} + +function defaultCargoSubcommand(target) { + if (target !== undefined) { + return crossBuildCargoSubcommand; + } } function nativePackageDirectory() { @@ -29,7 +88,7 @@ function packageMetadata(packageDirectory) { const packageJsonPath = join(packageDirectory, "package.json"); if (!existsSync(packageJsonPath)) { throw new Error( - `native package metadata was not found at ${packageJsonPath}; expected a package for ${process.platform}-${process.arch}`, + `native package metadata was not found at ${packageJsonPath}; expected a package for ${buildPlatform()}-${buildArch()}`, ); } @@ -38,40 +97,62 @@ function packageMetadata(packageDirectory) { 
const packageDirectory = nativePackageDirectory(); const packageJson = packageMetadata(packageDirectory); +const platform = buildPlatform(packageDirectory); +const arch = buildArch(packageDirectory); +const target = overriddenTarget ?? defaultCargoTarget(platform, arch); +const subcommand = cargoSubcommand ?? defaultCargoSubcommand(target); const outputDirectory = join(packageDirectory, "vendor"); -const outputPath = join(outputDirectory, runtimeHostBinaryName()); -const sourcePath = join(workspaceDirectory, "target", "release", runtimeHostBinaryName()); +const binaryName = runtimeHostBinaryName(platform); +const targetDirectory = target === undefined ? "release" : join(target, "release"); +const outputPath = join(outputDirectory, binaryName); +const sourcePath = join(cargoTargetRoot, targetDirectory, binaryName); const features = ["provider-websocket", "provider-grpc", "gossip-bootstrap"]; -if (process.platform === "linux") { +if (platform === "linux") { features.push("kernel-bypass"); } const packagePlatform = basename(packageDirectory); -if (packagePlatform !== `${process.platform}-${process.arch}`) { +const currentPlatformKey = `${platform}-${arch}`; +if (packagePlatform !== currentPlatformKey) { throw new Error( - `native package ${packageJson.name ?? packagePlatform} does not match the current platform ${process.platform}-${process.arch}`, + `native package ${packageJson.name ?? 
packagePlatform} does not match the selected platform ${currentPlatformKey}`, ); } -const cargo = spawnSync( - "cargo", - [ - "build", - "--release", - "-p", - "sof", - "--features", - features.join(","), - "--bin", - "sof_ts_runtime_host", - ], - { - cwd: workspaceDirectory, - stdio: "inherit", - }, +const cargoArgs = []; +if (subcommand !== undefined && subcommand.length > 0) { + cargoArgs.push(subcommand); +} else { + cargoArgs.push("build"); +} +if (target !== undefined) { + cargoArgs.push("--target", target); +} +cargoArgs.push( + "--release", + "-p", + "sof", + "--features", + features.join(","), + "--bin", + "sof_ts_runtime_host", ); +const cargoEnv = { ...process.env }; +cargoEnv.PATH = [join(sdkPackageDirectory, "node_modules", ".bin"), cargoEnv.PATH] + .filter((value) => value !== undefined && value.length > 0) + .join(delimiter); +if (platform === "darwin") { + cargoEnv.CARGO_PROFILE_RELEASE_LTO = "off"; +} + +const cargo = spawnSync("cargo", cargoArgs, { + cwd: workspaceDirectory, + env: cargoEnv, + stdio: "inherit", +}); + if (cargo.error !== undefined) { throw new Error(`failed to run cargo: ${cargo.error.message}`); } @@ -87,6 +168,6 @@ if (!existsSync(sourcePath)) { mkdirSync(outputDirectory, { recursive: true }); copyFileSync(sourcePath, outputPath); -if (process.platform !== "win32") { +if (platform !== "win32") { chmodSync(outputPath, 0o755); } diff --git a/sdks/typescript/scripts/publish-package.mjs b/sdks/typescript/scripts/publish-package.mjs new file mode 100644 index 00000000..36ae8f4c --- /dev/null +++ b/sdks/typescript/scripts/publish-package.mjs @@ -0,0 +1,45 @@ +import { spawnSync } from "node:child_process"; +import { readFileSync } from "node:fs"; +import { join } from "node:path"; + +function fail(message) { + throw new Error(message); +} + +const packageJson = JSON.parse(readFileSync(join(process.cwd(), "package.json"), "utf8")); +const packageName = packageJson.name; +const packageVersion = packageJson.version; + +if (typeof 
packageName !== "string" || packageName.length === 0) { + fail("package.json must define a package name"); +} + +if (typeof packageVersion !== "string" || packageVersion.length === 0) { + fail("package.json must define a package version"); +} + +const packageSpec = `${packageName}@${packageVersion}`; +const view = spawnSync("npm", ["view", packageSpec, "version"], { + stdio: "ignore", +}); + +if (view.error !== undefined) { + fail(`failed to check npm registry for ${packageSpec}: ${view.error.message}`); +} + +if (view.status === 0) { + process.stdout.write(`${packageSpec} is already published; skipping npm publish.\n`); +} else { + const publish = spawnSync("pnpm", ["publish", "--access", "public", "--no-git-checks"], { + stdio: "inherit", + }); + + if (publish.error !== undefined) { + fail(`failed to run pnpm publish for ${packageSpec}: ${publish.error.message}`); + } + + if (publish.status !== 0) { + process.exitCode = publish.status ?? 1; + fail(`pnpm publish failed for ${packageSpec} with status ${String(publish.status)}`); + } +} diff --git a/sdks/typescript/scripts/validate-native-package.mjs b/sdks/typescript/scripts/validate-native-package.mjs new file mode 100644 index 00000000..122405af --- /dev/null +++ b/sdks/typescript/scripts/validate-native-package.mjs @@ -0,0 +1,53 @@ +import { spawnSync } from "node:child_process"; +import { resolve } from "node:path"; + +function fail(message) { + throw new Error(message); +} + +function readFlag(name) { + const flag = `--${name}`; + const index = process.argv.indexOf(flag); + if (index === -1) { + return; + } + + const value = process.argv[index + 1]; + if (value === undefined || value.startsWith("--")) { + fail(`missing value for ${flag}`); + } + + return value; +} + +const packageDirectoryFlag = readFlag("package-dir"); +if (packageDirectoryFlag === undefined) { + fail("expected --package-dir"); +} + +const platform = readFlag("platform"); +const arch = readFlag("arch"); +const target = readFlag("target"); 
+const cargoSubcommand = readFlag("cargo-subcommand"); +const packageDirectory = resolve(packageDirectoryFlag); + +const child = spawnSync("pnpm", ["publish", "--dry-run", "--access", "public", "--no-git-checks"], { + cwd: packageDirectory, + env: { + ...process.env, + ...(platform === undefined ? {} : { SOF_TS_NATIVE_PLATFORM: platform }), + ...(arch === undefined ? {} : { SOF_TS_NATIVE_ARCH: arch }), + ...(target === undefined ? {} : { SOF_TS_NATIVE_TARGET: target }), + ...(cargoSubcommand === undefined ? {} : { SOF_TS_NATIVE_CARGO_SUBCOMMAND: cargoSubcommand }), + }, + stdio: "inherit", +}); + +if (child.error !== undefined) { + fail(`failed to run pnpm: ${child.error.message}`); +} + +if (child.status !== 0) { + process.exitCode = child.status ?? 1; + fail(`pnpm publish --dry-run failed with status ${String(child.status)}`); +} diff --git a/vendor/laserstream-core-proto/Cargo.toml b/vendor/laserstream-core-proto/Cargo.toml new file mode 100644 index 00000000..87f64a38 --- /dev/null +++ b/vendor/laserstream-core-proto/Cargo.toml @@ -0,0 +1,227 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
+ +[package] +edition = "2021" +name = "laserstream-core-proto" +version = "9.0.2" +authors = ["Helius Labs "] +build = "build.rs" +publish = true +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = "LaserStream gRPC Protocol Definitions" +homepage = "https://helius.dev" +readme = false +keywords = [ + "solana", + "grpc", + "laserstream", +] +license = "Apache-2.0" +repository = "https://github.com/helius-labs/yellowstone-grpc" + +[features] +convert = [ + "dep:bincode", + "dep:solana-account", + "dep:solana-account-decoder", + "dep:solana-clock", + "dep:solana-hash", + "dep:solana-message", + "dep:solana-pubkey", + "dep:solana-signature", + "dep:solana-transaction", + "dep:solana-transaction-context", + "dep:solana-transaction-error", + "dep:solana-transaction-status", +] +default = [ + "convert", + "tonic", + "tonic-compression", +] +plugin = [ + "convert", + "dep:agave-geyser-plugin-interface", + "dep:base64", + "dep:bs58", + "dep:bytes", + "dep:serde", + "dep:smallvec", + "dep:spl-token-2022", + "dep:thiserror", + "dep:tonic", +] +plugin-bench = [ + "plugin", + "dep:prost_011", + "dep:solana-storage-proto", + "solana-transaction/blake3", +] +tonic = [ + "dep:tonic", + "dep:tonic-prost", +] +tonic-compression = [ + "tonic", + "tonic/gzip", + "tonic/zstd", +] + +[lib] +name = "laserstream_core_proto" +path = "src/lib.rs" + +[[bench]] +name = "encode" +path = "benches/encode.rs" +harness = false +required-features = ["plugin-bench"] + +[dependencies.agave-geyser-plugin-interface] +version = "~2.3.0" +optional = true + +[dependencies.base64] +version = "0.22.1" +optional = true + +[dependencies.bincode] +version = "1.0.0" +optional = true + +[dependencies.bs58] +version = "0.5.1" +optional = true + +[dependencies.bytes] +version = "1.0.0" +optional = true + +[dependencies.prost] +version = "0.14.0" + +[dependencies.prost-types] +version = "0.14.0" + +[dependencies.prost_011] +version = "0.11.9" +optional = 
true +package = "prost" + +[dependencies.serde] +version = "1.0.0" +optional = true + +[dependencies.smallvec] +version = "1.0.0" +optional = true + +[dependencies.solana-account] +version = "3.0.0" +optional = true + +[dependencies.solana-account-decoder] +version = "3.0.0" +optional = true + +[dependencies.solana-clock] +version = "3.0.0" +optional = true + +[dependencies.solana-hash] +version = "3.0.0" +optional = true + +[dependencies.solana-message] +version = "3.0.0" +optional = true + +[dependencies.solana-pubkey] +version = "3.0.0" +optional = true + +[dependencies.solana-signature] +version = "3.0.0" +optional = true + +[dependencies.solana-storage-proto] +version = "3.0.0" +optional = true + +[dependencies.solana-transaction] +version = "3.0.0" +optional = true + +[dependencies.solana-transaction-context] +version = "3.0.0" +optional = true + +[dependencies.solana-transaction-error] +version = "3.0.0" +optional = true + +[dependencies.solana-transaction-status] +version = "3.0.0" +optional = true +features = ["agave-unstable-api"] + +[dependencies.spl-token-2022] +version = "8.0.0" +optional = true + +[dependencies.thiserror] +version = "1.0.0" +optional = true + +[dependencies.tonic] +version = "0.14.0" +optional = true + +[dependencies.tonic-prost] +version = "0.14.0" +optional = true + +[dev-dependencies.criterion] +version = "0.5.1" + +[dev-dependencies.prost_011] +version = "0.11.9" +package = "prost" + +[dev-dependencies.solana-keypair] +version = "3.0.0" + +[dev-dependencies.solana-signer] +version = "3.0.0" + +[dev-dependencies.solana-storage-proto] +version = "3.0.0" + +[dev-dependencies.solana-transaction] +version = "3.0.0" +features = ["blake3"] + +[build-dependencies.anyhow] +version = "1.0.0" + +[build-dependencies.protoc-bin-vendored] +version = "3.2.0" + +[build-dependencies.tonic-build] +version = "0.14.0" + +[build-dependencies.tonic-prost-build] +version = "0.14.0" diff --git a/vendor/laserstream-core-proto/LICENSE_APACHE2 
b/vendor/laserstream-core-proto/LICENSE_APACHE2 new file mode 100644 index 00000000..373dde57 --- /dev/null +++ b/vendor/laserstream-core-proto/LICENSE_APACHE2 @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 Grafana Labs + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/laserstream-core-proto/benches/encode.rs b/vendor/laserstream-core-proto/benches/encode.rs new file mode 100644 index 00000000..7a92f84b --- /dev/null +++ b/vendor/laserstream-core-proto/benches/encode.rs @@ -0,0 +1,82 @@ +use { + criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}, + prost::Message as _, + prost_types::Timestamp, + std::time::{Duration, SystemTime}, + laserstream_core_proto::plugin::{ + filter::message::{ + tests::{ + create_accounts, create_message_filters, load_predefined_blocks, + load_predefined_transactions, + }, + FilteredUpdate, FilteredUpdateOneof, + }, + message::MessageTransaction, + }, +}; + +fn bench_account(c: &mut Criterion) { + let filters = create_message_filters(&["my special filter"]); + + macro_rules! 
bench { + ($updates:expr, $kind:expr) => { + c.bench_with_input(BenchmarkId::new($kind, "ref"), $updates, |b, updates| { + b.iter(|| { + for update in updates.iter() { + update.encode_to_vec().len(); + } + }) + }); + c.bench_with_input(BenchmarkId::new($kind, "prost"), $updates, |b, updates| { + b.iter(|| { + for update in updates.iter() { + update.as_subscribe_update().encode_to_vec().len(); + } + }) + }); + }; + } + + let updates = create_accounts() + .into_iter() + .map(|(msg, data_slice)| FilteredUpdate { + filters: filters.clone(), + message: FilteredUpdateOneof::account(&msg, data_slice), + created_at: Timestamp::from(SystemTime::now()), + }) + .collect::>(); + bench!(&updates, "accounts"); + + let updates = load_predefined_transactions() + .into_iter() + .map(|transaction| FilteredUpdate { + filters: filters.clone(), + message: FilteredUpdateOneof::transaction(&MessageTransaction { + transaction, + slot: 42, + created_at: Timestamp::from(SystemTime::now()), + }), + created_at: Timestamp::from(SystemTime::now()), + }) + .collect::>(); + bench!(&updates, "transactions"); + + let updates = load_predefined_blocks() + .into_iter() + .map(|block| FilteredUpdate { + filters: filters.clone(), + message: FilteredUpdateOneof::block(Box::new(block)), + created_at: Timestamp::from(SystemTime::now()), + }) + .collect::>(); + bench!(&updates, "blocks"); +} + +criterion_group!( + name = benches; + config = Criterion::default() + .warm_up_time(Duration::from_secs(3)) // default 3 + .measurement_time(Duration::from_secs(5)); // default 5 + targets = bench_account +); +criterion_main!(benches); diff --git a/vendor/laserstream-core-proto/build.rs b/vendor/laserstream-core-proto/build.rs new file mode 100644 index 00000000..3c6d6a06 --- /dev/null +++ b/vendor/laserstream-core-proto/build.rs @@ -0,0 +1,141 @@ +use { + std::{env, fs, path::Path}, + tonic_prost_build::manual::{Builder, Method, Service}, +}; + +fn main() -> anyhow::Result<()> { + let protoc_path = 
protoc_bin_vendored::protoc_bin_path()?; + unsafe { + std::env::set_var("PROTOC", protoc_path); + } + + // build protos + tonic_prost_build::configure().compile_protos(&["proto/geyser.proto"], &["proto"])?; + + // build protos without tonic (wasm) + let out_dir = env::var("OUT_DIR").expect("OUT_DIR not found"); + let out_dir_path = Path::new(&out_dir).join("no-tonic"); + fs::create_dir_all(&out_dir_path).expect("failed to create out no-tonic directory"); + tonic_prost_build::configure() + .build_client(false) + .build_server(false) + .out_dir(out_dir_path) + .compile_protos(&["proto/geyser.proto"], &["proto"])?; + + // build with accepting our custom struct + let geyser_service = Service::builder() + .name("Geyser") + .package("geyser") + .method( + Method::builder() + .name("subscribe") + .route_name("Subscribe") + .input_type("crate::geyser::SubscribeRequest") + .output_type("crate::plugin::filter::message::FilteredUpdate") + .codec_path("tonic_prost::ProstCodec") + .client_streaming() + .server_streaming() + .build(), + ) + .method( + Method::builder() + .name("subscribe_raw") + .route_name("SubscribeRaw") + .input_type("crate::geyser::SubscribeRequest") + .output_type("crate::plugin::filter::message::FilteredUpdate") + .codec_path("tonic_prost::ProstCodec") + .client_streaming() + .server_streaming() + .build(), + ) + .method( + Method::builder() + .name("subscribe_batch") + .route_name("SubscribeBatch") + .input_type("crate::geyser::SubscribeRequest") + .output_type("crate::plugin::filter::message::FilteredUpdateBatch") + .codec_path("tonic_prost::ProstCodec") + .client_streaming() + .server_streaming() + .build(), + ) + .method( + Method::builder() + .name("subscribe_first_available_slot") + .route_name("SubscribeReplayInfo") + .input_type("crate::geyser::SubscribeReplayInfoRequest") + .output_type("crate::geyser::SubscribeReplayInfoResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("ping") + 
.route_name("Ping") + .input_type("crate::geyser::PingRequest") + .output_type("crate::geyser::PongResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("get_latest_blockhash") + .route_name("GetLatestBlockhash") + .input_type("crate::geyser::GetLatestBlockhashRequest") + .output_type("crate::geyser::GetLatestBlockhashResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("get_block_height") + .route_name("GetBlockHeight") + .input_type("crate::geyser::GetBlockHeightRequest") + .output_type("crate::geyser::GetBlockHeightResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("get_slot") + .route_name("GetSlot") + .input_type("crate::geyser::GetSlotRequest") + .output_type("crate::geyser::GetSlotResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("is_blockhash_valid") + .route_name("IsBlockhashValid") + .input_type("crate::geyser::IsBlockhashValidRequest") + .output_type("crate::geyser::IsBlockhashValidResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .method( + Method::builder() + .name("get_version") + .route_name("GetVersion") + .input_type("crate::geyser::GetVersionRequest") + .output_type("crate::geyser::GetVersionResponse") + .codec_path("tonic_prost::ProstCodec") + .build(), + ) + .build(); + Builder::new() + .build_client(false) + .compile(&[geyser_service]); + + // patching generated custom struct (if custom Codec is used) + // let mut location = std::path::PathBuf::from(std::env::var("OUT_DIR")?); + // location.push("geyser.Geyser.rs"); + // let geyser_rs = std::fs::read_to_string(location.clone())?; + // let geyser_rs = geyser_rs.replace( + // "let codec = crate::plugin::codec::SubscribeCodec::default();", + // "let codec = crate::plugin::codec::SubscribeCodec::::default();", + // ); + // std::fs::write(location, geyser_rs)?; + 
+ Ok(()) +} diff --git a/vendor/laserstream-core-proto/fixtures/blocks/18144001.bincode b/vendor/laserstream-core-proto/fixtures/blocks/18144001.bincode new file mode 100644 index 00000000..3d0a65b7 Binary files /dev/null and b/vendor/laserstream-core-proto/fixtures/blocks/18144001.bincode differ diff --git a/vendor/laserstream-core-proto/fixtures/blocks/43200000.bincode b/vendor/laserstream-core-proto/fixtures/blocks/43200000.bincode new file mode 100644 index 00000000..acb1f1c6 Binary files /dev/null and b/vendor/laserstream-core-proto/fixtures/blocks/43200000.bincode differ diff --git a/vendor/laserstream-core-proto/fixtures/blocks/64800004.bincode b/vendor/laserstream-core-proto/fixtures/blocks/64800004.bincode new file mode 100644 index 00000000..232addc3 Binary files /dev/null and b/vendor/laserstream-core-proto/fixtures/blocks/64800004.bincode differ diff --git a/vendor/laserstream-core-proto/proto/geyser.proto b/vendor/laserstream-core-proto/proto/geyser.proto new file mode 100644 index 00000000..76bf11d3 --- /dev/null +++ b/vendor/laserstream-core-proto/proto/geyser.proto @@ -0,0 +1,320 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; +import public "solana-storage.proto"; + +option go_package = "github.com/helius-labs/laserstream-sdk/go/proto"; + +package geyser; + +service Geyser { + rpc Subscribe(stream SubscribeRequest) returns (stream SubscribeUpdate) {} + rpc SubscribePreprocessed(stream SubscribePreprocessedRequest) returns (stream SubscribePreprocessedUpdate) {} + rpc SubscribeReplayInfo(SubscribeReplayInfoRequest) returns (SubscribeReplayInfoResponse) {} + rpc Ping(PingRequest) returns (PongResponse) {} + rpc GetLatestBlockhash(GetLatestBlockhashRequest) returns (GetLatestBlockhashResponse) {} + rpc GetBlockHeight(GetBlockHeightRequest) returns (GetBlockHeightResponse) {} + rpc GetSlot(GetSlotRequest) returns (GetSlotResponse) {} + rpc IsBlockhashValid(IsBlockhashValidRequest) returns (IsBlockhashValidResponse) {} + rpc 
GetVersion(GetVersionRequest) returns (GetVersionResponse) {} +} + +// Subscriptions to Preprocessed transactions +message SubscribePreprocessedRequest { + map transactions = 1; + optional SubscribeRequestPing ping = 2; + +} + +message SubscribePreprocessedRequestFilterTransactions { + optional bool vote = 1; + optional string signature = 2; + repeated string account_include = 3; + repeated string account_exclude = 4; + repeated string account_required = 5; +} + +message SubscribePreprocessedUpdate { + repeated string filters = 1; + oneof update_oneof { + SubscribePreprocessedTransaction transaction = 2; + SubscribeUpdatePing ping = 3; + SubscribeUpdatePong pong = 4; + } + google.protobuf.Timestamp created_at = 5; +} + +message SubscribePreprocessedTransaction { + SubscribePreprocessedTransactionInfo transaction = 1; + uint64 slot = 2; +} + +message SubscribePreprocessedTransactionInfo { + bytes signature = 1; + bool is_vote = 2; + solana.storage.ConfirmedBlock.Transaction transaction = 3; +} + +enum CommitmentLevel { + PROCESSED = 0; + CONFIRMED = 1; + FINALIZED = 2; +} + +enum SlotStatus { + SLOT_PROCESSED = 0; + SLOT_CONFIRMED = 1; + SLOT_FINALIZED = 2; + SLOT_FIRST_SHRED_RECEIVED = 3; + SLOT_COMPLETED = 4; + SLOT_CREATED_BANK = 5; + SLOT_DEAD = 6; +} + +message SubscribeRequest { + map accounts = 1; + map slots = 2; + map transactions = 3; + map transactions_status = 10; + map blocks = 4; + map blocks_meta = 5; + map entry = 8; + optional CommitmentLevel commitment = 6; + repeated SubscribeRequestAccountsDataSlice accounts_data_slice = 7; + optional SubscribeRequestPing ping = 9; + optional uint64 from_slot = 11; +} + +message SubscribeRequestFilterAccounts { + repeated string account = 2; + repeated string owner = 3; + repeated SubscribeRequestFilterAccountsFilter filters = 4; + optional bool nonempty_txn_signature = 5; +} + +message SubscribeRequestFilterAccountsFilter { + oneof filter { + SubscribeRequestFilterAccountsFilterMemcmp memcmp = 1; + uint64 
datasize = 2; + bool token_account_state = 3; + SubscribeRequestFilterAccountsFilterLamports lamports = 4; + } +} + +message SubscribeRequestFilterAccountsFilterMemcmp { + uint64 offset = 1; + oneof data { + bytes bytes = 2; + string base58 = 3; + string base64 = 4; + } +} + +message SubscribeRequestFilterAccountsFilterLamports { + oneof cmp { + uint64 eq = 1; + uint64 ne = 2; + uint64 lt = 3; + uint64 gt = 4; + } +} + +message SubscribeRequestFilterSlots { + optional bool filter_by_commitment = 1; + optional bool interslot_updates = 2; +} + +message SubscribeRequestFilterTransactions { + optional bool vote = 1; + optional bool failed = 2; + optional string signature = 5; + repeated string account_include = 3; + repeated string account_exclude = 4; + repeated string account_required = 6; +} + +message SubscribeRequestFilterBlocks { + repeated string account_include = 1; + optional bool include_transactions = 2; + optional bool include_accounts = 3; + optional bool include_entries = 4; +} + +message SubscribeRequestFilterBlocksMeta {} + +message SubscribeRequestFilterEntry {} + +message SubscribeRequestAccountsDataSlice { + uint64 offset = 1; + uint64 length = 2; +} + +message SubscribeRequestPing { + int32 id = 1; +} + +message SubscribeUpdate { + repeated string filters = 1; + oneof update_oneof { + SubscribeUpdateAccount account = 2; + SubscribeUpdateSlot slot = 3; + SubscribeUpdateTransaction transaction = 4; + SubscribeUpdateTransactionStatus transaction_status = 10; + SubscribeUpdateBlock block = 5; + SubscribeUpdatePing ping = 6; + SubscribeUpdatePong pong = 9; + SubscribeUpdateBlockMeta block_meta = 7; + SubscribeUpdateEntry entry = 8; + } + google.protobuf.Timestamp created_at = 11; +} + +message SubscribeUpdateBatch { + repeated SubscribeUpdate updates = 1; +} + +message SubscribeUpdateAccount { + SubscribeUpdateAccountInfo account = 1; + uint64 slot = 2; + bool is_startup = 3; +} + +message SubscribeUpdateAccountInfo { + bytes pubkey = 1; + uint64 
lamports = 2; + bytes owner = 3; + bool executable = 4; + uint64 rent_epoch = 5; + bytes data = 6; + uint64 write_version = 7; + optional bytes txn_signature = 8; +} + +message SubscribeUpdateSlot { + uint64 slot = 1; + optional uint64 parent = 2; + SlotStatus status = 3; + optional string dead_error = 4; +} + +message SubscribeUpdateTransaction { + SubscribeUpdateTransactionInfo transaction = 1; + uint64 slot = 2; +} + +message SubscribeUpdateTransactionInfo { + bytes signature = 1; + bool is_vote = 2; + solana.storage.ConfirmedBlock.Transaction transaction = 3; + solana.storage.ConfirmedBlock.TransactionStatusMeta meta = 4; + uint64 index = 5; +} + +message SubscribeUpdateTransactionStatus { + uint64 slot = 1; + bytes signature = 2; + bool is_vote = 3; + uint64 index = 4; + solana.storage.ConfirmedBlock.TransactionError err = 5; +} + +message SubscribeUpdateBlock { + uint64 slot = 1; + string blockhash = 2; + solana.storage.ConfirmedBlock.Rewards rewards = 3; + solana.storage.ConfirmedBlock.UnixTimestamp block_time = 4; + solana.storage.ConfirmedBlock.BlockHeight block_height = 5; + uint64 parent_slot = 7; + string parent_blockhash = 8; + uint64 executed_transaction_count = 9; + repeated SubscribeUpdateTransactionInfo transactions = 6; + uint64 updated_account_count = 10; + repeated SubscribeUpdateAccountInfo accounts = 11; + uint64 entries_count = 12; + repeated SubscribeUpdateEntry entries = 13; +} + +message SubscribeUpdateBlockMeta { + uint64 slot = 1; + string blockhash = 2; + solana.storage.ConfirmedBlock.Rewards rewards = 3; + solana.storage.ConfirmedBlock.UnixTimestamp block_time = 4; + solana.storage.ConfirmedBlock.BlockHeight block_height = 5; + uint64 parent_slot = 6; + string parent_blockhash = 7; + uint64 executed_transaction_count = 8; + uint64 entries_count = 9; +} + +message SubscribeUpdateEntry { + uint64 slot = 1; + uint64 index = 2; + uint64 num_hashes = 3; + bytes hash = 4; + uint64 executed_transaction_count = 5; + uint64 
starting_transaction_index = 6; // added in v1.18, for solana 1.17 value is always 0 +} + +message SubscribeUpdatePing {} + +message SubscribeUpdatePong { + int32 id = 1; +} + +// non-streaming methods + +message SubscribeReplayInfoRequest {} + +message SubscribeReplayInfoResponse { + optional uint64 first_available = 1; +} + +message PingRequest { + int32 count = 1; +} + +message PongResponse { + int32 count = 1; +} + +message GetLatestBlockhashRequest { + optional CommitmentLevel commitment = 1; +} + +message GetLatestBlockhashResponse { + uint64 slot = 1; + string blockhash = 2; + uint64 last_valid_block_height = 3; +} + +message GetBlockHeightRequest { + optional CommitmentLevel commitment = 1; +} + +message GetBlockHeightResponse { + uint64 block_height = 1; +} + +message GetSlotRequest { + optional CommitmentLevel commitment = 1; +} + +message GetSlotResponse { + uint64 slot = 1; +} + +message GetVersionRequest {} + +message GetVersionResponse { + string version = 1; +} + +message IsBlockhashValidRequest { + string blockhash = 1; + optional CommitmentLevel commitment = 2; +} + +message IsBlockhashValidResponse { + uint64 slot = 1; + bool valid = 2; +} diff --git a/vendor/laserstream-core-proto/proto/solana-storage.proto b/vendor/laserstream-core-proto/proto/solana-storage.proto new file mode 100644 index 00000000..d01c26ee --- /dev/null +++ b/vendor/laserstream-core-proto/proto/solana-storage.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; + +package solana.storage.ConfirmedBlock; + +option go_package = "github.com/helius-labs/laserstream-sdk/go/proto"; + +message ConfirmedBlock { + string previous_blockhash = 1; + string blockhash = 2; + uint64 parent_slot = 3; + repeated ConfirmedTransaction transactions = 4; + repeated Reward rewards = 5; + UnixTimestamp block_time = 6; + BlockHeight block_height = 7; + NumPartitions num_partitions = 8; +} + +message ConfirmedTransaction { + Transaction transaction = 1; + TransactionStatusMeta meta = 2; +} + +message 
Transaction { + repeated bytes signatures = 1; + Message message = 2; +} + +message Message { + MessageHeader header = 1; + repeated bytes account_keys = 2; + bytes recent_blockhash = 3; + repeated CompiledInstruction instructions = 4; + bool versioned = 5; + repeated MessageAddressTableLookup address_table_lookups = 6; +} + +message MessageHeader { + uint32 num_required_signatures = 1; + uint32 num_readonly_signed_accounts = 2; + uint32 num_readonly_unsigned_accounts = 3; +} + +message MessageAddressTableLookup { + bytes account_key = 1; + bytes writable_indexes = 2; + bytes readonly_indexes = 3; +} + +message TransactionStatusMeta { + TransactionError err = 1; + uint64 fee = 2; + repeated uint64 pre_balances = 3; + repeated uint64 post_balances = 4; + repeated InnerInstructions inner_instructions = 5; + bool inner_instructions_none = 10; + repeated string log_messages = 6; + bool log_messages_none = 11; + repeated TokenBalance pre_token_balances = 7; + repeated TokenBalance post_token_balances = 8; + repeated Reward rewards = 9; + repeated bytes loaded_writable_addresses = 12; + repeated bytes loaded_readonly_addresses = 13; + ReturnData return_data = 14; + bool return_data_none = 15; + + // Sum of compute units consumed by all instructions. + // Available since Solana v1.10.35 / v1.11.6. + // Set to `None` for txs executed on earlier versions. + optional uint64 compute_units_consumed = 16; + // Total transaction cost + optional uint64 cost_units = 17; +} + +message TransactionError { + bytes err = 1; +} + +message InnerInstructions { + uint32 index = 1; + repeated InnerInstruction instructions = 2; +} + +message InnerInstruction { + uint32 program_id_index = 1; + bytes accounts = 2; + bytes data = 3; + + // Invocation stack height of an inner instruction. + // Available since Solana v1.14.6 + // Set to `None` for txs executed on earlier versions. 
+ optional uint32 stack_height = 4; +} + +message CompiledInstruction { + uint32 program_id_index = 1; + bytes accounts = 2; + bytes data = 3; +} + +message TokenBalance { + uint32 account_index = 1; + string mint = 2; + UiTokenAmount ui_token_amount = 3; + string owner = 4; + string program_id = 5; +} + +message UiTokenAmount { + double ui_amount = 1; + uint32 decimals = 2; + string amount = 3; + string ui_amount_string = 4; +} + +message ReturnData { + bytes program_id = 1; + bytes data = 2; +} + +enum RewardType { + Unspecified = 0; + Fee = 1; + Rent = 2; + Staking = 3; + Voting = 4; +} + +message Reward { + string pubkey = 1; + int64 lamports = 2; + uint64 post_balance = 3; + RewardType reward_type = 4; + string commission = 5; +} + +message Rewards { + repeated Reward rewards = 1; + NumPartitions num_partitions = 2; +} + +message UnixTimestamp { + int64 timestamp = 1; +} + +message BlockHeight { + uint64 block_height = 1; +} + +message NumPartitions { + uint64 num_partitions = 1; +} diff --git a/vendor/laserstream-core-proto/src/lib.rs b/vendor/laserstream-core-proto/src/lib.rs new file mode 100644 index 00000000..1c90d88c --- /dev/null +++ b/vendor/laserstream-core-proto/src/lib.rs @@ -0,0 +1,648 @@ +#![allow(clippy::large_enum_variant)] + +pub mod geyser { + #![allow(clippy::clone_on_ref_ptr)] + #![allow(clippy::missing_const_for_fn)] + + #[cfg(feature = "tonic")] + include!(concat!(env!("OUT_DIR"), "/geyser.rs")); + #[cfg(not(feature = "tonic"))] + include!(concat!(env!("OUT_DIR"), "/no-tonic/geyser.rs")); +} + +pub mod solana { + #![allow(clippy::missing_const_for_fn)] + + pub mod storage { + pub mod confirmed_block { + #[cfg(feature = "tonic")] + include!(concat!( + env!("OUT_DIR"), + "/solana.storage.confirmed_block.rs" + )); + #[cfg(not(feature = "tonic"))] + include!(concat!( + env!("OUT_DIR"), + "/no-tonic/solana.storage.confirmed_block.rs" + )); + } + } +} + +pub mod prelude { + pub use super::{geyser::*, solana::storage::confirmed_block::*}; +} + 
+#[cfg(feature = "tonic")] +pub use tonic; +pub use {prost, prost_types}; + +#[cfg(feature = "plugin")] +pub mod plugin; + +#[cfg(feature = "convert")] +pub mod convert_to { + use { + super::prelude as proto, + solana_clock::UnixTimestamp, + solana_message::{ + compiled_instruction::CompiledInstruction, + v0::{LoadedMessage, MessageAddressTableLookup}, + LegacyMessage, MessageHeader, SanitizedMessage, + }, + solana_pubkey::Pubkey, + solana_signature::Signature, + solana_transaction::sanitized::SanitizedTransaction, + solana_transaction_context::TransactionReturnData, + solana_transaction_error::TransactionError, + solana_transaction_status::{ + InnerInstruction, InnerInstructions, Reward, RewardType, TransactionStatusMeta, + TransactionTokenBalance, + }, + }; + + pub fn create_transaction(tx: &SanitizedTransaction) -> proto::Transaction { + proto::Transaction { + signatures: tx + .signatures() + .iter() + .map(|signature| >::as_ref(signature).into()) + .collect(), + message: Some(create_message(tx.message())), + } + } + + pub fn create_message(message: &SanitizedMessage) -> proto::Message { + match message { + SanitizedMessage::Legacy(LegacyMessage { message, .. }) => proto::Message { + header: Some(create_header(&message.header)), + account_keys: create_pubkeys(&message.account_keys), + recent_blockhash: message.recent_blockhash.to_bytes().into(), + instructions: create_instructions(&message.instructions), + versioned: false, + address_table_lookups: vec![], + }, + SanitizedMessage::V0(LoadedMessage { message, .. 
}) => proto::Message { + header: Some(create_header(&message.header)), + account_keys: create_pubkeys(&message.account_keys), + recent_blockhash: message.recent_blockhash.to_bytes().into(), + instructions: create_instructions(&message.instructions), + versioned: true, + address_table_lookups: create_lookups(&message.address_table_lookups), + }, + } + } + + pub const fn create_header(header: &MessageHeader) -> proto::MessageHeader { + proto::MessageHeader { + num_required_signatures: header.num_required_signatures as u32, + num_readonly_signed_accounts: header.num_readonly_signed_accounts as u32, + num_readonly_unsigned_accounts: header.num_readonly_unsigned_accounts as u32, + } + } + + pub fn create_pubkeys(pubkeys: &[Pubkey]) -> Vec> { + pubkeys + .iter() + .map(|key| >::as_ref(key).into()) + .collect() + } + + pub fn create_instructions(ixs: &[CompiledInstruction]) -> Vec { + ixs.iter().map(create_instruction).collect() + } + + pub fn create_instruction(ix: &CompiledInstruction) -> proto::CompiledInstruction { + proto::CompiledInstruction { + program_id_index: ix.program_id_index as u32, + accounts: ix.accounts.clone(), + data: ix.data.clone(), + } + } + + pub fn create_lookups( + lookups: &[MessageAddressTableLookup], + ) -> Vec { + lookups.iter().map(create_lookup).collect() + } + + pub fn create_lookup(lookup: &MessageAddressTableLookup) -> proto::MessageAddressTableLookup { + proto::MessageAddressTableLookup { + account_key: >::as_ref(&lookup.account_key).into(), + writable_indexes: lookup.writable_indexes.clone(), + readonly_indexes: lookup.readonly_indexes.clone(), + } + } + + pub fn create_transaction_meta(meta: &TransactionStatusMeta) -> proto::TransactionStatusMeta { + let TransactionStatusMeta { + status, + fee, + pre_balances, + post_balances, + inner_instructions, + log_messages, + pre_token_balances, + post_token_balances, + rewards, + loaded_addresses, + return_data, + compute_units_consumed, + cost_units, + } = meta; + let err = 
create_transaction_error(status); + let inner_instructions_none = inner_instructions.is_none(); + let inner_instructions = inner_instructions + .as_deref() + .map(create_inner_instructions_vec) + .unwrap_or_default(); + let log_messages_none = log_messages.is_none(); + let log_messages = log_messages.clone().unwrap_or_default(); + let pre_token_balances = pre_token_balances + .as_deref() + .map(create_token_balances) + .unwrap_or_default(); + let post_token_balances = post_token_balances + .as_deref() + .map(create_token_balances) + .unwrap_or_default(); + let rewards = rewards.as_deref().map(create_rewards).unwrap_or_default(); + let loaded_writable_addresses = create_pubkeys(&loaded_addresses.writable); + let loaded_readonly_addresses = create_pubkeys(&loaded_addresses.readonly); + + proto::TransactionStatusMeta { + err, + fee: *fee, + pre_balances: pre_balances.clone(), + post_balances: post_balances.clone(), + inner_instructions, + inner_instructions_none, + log_messages, + log_messages_none, + pre_token_balances, + post_token_balances, + rewards, + loaded_writable_addresses, + loaded_readonly_addresses, + return_data: return_data.as_ref().map(create_return_data), + return_data_none: return_data.is_none(), + compute_units_consumed: *compute_units_consumed, + cost_units: *cost_units, + } + } + + pub fn create_transaction_error( + status: &Result<(), TransactionError>, + ) -> Option { + match status { + Ok(()) => None, + Err(err) => Some(proto::TransactionError { + err: bincode::serialize(&err).expect("transaction error to serialize to bytes"), + }), + } + } + + pub fn create_inner_instructions_vec( + ixs: &[InnerInstructions], + ) -> Vec { + ixs.iter().map(create_inner_instructions).collect() + } + + pub fn create_inner_instructions(instructions: &InnerInstructions) -> proto::InnerInstructions { + proto::InnerInstructions { + index: instructions.index as u32, + instructions: create_inner_instruction_vec(&instructions.instructions), + } + } + + pub fn 
create_inner_instruction_vec(ixs: &[InnerInstruction]) -> Vec { + ixs.iter().map(create_inner_instruction).collect() + } + + pub fn create_inner_instruction(instruction: &InnerInstruction) -> proto::InnerInstruction { + proto::InnerInstruction { + program_id_index: instruction.instruction.program_id_index as u32, + accounts: instruction.instruction.accounts.clone(), + data: instruction.instruction.data.clone(), + stack_height: instruction.stack_height, + } + } + + pub fn create_token_balances(balances: &[TransactionTokenBalance]) -> Vec { + balances.iter().map(create_token_balance).collect() + } + + pub fn create_token_balance(balance: &TransactionTokenBalance) -> proto::TokenBalance { + proto::TokenBalance { + account_index: balance.account_index as u32, + mint: balance.mint.clone(), + ui_token_amount: Some(proto::UiTokenAmount { + ui_amount: balance.ui_token_amount.ui_amount.unwrap_or_default(), + decimals: balance.ui_token_amount.decimals as u32, + amount: balance.ui_token_amount.amount.clone(), + ui_amount_string: balance.ui_token_amount.ui_amount_string.clone(), + }), + owner: balance.owner.clone(), + program_id: balance.program_id.clone(), + } + } + + pub fn create_rewards_obj(rewards: &[Reward], num_partitions: Option) -> proto::Rewards { + proto::Rewards { + rewards: create_rewards(rewards), + num_partitions: num_partitions.map(create_num_partitions), + } + } + + pub fn create_rewards(rewards: &[Reward]) -> Vec { + rewards.iter().map(create_reward).collect() + } + + pub fn create_reward(reward: &Reward) -> proto::Reward { + proto::Reward { + pubkey: reward.pubkey.clone(), + lamports: reward.lamports, + post_balance: reward.post_balance, + reward_type: create_reward_type(reward.reward_type) as i32, + commission: reward.commission.map(|c| c.to_string()).unwrap_or_default(), + } + } + + pub const fn create_reward_type(reward_type: Option) -> proto::RewardType { + match reward_type { + None => proto::RewardType::Unspecified, + Some(RewardType::Fee) => 
proto::RewardType::Fee, + Some(RewardType::Rent) => proto::RewardType::Rent, + Some(RewardType::Staking) => proto::RewardType::Staking, + Some(RewardType::Voting) => proto::RewardType::Voting, + } + } + + pub const fn create_num_partitions(num_partitions: u64) -> proto::NumPartitions { + proto::NumPartitions { num_partitions } + } + + pub fn create_return_data(return_data: &TransactionReturnData) -> proto::ReturnData { + proto::ReturnData { + program_id: return_data.program_id.to_bytes().into(), + data: return_data.data.clone(), + } + } + + pub const fn create_block_height(block_height: u64) -> proto::BlockHeight { + proto::BlockHeight { block_height } + } + + pub const fn create_timestamp(timestamp: UnixTimestamp) -> proto::UnixTimestamp { + proto::UnixTimestamp { timestamp } + } +} + +#[cfg(feature = "convert")] +pub mod convert_from { + use { + super::prelude as proto, + solana_account::Account, + solana_account_decoder::parse_token::UiTokenAmount, + solana_hash::{Hash, HASH_BYTES}, + solana_message::{ + compiled_instruction::CompiledInstruction, + v0::{LoadedAddresses, Message as MessageV0, MessageAddressTableLookup}, + Message, MessageHeader, VersionedMessage, + }, + solana_pubkey::Pubkey, + solana_signature::Signature, + solana_transaction::versioned::VersionedTransaction, + solana_transaction_context::TransactionReturnData, + solana_transaction_error::TransactionError, + solana_transaction_status::{ + ConfirmedBlock, InnerInstruction, InnerInstructions, Reward, RewardType, + RewardsAndNumPartitions, TransactionStatusMeta, TransactionTokenBalance, + TransactionWithStatusMeta, VersionedTransactionWithStatusMeta, + }, + }; + + type CreateResult = Result; + + pub fn create_block(block: proto::SubscribeUpdateBlock) -> CreateResult { + let mut transactions = vec![]; + for tx in block.transactions { + transactions.push(create_tx_with_meta(tx)?); + } + + let block_rewards = block.rewards.ok_or("failed to get rewards")?; + let mut rewards = vec![]; + for reward in 
block_rewards.rewards { + rewards.push(create_reward(reward)?); + } + + Ok(ConfirmedBlock { + previous_blockhash: block.parent_blockhash, + blockhash: block.blockhash, + parent_slot: block.parent_slot, + transactions, + rewards, + num_partitions: block_rewards.num_partitions.map(|msg| msg.num_partitions), + block_time: Some( + block + .block_time + .map(|wrapper| wrapper.timestamp) + .ok_or("failed to get block_time")?, + ), + block_height: Some( + block + .block_height + .map(|wrapper| wrapper.block_height) + .ok_or("failed to get block_height")?, + ), + }) + } + + pub fn create_tx_with_meta( + tx: proto::SubscribeUpdateTransactionInfo, + ) -> CreateResult { + let meta = tx.meta.ok_or("failed to get transaction meta")?; + let tx = tx + .transaction + .ok_or("failed to get transaction transaction")?; + + Ok(TransactionWithStatusMeta::Complete( + VersionedTransactionWithStatusMeta { + transaction: create_tx_versioned(tx)?, + meta: create_tx_meta(meta)?, + }, + )) + } + + pub fn create_tx_versioned(tx: proto::Transaction) -> CreateResult { + let mut signatures = Vec::with_capacity(tx.signatures.len()); + for signature in tx.signatures { + signatures.push(match Signature::try_from(signature.as_slice()) { + Ok(signature) => signature, + Err(_error) => return Err("failed to parse Signature"), + }); + } + + Ok(VersionedTransaction { + signatures, + message: create_message(tx.message.ok_or("failed to get message")?)?, + }) + } + + pub fn create_message(message: proto::Message) -> CreateResult { + let header = message.header.ok_or("failed to get MessageHeader")?; + let header = MessageHeader { + num_required_signatures: header + .num_required_signatures + .try_into() + .map_err(|_| "failed to parse num_required_signatures")?, + num_readonly_signed_accounts: header + .num_readonly_signed_accounts + .try_into() + .map_err(|_| "failed to parse num_readonly_signed_accounts")?, + num_readonly_unsigned_accounts: header + .num_readonly_unsigned_accounts + .try_into() + 
.map_err(|_| "failed to parse num_readonly_unsigned_accounts")?, + }; + + if message.recent_blockhash.len() != HASH_BYTES { + return Err("failed to parse hash"); + } + + Ok(if message.versioned { + let mut address_table_lookups = Vec::with_capacity(message.address_table_lookups.len()); + for table in message.address_table_lookups { + address_table_lookups.push(MessageAddressTableLookup { + account_key: Pubkey::try_from(table.account_key.as_slice()) + .map_err(|_| "failed to parse Pubkey")?, + writable_indexes: table.writable_indexes, + readonly_indexes: table.readonly_indexes, + }); + } + + VersionedMessage::V0(MessageV0 { + header, + account_keys: create_pubkey_vec(message.account_keys)?, + recent_blockhash: Hash::new_from_array( + <[u8; HASH_BYTES]>::try_from(message.recent_blockhash.as_slice()).unwrap(), + ), + instructions: create_message_instructions(message.instructions)?, + address_table_lookups, + }) + } else { + VersionedMessage::Legacy(Message { + header, + account_keys: create_pubkey_vec(message.account_keys)?, + recent_blockhash: Hash::new_from_array( + <[u8; HASH_BYTES]>::try_from(message.recent_blockhash.as_slice()).unwrap(), + ), + instructions: create_message_instructions(message.instructions)?, + }) + }) + } + + pub fn create_message_instructions( + ixs: Vec, + ) -> CreateResult> { + ixs.into_iter().map(create_message_instruction).collect() + } + + pub fn create_message_instruction( + ix: proto::CompiledInstruction, + ) -> CreateResult { + Ok(CompiledInstruction { + program_id_index: ix + .program_id_index + .try_into() + .map_err(|_| "failed to decode CompiledInstruction.program_id_index)")?, + accounts: ix.accounts, + data: ix.data, + }) + } + + pub fn create_tx_meta( + meta: proto::TransactionStatusMeta, + ) -> CreateResult { + let meta_status = match create_tx_error(meta.err.as_ref())? 
{ + Some(err) => Err(err), + None => Ok(()), + }; + let meta_rewards = meta + .rewards + .into_iter() + .map(create_reward) + .collect::, _>>()?; + + Ok(TransactionStatusMeta { + status: meta_status, + fee: meta.fee, + pre_balances: meta.pre_balances, + post_balances: meta.post_balances, + inner_instructions: Some(create_meta_inner_instructions(meta.inner_instructions)?), + log_messages: Some(meta.log_messages), + pre_token_balances: Some(create_token_balances(meta.pre_token_balances)?), + post_token_balances: Some(create_token_balances(meta.post_token_balances)?), + rewards: Some(meta_rewards), + loaded_addresses: create_loaded_addresses( + meta.loaded_writable_addresses, + meta.loaded_readonly_addresses, + )?, + return_data: if meta.return_data_none { + None + } else { + let data = meta.return_data.ok_or("failed to get return_data")?; + Some(TransactionReturnData { + program_id: Pubkey::try_from(data.program_id.as_slice()) + .map_err(|_| "failed to parse program_id")?, + data: data.data, + }) + }, + compute_units_consumed: meta.compute_units_consumed, + cost_units: meta.cost_units, + }) + } + + pub fn create_tx_error( + err: Option<&proto::TransactionError>, + ) -> CreateResult> { + err.map(|err| bincode::deserialize::(&err.err)) + .transpose() + .map_err(|_| "failed to decode TransactionError") + } + + pub fn create_meta_inner_instructions( + ixs: Vec, + ) -> CreateResult> { + ixs.into_iter().map(create_meta_inner_instruction).collect() + } + + pub fn create_meta_inner_instruction( + ix: proto::InnerInstructions, + ) -> CreateResult { + let mut instructions = vec![]; + for ix in ix.instructions { + instructions.push(InnerInstruction { + instruction: CompiledInstruction { + program_id_index: ix + .program_id_index + .try_into() + .map_err(|_| "failed to decode CompiledInstruction.program_id_index)")?, + accounts: ix.accounts, + data: ix.data, + }, + stack_height: ix.stack_height, + }); + } + Ok(InnerInstructions { + index: ix + .index + .try_into() + .map_err(|_| 
"failed to decode InnerInstructions.index")?, + instructions, + }) + } + + pub fn create_rewards_obj(rewards: proto::Rewards) -> CreateResult { + Ok(RewardsAndNumPartitions { + rewards: rewards + .rewards + .into_iter() + .map(create_reward) + .collect::>()?, + num_partitions: rewards.num_partitions.map(|wrapper| wrapper.num_partitions), + }) + } + + pub fn create_reward(reward: proto::Reward) -> CreateResult { + Ok(Reward { + pubkey: reward.pubkey, + lamports: reward.lamports, + post_balance: reward.post_balance, + reward_type: match proto::RewardType::try_from(reward.reward_type) + .map_err(|_| "failed to parse reward_type")? + { + proto::RewardType::Unspecified => None, + proto::RewardType::Fee => Some(RewardType::Fee), + proto::RewardType::Rent => Some(RewardType::Rent), + proto::RewardType::Staking => Some(RewardType::Staking), + proto::RewardType::Voting => Some(RewardType::Voting), + }, + commission: if reward.commission.is_empty() { + None + } else { + Some( + reward + .commission + .parse() + .map_err(|_| "failed to parse reward commission")?, + ) + }, + }) + } + + pub fn create_token_balances( + balances: Vec, + ) -> CreateResult> { + let mut vec = Vec::with_capacity(balances.len()); + for balance in balances { + let ui_amount = balance + .ui_token_amount + .ok_or("failed to get ui_token_amount")?; + vec.push(TransactionTokenBalance { + account_index: balance + .account_index + .try_into() + .map_err(|_| "failed to parse account_index")?, + mint: balance.mint, + ui_token_amount: UiTokenAmount { + ui_amount: Some(ui_amount.ui_amount), + decimals: ui_amount + .decimals + .try_into() + .map_err(|_| "failed to parse decimals")?, + amount: ui_amount.amount, + ui_amount_string: ui_amount.ui_amount_string, + }, + owner: balance.owner, + program_id: balance.program_id, + }); + } + Ok(vec) + } + + pub fn create_loaded_addresses( + writable: Vec>, + readonly: Vec>, + ) -> CreateResult { + Ok(LoadedAddresses { + writable: create_pubkey_vec(writable)?, + readonly: 
create_pubkey_vec(readonly)?, + }) + } + + pub fn create_pubkey_vec(pubkeys: Vec>) -> CreateResult> { + pubkeys + .iter() + .map(|pubkey| create_pubkey(pubkey.as_slice())) + .collect() + } + + pub fn create_pubkey(pubkey: &[u8]) -> CreateResult { + Pubkey::try_from(pubkey).map_err(|_| "failed to parse Pubkey") + } + + pub fn create_account( + account: proto::SubscribeUpdateAccountInfo, + ) -> CreateResult<(Pubkey, Account)> { + let pubkey = create_pubkey(&account.pubkey)?; + let account = Account { + lamports: account.lamports, + data: account.data, + owner: create_pubkey(&account.owner)?, + executable: account.executable, + rent_epoch: account.rent_epoch, + }; + Ok((pubkey, account)) + } +} diff --git a/vendor/laserstream-core-proto/src/plugin/filter/filter.rs b/vendor/laserstream-core-proto/src/plugin/filter/filter.rs new file mode 100644 index 00000000..c90b40d5 --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/filter/filter.rs @@ -0,0 +1,1617 @@ +use { + crate::{ + geyser::{ + subscribe_request_filter_accounts_filter::Filter as AccountsFilterDataOneof, + subscribe_request_filter_accounts_filter_lamports::Cmp as AccountsFilterLamports, + subscribe_request_filter_accounts_filter_memcmp::Data as AccountsFilterMemcmpOneof, + CommitmentLevel as CommitmentLevelProto, SubscribeRequest, + SubscribeRequestAccountsDataSlice, SubscribeRequestFilterAccounts, + SubscribeRequestFilterAccountsFilter, SubscribeRequestFilterAccountsFilterLamports, + SubscribeRequestFilterBlocks, SubscribeRequestFilterBlocksMeta, + SubscribeRequestFilterEntry, SubscribeRequestFilterSlots, + SubscribeRequestFilterTransactions, + }, + plugin::{ + filter::{ + limits::{ + FilterLimits, FilterLimitsAccounts, FilterLimitsBlocks, FilterLimitsBlocksMeta, + FilterLimitsCheckError, FilterLimitsEntries, FilterLimitsSlots, + FilterLimitsTransactions, + }, + message::{ + FilteredUpdate, FilteredUpdateBlock, FilteredUpdateFilters, + FilteredUpdateOneof, FilteredUpdates, + }, + name::{FilterName, 
FilterNameError, FilterNames}, + }, + message::{ + CommitmentLevel, Message, MessageAccount, MessageBlock, MessageBlockMeta, + MessageEntry, MessageSlot, MessageTransaction, SlotStatus, + }, + }, + }, + base64::{engine::general_purpose::STANDARD as base64_engine, Engine}, + bytes::buf::BufMut, + prost::encoding::{encode_key, encode_varint, WireType}, + solana_pubkey::{ParsePubkeyError, Pubkey}, + solana_signature::{ParseSignatureError, Signature}, + spl_token_2022::{generic_token_account::GenericTokenAccount, state::Account as TokenAccount}, + std::{ + collections::{HashMap, HashSet}, + ops::Range, + str::FromStr, + sync::Arc, + }, +}; + +#[derive(Debug, thiserror::Error)] +pub enum FilterError { + #[error(transparent)] + Name(#[from] FilterNameError), + #[error(transparent)] + LimitsCheck(#[from] FilterLimitsCheckError), + + #[error("failed to create CommitmentLevel from {commitment}")] + InvalidCommitment { commitment: i32 }, + #[error(transparent)] + InvalidPubkey(#[from] ParsePubkeyError), + #[error(transparent)] + InvalidSignature(#[from] ParseSignatureError), + + #[error("Too many filters provided; max {max}")] + CreateAccountStateMaxFilters { max: usize }, + #[error("{0}")] + CreateAccountState(&'static str), + #[error("`include_{0}` is not allowed")] + CreateBlocksNotAllowed(&'static str), + #[error("failed to create filter: data slices out of order")] + CreateDataSliceOutOfOrder, + #[error("failed to create filter: data slices overlapped")] + CreateDataSliceOverlap, +} + +pub type FilterResult = Result; + +macro_rules! filtered_updates_once_owned { + ($filters:ident, $message:expr, $created_at:expr) => {{ + let mut messages = FilteredUpdates::new(); + if !$filters.is_empty() { + messages.push(FilteredUpdate::new($filters, $message, $created_at)); + } + messages + }}; +} + +macro_rules! 
filtered_updates_once_ref { + ($filters:ident, $message:expr, $created_at:expr) => {{ + let mut messages = FilteredUpdates::new(); + if !$filters.is_empty() { + let mut message_filters = FilteredUpdateFilters::new(); + for filter in $filters { + message_filters.push(filter.clone()); + } + messages.push(FilteredUpdate::new(message_filters, $message, $created_at)); + } + messages + }}; +} + +#[derive(Debug, Clone)] +pub struct Filter { + accounts: FilterAccounts, + slots: FilterSlots, + transactions: FilterTransactions, + transactions_status: FilterTransactions, + entries: FilterEntries, + blocks: FilterBlocks, + blocks_meta: FilterBlocksMeta, + commitment: CommitmentLevel, + accounts_data_slice: FilterAccountsDataSlice, + ping: Option, +} + +impl Default for Filter { + fn default() -> Self { + Self { + accounts: FilterAccounts::default(), + slots: FilterSlots::default(), + transactions: FilterTransactions { + filter_type: FilterTransactionsType::Transaction, + filters: HashMap::new(), + }, + transactions_status: FilterTransactions { + filter_type: FilterTransactionsType::TransactionStatus, + filters: HashMap::new(), + }, + entries: FilterEntries::default(), + blocks: FilterBlocks::default(), + blocks_meta: FilterBlocksMeta::default(), + commitment: CommitmentLevel::Processed, + accounts_data_slice: FilterAccountsDataSlice::default(), + ping: None, + } + } +} + +impl Filter { + pub fn new( + config: &SubscribeRequest, + limits: &FilterLimits, + names: &mut FilterNames, + ) -> FilterResult { + Ok(Self { + accounts: FilterAccounts::new(&config.accounts, &limits.accounts, names)?, + slots: FilterSlots::new(&config.slots, &limits.slots, names)?, + transactions: FilterTransactions::new( + &config.transactions, + &limits.transactions, + FilterTransactionsType::Transaction, + names, + )?, + transactions_status: FilterTransactions::new( + &config.transactions_status, + &limits.transactions_status, + FilterTransactionsType::TransactionStatus, + names, + )?, + entries: 
FilterEntries::new(&config.entry, &limits.entries, names)?, + blocks: FilterBlocks::new(&config.blocks, &limits.blocks, names)?, + blocks_meta: FilterBlocksMeta::new(&config.blocks_meta, &limits.blocks_meta, names)?, + commitment: Self::decode_commitment(config.commitment)?, + accounts_data_slice: FilterAccountsDataSlice::new( + &config.accounts_data_slice, + limits.accounts.data_slice_max, + )?, + ping: config.ping.as_ref().map(|msg| msg.id), + }) + } + + fn decode_commitment(commitment: Option) -> FilterResult { + let commitment = commitment.unwrap_or(CommitmentLevelProto::Processed as i32); + let commitment = CommitmentLevelProto::try_from(commitment) + .map(Into::into) + .map_err(|_error| FilterError::InvalidCommitment { commitment })?; + if !matches!( + commitment, + CommitmentLevel::Processed | CommitmentLevel::Confirmed | CommitmentLevel::Finalized + ) { + Err(FilterError::InvalidCommitment { + commitment: commitment as i32, + }) + } else { + Ok(commitment) + } + } + + fn decode_pubkeys<'a>( + pubkeys: &'a [String], + limit: &'a HashSet, + ) -> impl Iterator> + 'a { + pubkeys.iter().map(|value| { + let pubkey = Pubkey::from_str(value)?; + FilterLimits::check_pubkey_reject(&pubkey, limit)?; + Ok(pubkey) + }) + } + + fn decode_pubkeys_into_set( + pubkeys: &[String], + limit: &HashSet, + ) -> FilterResult> { + Self::decode_pubkeys(pubkeys, limit).collect::>() + } + + pub fn get_metrics(&self) -> [(&'static str, usize); 8] { + [ + ("accounts", self.accounts.filters.len()), + ("slots", self.slots.filters.len()), + ("transactions", self.transactions.filters.len()), + ( + "transactions_status", + self.transactions_status.filters.len(), + ), + ("entries", self.entries.filters.len()), + ("blocks", self.blocks.filters.len()), + ("blocks_meta", self.blocks_meta.filters.len()), + ( + "all", + self.accounts.filters.len() + + self.slots.filters.len() + + self.transactions.filters.len() + + self.transactions_status.filters.len() + + self.entries.filters.len() + + 
self.blocks.filters.len() + + self.blocks_meta.filters.len(), + ), + ] + } + + pub const fn get_commitment_level(&self) -> CommitmentLevel { + self.commitment + } + + pub fn get_updates( + &self, + message: &Message, + commitment: Option, + ) -> FilteredUpdates { + match message { + Message::Account(message) => self + .accounts + .get_updates(message, &self.accounts_data_slice), + Message::Slot(message) => self.slots.get_updates(message, commitment), + Message::Transaction(message) => { + let mut updates = self.transactions.get_updates(message); + updates.append(&mut self.transactions_status.get_updates(message)); + updates + } + Message::Entry(message) => self.entries.get_updates(message), + Message::Block(message) => self.blocks.get_updates(message, &self.accounts_data_slice), + Message::BlockMeta(message) => self.blocks_meta.get_updates(message), + } + } + + pub fn get_pong_msg(&self) -> Option { + self.ping + .map(|id| FilteredUpdate::new_empty(FilteredUpdateOneof::pong(id))) + } +} + +#[derive(Debug, Default, Clone)] +struct FilterAccounts { + nonempty_txn_signature: Vec<(FilterName, Option)>, + nonempty_txn_signature_required: HashSet, + account: HashMap>, + account_required: HashSet, + owner: HashMap>, + owner_required: HashSet, + filters: Vec<(FilterName, FilterAccountsState)>, +} + +impl FilterAccounts { + fn new( + configs: &HashMap, + limits: &FilterLimitsAccounts, + names: &mut FilterNames, + ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + let mut this = Self::default(); + for (name, filter) in configs { + this.nonempty_txn_signature + .push((names.get(name)?, filter.nonempty_txn_signature)); + if filter.nonempty_txn_signature.is_some() { + this.nonempty_txn_signature_required + .insert(names.get(name)?); + } + + FilterLimits::check_any( + filter.account.is_empty() && filter.owner.is_empty(), + limits.any, + )?; + FilterLimits::check_pubkey_max(filter.account.len(), limits.account_max)?; + 
FilterLimits::check_pubkey_max(filter.owner.len(), limits.owner_max)?; + + Self::set( + &mut this.account, + &mut this.account_required, + name, + names, + Filter::decode_pubkeys(&filter.account, &limits.account_reject), + )?; + + Self::set( + &mut this.owner, + &mut this.owner_required, + name, + names, + Filter::decode_pubkeys(&filter.owner, &limits.owner_reject), + )?; + + this.filters + .push((names.get(name)?, FilterAccountsState::new(&filter.filters)?)); + } + Ok(this) + } + + fn set( + map: &mut HashMap>, + map_required: &mut HashSet, + name: &str, + names: &mut FilterNames, + keys: impl Iterator>, + ) -> FilterResult { + let mut required = false; + for maybe_key in keys { + if map.entry(maybe_key?).or_default().insert(names.get(name)?) { + required = true; + } + } + + if required { + map_required.insert(names.get(name)?); + } + Ok(required) + } + + fn get_updates( + &self, + message: &MessageAccount, + accounts_data_slice: &FilterAccountsDataSlice, + ) -> FilteredUpdates { + let mut filter = FilterAccountsMatch::new(self); + filter.match_txn_signature(&message.account.txn_signature); + filter.match_account(&message.account.pubkey); + filter.match_owner(&message.account.owner); + filter.match_data_lamports(&message.account.data, message.account.lamports); + let filters = filter.get_filters(); + filtered_updates_once_owned!( + filters, + FilteredUpdateOneof::account(message, accounts_data_slice.clone()), + message.created_at + ) + } +} + +#[derive(Debug, Default, Clone)] +struct FilterAccountsState { + memcmp: Vec<(usize, Vec)>, + datasize: Option, + token_account_state: bool, + lamports: Vec, +} + +impl FilterAccountsState { + fn new(filters: &[SubscribeRequestFilterAccountsFilter]) -> FilterResult { + const MAX_FILTERS: usize = 4; + const MAX_DATA_SIZE: usize = 128; + const MAX_DATA_BASE58_SIZE: usize = 175; + const MAX_DATA_BASE64_SIZE: usize = 172; + + if filters.len() > MAX_FILTERS { + return Err(FilterError::CreateAccountStateMaxFilters { max: 
MAX_FILTERS }); + } + + let mut this = Self::default(); + for filter in filters { + match &filter.filter { + Some(AccountsFilterDataOneof::Memcmp(memcmp)) => { + let data = match &memcmp.data { + Some(AccountsFilterMemcmpOneof::Bytes(data)) => data.clone(), + Some(AccountsFilterMemcmpOneof::Base58(data)) => { + if data.len() > MAX_DATA_BASE58_SIZE { + return Err(FilterError::CreateAccountState("data too large")); + } + bs58::decode(data) + .into_vec() + .map_err(|_| FilterError::CreateAccountState("invalid base58"))? + } + Some(AccountsFilterMemcmpOneof::Base64(data)) => { + if data.len() > MAX_DATA_BASE64_SIZE { + return Err(FilterError::CreateAccountState("data too large")); + } + base64_engine + .decode(data) + .map_err(|_| FilterError::CreateAccountState("invalid base64"))? + } + None => { + return Err(FilterError::CreateAccountState( + "data for memcmp should be defined", + )) + } + }; + if data.len() > MAX_DATA_SIZE { + return Err(FilterError::CreateAccountState("data too large")); + } + this.memcmp.push((memcmp.offset as usize, data)); + } + Some(AccountsFilterDataOneof::Datasize(datasize)) => { + if this.datasize.replace(*datasize as usize).is_some() { + return Err(FilterError::CreateAccountState( + "datasize used more than once", + )); + } + } + Some(AccountsFilterDataOneof::TokenAccountState(value)) => { + if !value { + return Err(FilterError::CreateAccountState( + "token_account_state only allowed to be true", + )); + } + this.token_account_state = true; + } + Some(AccountsFilterDataOneof::Lamports( + SubscribeRequestFilterAccountsFilterLamports { cmp }, + )) => { + let Some(cmp) = cmp else { + return Err(FilterError::CreateAccountState( + "cmp for lamports should be defined", + )); + }; + this.lamports.push(cmp.into()); + } + None => { + return Err(FilterError::CreateAccountState("filter should be defined")); + } + } + } + Ok(this) + } + + fn is_empty(&self) -> bool { + self.memcmp.is_empty() + && self.datasize.is_none() + && !self.token_account_state + 
&& self.lamports.is_empty() + } + + fn is_match(&self, data: &[u8], lamports: u64) -> bool { + if matches!(self.datasize, Some(datasize) if data.len() != datasize) { + return false; + } + if self.token_account_state && !TokenAccount::valid_account_data(data) { + return false; + } + if self.lamports.iter().any(|f| !f.is_match(lamports)) { + return false; + } + for (offset, bytes) in self.memcmp.iter() { + if data.len() < *offset + bytes.len() { + return false; + } + let data = &data[*offset..*offset + bytes.len()]; + if data != bytes { + return false; + } + } + true + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum FilterAccountsLamports { + Eq(u64), + Ne(u64), + Lt(u64), + Gt(u64), +} + +impl From<&AccountsFilterLamports> for FilterAccountsLamports { + fn from(cmp: &AccountsFilterLamports) -> Self { + match cmp { + AccountsFilterLamports::Eq(value) => Self::Eq(*value), + AccountsFilterLamports::Ne(value) => Self::Ne(*value), + AccountsFilterLamports::Lt(value) => Self::Lt(*value), + AccountsFilterLamports::Gt(value) => Self::Gt(*value), + } + } +} + +impl FilterAccountsLamports { + const fn is_match(self, lamports: u64) -> bool { + match self { + Self::Eq(value) => value == lamports, + Self::Ne(value) => value != lamports, + Self::Lt(value) => value > lamports, + Self::Gt(value) => value < lamports, + } + } +} + +#[derive(Debug)] +struct FilterAccountsMatch<'a> { + filter: &'a FilterAccounts, + nonempty_txn_signature: HashSet<&'a str>, + account: HashSet<&'a str>, + owner: HashSet<&'a str>, + data: HashSet<&'a str>, +} + +impl<'a> FilterAccountsMatch<'a> { + fn new(filter: &'a FilterAccounts) -> Self { + Self { + filter, + nonempty_txn_signature: Default::default(), + account: Default::default(), + owner: Default::default(), + data: Default::default(), + } + } + + fn extend( + set: &mut HashSet<&'a str>, + map: &'a HashMap>, + key: &Pubkey, + ) { + if let Some(names) = map.get(key) { + for name in names { + set.insert(name.as_ref()); + } + } + } + + fn 
match_txn_signature(&mut self, txn_signature: &Option) { + for (name, filter) in self.filter.nonempty_txn_signature.iter() { + if let Some(nonempty_txn_signature) = filter { + if *nonempty_txn_signature == txn_signature.is_some() { + self.nonempty_txn_signature.insert(name.as_ref()); + } + } + } + } + + fn match_account(&mut self, pubkey: &Pubkey) { + Self::extend(&mut self.account, &self.filter.account, pubkey) + } + + fn match_owner(&mut self, pubkey: &Pubkey) { + Self::extend(&mut self.owner, &self.filter.owner, pubkey) + } + + fn match_data_lamports(&mut self, data: &[u8], lamports: u64) { + for (name, filter) in self.filter.filters.iter() { + if filter.is_match(data, lamports) { + self.data.insert(name.as_ref()); + } + } + } + + fn get_filters(&self) -> FilteredUpdateFilters { + self.filter + .filters + .iter() + .filter_map(|(filter_name, filter)| { + let name = filter_name.as_ref(); + let af = &self.filter; + + // If filter name in required but not in matched => return `false` + if af.nonempty_txn_signature_required.contains(name) + && !self.nonempty_txn_signature.contains(name) + { + return None; + } + if af.account_required.contains(name) && !self.account.contains(name) { + return None; + } + if af.owner_required.contains(name) && !self.owner.contains(name) { + return None; + } + if !filter.is_empty() && !self.data.contains(name) { + return None; + } + + Some(filter_name.clone()) + }) + .collect() + } +} + +#[derive(Debug, Default, Clone, Copy)] +struct FilterSlotsInner { + filter_by_commitment: bool, + interslot_updates: bool, +} + +impl FilterSlotsInner { + fn new(filter: SubscribeRequestFilterSlots) -> Self { + Self { + filter_by_commitment: filter.filter_by_commitment.unwrap_or_default(), + interslot_updates: filter.interslot_updates.unwrap_or_default(), + } + } +} + +#[derive(Debug, Default, Clone)] +struct FilterSlots { + filters: HashMap, +} + +impl FilterSlots { + fn new( + configs: &HashMap, + limits: &FilterLimitsSlots, + names: &mut FilterNames, 
+ ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + Ok(Self { + filters: configs + .iter() + .map(|(name, filter)| { + names + .get(name) + .map(|name| (name, FilterSlotsInner::new(*filter))) + }) + .collect::>()?, + }) + } + + fn get_updates( + &self, + message: &MessageSlot, + commitment: Option, + ) -> FilteredUpdates { + let filters = self + .filters + .iter() + .filter_map(|(name, inner)| { + if (!inner.filter_by_commitment + || commitment + .map(|commitment| commitment == message.status) + .unwrap_or(false)) + && (inner.interslot_updates + || matches!( + message.status, + SlotStatus::Processed | SlotStatus::Confirmed | SlotStatus::Finalized + )) + { + Some(name.clone()) + } else { + None + } + }) + .collect::(); + filtered_updates_once_owned!( + filters, + FilteredUpdateOneof::slot(message.clone()), + message.created_at + ) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum FilterTransactionsType { + Transaction, + TransactionStatus, +} + +#[derive(Debug, Clone)] +struct FilterTransactionsInner { + vote: Option, + failed: Option, + signature: Option, + account_include: HashSet, + account_exclude: HashSet, + account_required: HashSet, +} + +#[derive(Debug, Clone)] +struct FilterTransactions { + filter_type: FilterTransactionsType, + filters: HashMap, +} + +impl FilterTransactions { + fn new( + configs: &HashMap, + limits: &FilterLimitsTransactions, + filter_type: FilterTransactionsType, + names: &mut FilterNames, + ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + let mut filters = HashMap::new(); + for (name, filter) in configs { + FilterLimits::check_any( + filter.vote.is_none() + && filter.failed.is_none() + && filter.account_include.is_empty() + && filter.account_exclude.is_empty() + && filter.account_required.is_empty(), + limits.any, + )?; + FilterLimits::check_pubkey_max( + filter.account_include.len(), + limits.account_include_max, + )?; + FilterLimits::check_pubkey_max( + 
filter.account_exclude.len(), + limits.account_exclude_max, + )?; + FilterLimits::check_pubkey_max( + filter.account_required.len(), + limits.account_required_max, + )?; + + filters.insert( + names.get(name)?, + FilterTransactionsInner { + vote: filter.vote, + failed: filter.failed, + signature: filter + .signature + .as_ref() + .map(|signature_str| { + signature_str.parse().map_err(FilterError::InvalidSignature) + }) + .transpose()?, + account_include: Filter::decode_pubkeys_into_set( + &filter.account_include, + &limits.account_include_reject, + )?, + account_exclude: Filter::decode_pubkeys_into_set( + &filter.account_exclude, + &HashSet::new(), + )?, + account_required: Filter::decode_pubkeys_into_set( + &filter.account_required, + &HashSet::new(), + )?, + }, + ); + } + Ok(Self { + filter_type, + filters, + }) + } + + pub fn get_updates(&self, message: &MessageTransaction) -> FilteredUpdates { + let filters = self + .filters + .iter() + .filter_map(|(name, inner)| { + if let Some(is_vote) = inner.vote { + if is_vote != message.transaction.is_vote { + return None; + } + } + + if let Some(is_failed) = inner.failed { + if is_failed != message.transaction.meta.err.is_some() { + return None; + } + } + + if let Some(signature) = &inner.signature { + let tx_sig = message.transaction.transaction.signatures.first(); + if Some(signature.as_ref()) != tx_sig.map(|sig| sig.as_ref()) { + return None; + } + } + + if !inner.account_include.is_empty() + && inner + .account_include + .intersection(&message.transaction.account_keys) + .next() + .is_none() + { + return None; + } + + if !inner.account_exclude.is_empty() + && inner + .account_exclude + .intersection(&message.transaction.account_keys) + .next() + .is_some() + { + return None; + } + + if !inner.account_required.is_empty() + && !inner + .account_required + .is_subset(&message.transaction.account_keys) + { + return None; + } + + Some(name.clone()) + }) + .collect::(); + + filtered_updates_once_owned!( + filters, + match 
self.filter_type { + FilterTransactionsType::Transaction => FilteredUpdateOneof::transaction(message), + FilterTransactionsType::TransactionStatus => { + FilteredUpdateOneof::transaction_status(message) + } + }, + message.created_at + ) + } +} + +#[derive(Debug, Default, Clone)] +struct FilterEntries { + filters: Vec, +} + +impl FilterEntries { + fn new( + configs: &HashMap, + limits: &FilterLimitsEntries, + names: &mut FilterNames, + ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + Ok(Self { + filters: configs + .iter() + .map(|(name, _filter)| names.get(name)) + .collect::>()?, + }) + } + + fn get_updates(&self, message: &Arc) -> FilteredUpdates { + let filters = self.filters.as_slice(); + filtered_updates_once_ref!( + filters, + FilteredUpdateOneof::entry(Arc::clone(message)), + message.created_at + ) + } +} + +#[derive(Debug, Clone)] +struct FilterBlocksInner { + account_include: HashSet, + include_transactions: Option, + include_accounts: Option, + include_entries: Option, +} + +#[derive(Debug, Default, Clone)] +struct FilterBlocks { + filters: HashMap, +} + +impl FilterBlocks { + fn new( + configs: &HashMap, + limits: &FilterLimitsBlocks, + names: &mut FilterNames, + ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + let mut this = Self::default(); + for (name, filter) in configs { + FilterLimits::check_any( + filter.account_include.is_empty(), + limits.account_include_any, + )?; + FilterLimits::check_pubkey_max( + filter.account_include.len(), + limits.account_include_max, + )?; + if !(filter.include_transactions == Some(false) || limits.include_transactions) { + return Err(FilterError::CreateBlocksNotAllowed("transactions")); + } + if !(matches!(filter.include_accounts, None | Some(false)) || limits.include_accounts) { + return Err(FilterError::CreateBlocksNotAllowed("accounts")); + } + if !(matches!(filter.include_entries, None | Some(false)) || limits.include_accounts) { + return 
Err(FilterError::CreateBlocksNotAllowed("entries")); + } + + this.filters.insert( + names.get(name)?, + FilterBlocksInner { + account_include: Filter::decode_pubkeys_into_set( + &filter.account_include, + &limits.account_include_reject, + )?, + include_transactions: filter.include_transactions, + include_accounts: filter.include_accounts, + include_entries: filter.include_entries, + }, + ); + } + Ok(this) + } + + fn get_updates( + &self, + message: &Arc, + accounts_data_slice: &FilterAccountsDataSlice, + ) -> FilteredUpdates { + let mut updates = FilteredUpdates::new(); + for (filter, inner) in self.filters.iter() { + #[allow(clippy::unnecessary_filter_map)] + let transactions = if matches!(inner.include_transactions, None | Some(true)) { + message + .transactions + .iter() + .filter_map(|tx| { + if !inner.account_include.is_empty() + && inner + .account_include + .intersection(&tx.account_keys) + .next() + .is_none() + { + None + } else { + Some(Arc::clone(tx)) + } + }) + .collect::>() + } else { + vec![] + }; + + #[allow(clippy::unnecessary_filter_map)] + let accounts = if inner.include_accounts == Some(true) { + message + .accounts + .iter() + .filter_map(|account| { + if !inner.account_include.is_empty() + && !inner.account_include.contains(&account.pubkey) + { + None + } else { + Some(Arc::clone(account)) + } + }) + .collect::>() + } else { + vec![] + }; + + let entries = if inner.include_entries == Some(true) { + message.entries.to_vec() + } else { + vec![] + }; + + let mut filters = FilteredUpdateFilters::new(); + filters.push(filter.clone()); + updates.push(FilteredUpdate::new( + filters, + FilteredUpdateOneof::block(Box::new(FilteredUpdateBlock { + meta: Arc::clone(&message.meta), + transactions, + updated_account_count: message.updated_account_count, + accounts_data_slice: accounts_data_slice.clone(), + accounts, + entries, + })), + message.created_at, + )); + } + updates + } +} + +#[derive(Debug, Default, Clone)] +struct FilterBlocksMeta { + filters: 
Vec, +} + +impl FilterBlocksMeta { + fn new( + configs: &HashMap, + limits: &FilterLimitsBlocksMeta, + names: &mut FilterNames, + ) -> FilterResult { + FilterLimits::check_max(configs.len(), limits.max)?; + + Ok(Self { + filters: configs + .iter() + .map(|(name, _filter)| names.get(name)) + .collect::>()?, + }) + } + + fn get_updates(&self, message: &Arc) -> FilteredUpdates { + let filters = self.filters.as_slice(); + filtered_updates_once_ref!( + filters, + FilteredUpdateOneof::block_meta(Arc::clone(message)), + message.created_at + ) + } +} + +#[derive(Debug, Clone, Default, PartialEq)] +pub struct FilterAccountsDataSlice(Arc>>); + +impl AsRef<[Range]> for FilterAccountsDataSlice { + #[inline] + fn as_ref(&self) -> &[Range] { + &self.0 + } +} + +impl FilterAccountsDataSlice { + pub fn new(slices: &[SubscribeRequestAccountsDataSlice], limits: usize) -> FilterResult { + FilterLimits::check_max(slices.len(), limits)?; + + let slices = slices + .iter() + .map(|s| Range { + start: s.offset as usize, + end: (s.offset + s.length) as usize, + }) + .collect::>(); + + for (i, slice_a) in slices.iter().enumerate() { + // check order + for slice_b in slices[i + 1..].iter() { + if slice_a.start > slice_b.start { + return Err(FilterError::CreateDataSliceOutOfOrder); + } + } + + // check overlap + for slice_b in slices[0..i].iter() { + if slice_a.start < slice_b.end { + return Err(FilterError::CreateDataSliceOverlap); + } + } + } + + Ok(Self::new_unchecked(Arc::new(slices))) + } + + pub const fn new_unchecked(slices: Arc>>) -> Self { + Self(slices) + } + + pub fn get_slice(&self, source: &[u8]) -> Vec { + if self.0.is_empty() { + source.to_vec() + } else { + let mut data = Vec::with_capacity(self.0.iter().map(|ds| ds.end - ds.start).sum()); + for data_slice in self.0.iter() { + if source.len() >= data_slice.end { + data.extend_from_slice(&source[data_slice.start..data_slice.end]); + } + } + data + } + } + + pub fn get_slice_len(&self, source: &[u8]) -> usize { + if 
self.0.is_empty() { + source.len() + } else { + let mut len = 0; + for slice in self.0.iter() { + if source.len() >= slice.end { + len += source[slice.start..slice.end].len(); + } + } + len + } + } + + pub fn slice_encode_raw(&self, tag: u32, source: &[u8], buf: &mut impl BufMut) { + let len = self.get_slice_len(source) as u64; + if len > 0 { + encode_key(tag, WireType::LengthDelimited, buf); + encode_varint(len, buf); + + if self.0.is_empty() { + buf.put_slice(source); + } else { + for data_slice in self.0.iter() { + if source.len() >= data_slice.end { + buf.put_slice(&source[data_slice.start..data_slice.end]); + } + } + } + } + } +} + +#[cfg(test)] +mod tests { + use { + super::Filter, + crate::{ + convert_to, + geyser::{ + SubscribeRequest, SubscribeRequestFilterAccounts, + SubscribeRequestFilterTransactions, + }, + plugin::{ + filter::{ + limits::FilterLimits, + message::{FilteredUpdateFilters, FilteredUpdateOneof}, + name::{FilterName, FilterNames}, + }, + message::{Message, MessageTransaction, MessageTransactionInfo}, + }, + }, + prost_types::Timestamp, + solana_hash::Hash, + solana_keypair::Keypair, + solana_message::{v0::LoadedAddresses, Message as SolMessage, MessageHeader}, + solana_pubkey::Pubkey, + solana_signer::Signer, + solana_transaction::{sanitized::SanitizedTransaction, Transaction}, + solana_transaction_status::TransactionStatusMeta, + std::{ + collections::HashMap, + sync::Arc, + time::{Duration, SystemTime}, + }, + }; + + fn create_filter_names() -> FilterNames { + FilterNames::new(64, 1024, Duration::from_secs(1)) + } + + fn create_message_transaction( + keypair: &Keypair, + account_keys: Vec, + ) -> MessageTransaction { + let message = SolMessage { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys, + ..SolMessage::default() + }; + let recent_blockhash = Hash::default(); + let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests( + Transaction::new(&[keypair], message, 
recent_blockhash), + ); + let meta = convert_to::create_transaction_meta(&TransactionStatusMeta { + status: Ok(()), + fee: 0, + pre_balances: vec![], + post_balances: vec![], + inner_instructions: None, + log_messages: None, + pre_token_balances: None, + post_token_balances: None, + rewards: None, + loaded_addresses: LoadedAddresses::default(), + return_data: None, + compute_units_consumed: None, + cost_units: None, + }); + let sig = sanitized_transaction.signature(); + let account_keys = sanitized_transaction + .message() + .account_keys() + .iter() + .copied() + .collect(); + MessageTransaction { + transaction: Arc::new(MessageTransactionInfo { + signature: *sig, + is_vote: true, + transaction: convert_to::create_transaction(&sanitized_transaction), + meta, + index: 1, + account_keys, + }), + slot: 100, + created_at: Timestamp::from(SystemTime::now()), + } + } + + #[test] + fn test_filters_all_empty() { + // ensure Filter can be created with empty values + let config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions: HashMap::new(), + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()); + assert!(filter.is_ok()); + } + + #[test] + fn test_filters_account_empty() { + let mut accounts = HashMap::new(); + + accounts.insert( + "solend".to_owned(), + SubscribeRequestFilterAccounts { + nonempty_txn_signature: None, + account: vec![], + owner: vec![], + filters: vec![], + }, + ); + + let config = SubscribeRequest { + accounts, + slots: HashMap::new(), + transactions: HashMap::new(), + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: 
None, + from_slot: None, + }; + let mut limit = FilterLimits::default(); + limit.accounts.any = false; + let filter = Filter::new(&config, &limit, &mut create_filter_names()); + // filter should fail + assert!(filter.is_err()); + } + + #[test] + fn test_filters_transaction_empty() { + let mut transactions = HashMap::new(); + + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include: vec![], + account_exclude: vec![], + account_required: vec![], + }, + ); + + let config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions, + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let mut limit = FilterLimits::default(); + limit.transactions.any = false; + let filter = Filter::new(&config, &limit, &mut create_filter_names()); + // filter should fail + assert!(filter.is_err()); + } + + #[test] + fn test_filters_transaction_not_null() { + let mut transactions = HashMap::new(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: Some(true), + failed: None, + signature: None, + account_include: vec![], + account_exclude: vec![], + account_required: vec![], + }, + ); + + let config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions, + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let mut limit = FilterLimits::default(); + limit.transactions.any = false; + let filter_res = Filter::new(&config, &limit, &mut create_filter_names()); + // filter should succeed + assert!(filter_res.is_ok()); + } + + #[test] + fn test_transaction_include_a() { + let mut 
transactions = HashMap::new(); + + let keypair_a = Keypair::new(); + let account_key_a = keypair_a.pubkey(); + let keypair_b = Keypair::new(); + let account_key_b = keypair_b.pubkey(); + let account_include = [account_key_a].iter().map(|k| k.to_string()).collect(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include, + account_exclude: vec![], + account_required: vec![], + }, + ); + + let mut config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions: transactions.clone(), + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + + let message_transaction = + create_message_transaction(&keypair_b, vec![account_key_b, account_key_a]); + let message = Message::Transaction(message_transaction); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 1); + assert_eq!( + updates[0].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[0].message, + FilteredUpdateOneof::Transaction(_) + )); + + config.transactions_status = transactions; + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 2); + assert_eq!( + updates[1].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[1].message, + FilteredUpdateOneof::TransactionStatus(_) + )); + } + + #[test] + fn test_transaction_include_b() { + let mut transactions = HashMap::new(); + + let keypair_a = Keypair::new(); + let account_key_a = keypair_a.pubkey(); + let keypair_b = 
Keypair::new(); + let account_key_b = keypair_b.pubkey(); + let account_include = [account_key_b].iter().map(|k| k.to_string()).collect(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include, + account_exclude: vec![], + account_required: vec![], + }, + ); + + let mut config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions: transactions.clone(), + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + + let message_transaction = + create_message_transaction(&keypair_b, vec![account_key_b, account_key_a]); + let message = Message::Transaction(message_transaction); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 1); + assert_eq!( + updates[0].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[0].message, + FilteredUpdateOneof::Transaction(_) + )); + + config.transactions_status = transactions; + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 2); + assert_eq!( + updates[1].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[1].message, + FilteredUpdateOneof::TransactionStatus(_) + )); + } + + #[test] + fn test_transaction_exclude() { + let mut transactions = HashMap::new(); + + let keypair_a = Keypair::new(); + let account_key_a = keypair_a.pubkey(); + let keypair_b = Keypair::new(); + let account_key_b = keypair_b.pubkey(); + let account_exclude = [account_key_b].iter().map(|k| 
k.to_string()).collect(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include: vec![], + account_exclude, + account_required: vec![], + }, + ); + + let config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions, + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + }; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + + let message_transaction = + create_message_transaction(&keypair_b, vec![account_key_b, account_key_a]); + let message = Message::Transaction(message_transaction); + for message in filter.get_updates(&message, None) { + assert!(message.filters.is_empty()); + } + } + + #[test] + fn test_transaction_required_x_include_y_z_case001() { + let mut transactions = HashMap::new(); + + let keypair_x = Keypair::new(); + let account_key_x = keypair_x.pubkey(); + let account_key_y = Pubkey::new_unique(); + let account_key_z = Pubkey::new_unique(); + + // require x, include y, z + let account_include = [account_key_y, account_key_z] + .iter() + .map(|k| k.to_string()) + .collect(); + let account_required = [account_key_x].iter().map(|k| k.to_string()).collect(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include, + account_exclude: vec![], + account_required, + }, + ); + + let mut config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions: transactions.clone(), + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + from_slot: None, + 
}; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + + let message_transaction = create_message_transaction( + &keypair_x, + vec![account_key_x, account_key_y, account_key_z], + ); + let message = Message::Transaction(message_transaction); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 1); + assert_eq!( + updates[0].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[0].message, + FilteredUpdateOneof::Transaction(_) + )); + + config.transactions_status = transactions; + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + let updates = filter.get_updates(&message, None); + assert_eq!(updates.len(), 2); + assert_eq!( + updates[1].filters, + FilteredUpdateFilters::from_vec(vec![FilterName::new("serum")]) + ); + assert!(matches!( + updates[1].message, + FilteredUpdateOneof::TransactionStatus(_) + )); + } + + #[test] + fn test_transaction_required_y_z_include_x() { + let mut transactions = HashMap::new(); + + let keypair_x = Keypair::new(); + let account_key_x = keypair_x.pubkey(); + let account_key_y = Pubkey::new_unique(); + let account_key_z = Pubkey::new_unique(); + + // require x, include y, z + let account_include = [account_key_x].iter().map(|k| k.to_string()).collect(); + let account_required = [account_key_y, account_key_z] + .iter() + .map(|k| k.to_string()) + .collect(); + transactions.insert( + "serum".to_string(), + SubscribeRequestFilterTransactions { + vote: None, + failed: None, + signature: None, + account_include, + account_exclude: vec![], + account_required, + }, + ); + + let config = SubscribeRequest { + accounts: HashMap::new(), + slots: HashMap::new(), + transactions, + transactions_status: HashMap::new(), + blocks: HashMap::new(), + blocks_meta: HashMap::new(), + entry: HashMap::new(), + commitment: None, + accounts_data_slice: Vec::new(), + ping: None, + 
from_slot: None, + }; + let limit = FilterLimits::default(); + let filter = Filter::new(&config, &limit, &mut create_filter_names()).unwrap(); + + let message_transaction = + create_message_transaction(&keypair_x, vec![account_key_x, account_key_z]); + let message = Message::Transaction(message_transaction); + for message in filter.get_updates(&message, None) { + assert!(message.filters.is_empty()); + } + } +} diff --git a/vendor/laserstream-core-proto/src/plugin/filter/limits.rs b/vendor/laserstream-core-proto/src/plugin/filter/limits.rs new file mode 100644 index 00000000..5dd89dc3 --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/filter/limits.rs @@ -0,0 +1,224 @@ +use { + serde::{de, Deserialize, Deserializer}, + solana_pubkey::Pubkey, + std::collections::HashSet, +}; + +#[derive(Debug, thiserror::Error)] +pub enum FilterLimitsCheckError { + #[error("Max amount of filters/data_slices reached, only {max} allowed")] + Max { max: usize }, + #[error("Subscribe on full stream with `any` is not allowed, at least one filter required")] + Any, + #[error("Max amount of Pubkeys reached, only {max} allowed")] + MaxPubkey { max: usize }, + #[error("Pubkey {pubkey} in filters is not allowed")] + PubkeyReject { pubkey: Pubkey }, +} + +pub type FilterLimitsCheckResult = Result<(), FilterLimitsCheckError>; + +#[derive(Debug, Default, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimits { + pub accounts: FilterLimitsAccounts, + pub slots: FilterLimitsSlots, + pub transactions: FilterLimitsTransactions, + pub transactions_status: FilterLimitsTransactions, + pub blocks: FilterLimitsBlocks, + pub blocks_meta: FilterLimitsBlocksMeta, + pub entries: FilterLimitsEntries, +} + +impl FilterLimits { + pub const fn check_max(len: usize, max: usize) -> FilterLimitsCheckResult { + if len <= max { + Ok(()) + } else { + Err(FilterLimitsCheckError::Max { max }) + } + } + + pub const fn check_any(is_empty: bool, any: bool) -> FilterLimitsCheckResult 
{ + if !is_empty || any { + Ok(()) + } else { + Err(FilterLimitsCheckError::Any) + } + } + + pub const fn check_pubkey_max(len: usize, max: usize) -> FilterLimitsCheckResult { + if len <= max { + Ok(()) + } else { + Err(FilterLimitsCheckError::MaxPubkey { max }) + } + } + + pub fn check_pubkey_reject(pubkey: &Pubkey, set: &HashSet) -> FilterLimitsCheckResult { + if !set.contains(pubkey) { + Ok(()) + } else { + Err(FilterLimitsCheckError::PubkeyReject { pubkey: *pubkey }) + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsAccounts { + pub max: usize, + pub any: bool, + pub account_max: usize, + #[serde(deserialize_with = "deserialize_pubkey_set")] + pub account_reject: HashSet, + pub owner_max: usize, + #[serde(deserialize_with = "deserialize_pubkey_set")] + pub owner_reject: HashSet, + pub data_slice_max: usize, +} + +impl Default for FilterLimitsAccounts { + fn default() -> Self { + Self { + max: usize::MAX, + any: true, + account_max: usize::MAX, + account_reject: HashSet::new(), + owner_max: usize::MAX, + owner_reject: HashSet::new(), + data_slice_max: usize::MAX, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsSlots { + #[serde(deserialize_with = "deserialize_usize_str")] + pub max: usize, +} + +impl Default for FilterLimitsSlots { + fn default() -> Self { + Self { max: usize::MAX } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsTransactions { + #[serde(deserialize_with = "deserialize_usize_str")] + pub max: usize, + pub any: bool, + #[serde(deserialize_with = "deserialize_usize_str")] + pub account_include_max: usize, + #[serde(deserialize_with = "deserialize_pubkey_set")] + pub account_include_reject: HashSet, + #[serde(deserialize_with = "deserialize_usize_str")] + pub account_exclude_max: usize, + #[serde(deserialize_with = "deserialize_usize_str")] + pub 
account_required_max: usize, +} + +impl Default for FilterLimitsTransactions { + fn default() -> Self { + Self { + max: usize::MAX, + any: true, + account_include_max: usize::MAX, + account_include_reject: HashSet::new(), + account_exclude_max: usize::MAX, + account_required_max: usize::MAX, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsBlocks { + #[serde(deserialize_with = "deserialize_usize_str")] + pub max: usize, + #[serde(deserialize_with = "deserialize_usize_str")] + pub account_include_max: usize, + pub account_include_any: bool, + #[serde(deserialize_with = "deserialize_pubkey_set")] + pub account_include_reject: HashSet, + pub include_transactions: bool, + pub include_accounts: bool, + pub include_entries: bool, +} + +impl Default for FilterLimitsBlocks { + fn default() -> Self { + Self { + max: usize::MAX, + account_include_max: usize::MAX, + account_include_any: true, + account_include_reject: HashSet::new(), + include_transactions: true, + include_accounts: true, + include_entries: true, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsBlocksMeta { + #[serde(deserialize_with = "deserialize_usize_str")] + pub max: usize, +} + +impl Default for FilterLimitsBlocksMeta { + fn default() -> Self { + Self { max: usize::MAX } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default, deny_unknown_fields)] +pub struct FilterLimitsEntries { + #[serde(deserialize_with = "deserialize_usize_str")] + pub max: usize, +} + +impl Default for FilterLimitsEntries { + fn default() -> Self { + Self { max: usize::MAX } + } +} + +fn deserialize_usize_str<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + #[derive(Deserialize)] + #[serde(untagged)] + enum Value<'a> { + Int(usize), + Str(&'a str), + } + + match Value::deserialize(deserializer)? 
{ + Value::Int(value) => Ok(value), + Value::Str(value) => value + .replace('_', "") + .parse::() + .map_err(de::Error::custom), + } +} + +fn deserialize_pubkey_set<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + Vec::<&str>::deserialize(deserializer)? + .into_iter() + .map(|value| { + value + .parse() + .map_err(|error| de::Error::custom(format!("Invalid pubkey: {value} ({error:?})"))) + }) + .collect::>() +} diff --git a/vendor/laserstream-core-proto/src/plugin/filter/message.rs b/vendor/laserstream-core-proto/src/plugin/filter/message.rs new file mode 100644 index 00000000..1da007e3 --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/filter/message.rs @@ -0,0 +1,1392 @@ +use { + crate::{ + geyser::{ + subscribe_update::UpdateOneof, SlotStatus as SlotStatusProto, SubscribeUpdate, + SubscribeUpdateAccount, SubscribeUpdateAccountInfo, SubscribeUpdateBlock, + SubscribeUpdateEntry, SubscribeUpdatePing, SubscribeUpdatePong, SubscribeUpdateSlot, + SubscribeUpdateTransaction, SubscribeUpdateTransactionInfo, + SubscribeUpdateTransactionStatus, + }, + plugin::{ + filter::{name::FilterName, FilterAccountsDataSlice}, + message::{ + MessageAccount, MessageAccountInfo, MessageBlock, MessageBlockMeta, MessageEntry, + MessageSlot, MessageTransaction, MessageTransactionInfo, + }, + }, + solana::storage::confirmed_block, + }, + bytes::buf::{Buf, BufMut}, + prost::{ + encoding::{ + encode_key, encode_varint, encoded_len_varint, key_len, message, DecodeContext, + WireType, + }, + DecodeError, + }, + prost_types::Timestamp, + smallvec::SmallVec, + solana_signature::Signature, + std::{ + collections::HashSet, + ops::{Deref, DerefMut}, + sync::Arc, + time::SystemTime, + }, +}; +use crate::prelude::SubscribeUpdateBatch; +use prost::Message as ProstMessage; + +#[inline] +pub const fn prost_field_encoded_len(tag: u32, len: usize) -> usize { + key_len(tag) + encoded_len_varint(len as u64) + len +} + +#[inline] +fn prost_bytes_encode_raw(tag: u32, 
value: &[u8], buf: &mut impl BufMut) { + encode_key(tag, WireType::LengthDelimited, buf); + encode_varint(value.len() as u64, buf); + buf.put(value); +} + +#[inline] +pub const fn prost_bytes_encoded_len(tag: u32, value: &[u8]) -> usize { + prost_field_encoded_len(tag, value.len()) +} + +macro_rules! prost_repeated_encoded_len_map { + ($tag:expr, $values:expr, $get_len:expr) => {{ + key_len($tag) * $values.len() + + $values + .iter() + .map($get_len) + .map(|len| encoded_len_varint(len as u64) + len) + .sum::() + }}; +} + +pub type FilteredUpdates = SmallVec<[FilteredUpdate; 2]>; + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateBatch { + pub updates: Vec>, +} + +impl FilteredUpdateBatch { + pub fn as_subscribe_update(&self) -> SubscribeUpdateBatch { + // Encode raw as subscribe update batch + let mut buf = Vec::new(); + self.encode(&mut buf).unwrap(); + SubscribeUpdateBatch::decode(&mut buf.as_slice()).unwrap() + } +} + +impl prost::Message for FilteredUpdateBatch { + fn encode_raw(&self, buf: &mut impl BufMut) { + for update in self.updates.iter() { + encode_key(1, WireType::LengthDelimited, buf); + encode_varint(update.encoded_len() as u64, buf); + update.encode_raw(buf); + } + } + + fn encoded_len(&self) -> usize { + let mut len = 0; + for update in self.updates.iter() { + len += 2; + len += update.encoded_len(); + } + len + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdate { + pub filters: FilteredUpdateFilters, + pub message: FilteredUpdateOneof, + pub created_at: Timestamp, +} + +impl prost::Message for FilteredUpdate { + fn encode_raw(&self, buf: &mut impl BufMut) { + for name in self.filters.iter().map(|filter| filter.as_ref()) { + encode_key(1u32, WireType::LengthDelimited, buf); + 
encode_varint(name.len() as u64, buf); + buf.put_slice(name.as_bytes()); + } + self.message.encode_raw(buf); + message::encode(11u32, &self.created_at, buf); + } + + fn encoded_len(&self) -> usize { + prost_repeated_encoded_len_map!(1u32, self.filters, |filter| filter.as_ref().len()) + + self.message.encoded_len() + + message::encoded_len(11u32, &self.created_at) + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +impl FilteredUpdate { + pub const fn new( + filters: FilteredUpdateFilters, + message: FilteredUpdateOneof, + created_at: Timestamp, + ) -> Self { + Self { + filters, + message, + created_at, + } + } + + pub fn new_empty(message: FilteredUpdateOneof) -> Self { + Self::new( + FilteredUpdateFilters::new(), + message, + Timestamp::from(SystemTime::now()), + ) + } + + fn as_subscribe_update_account( + message: &MessageAccountInfo, + data_slice: &FilterAccountsDataSlice, + ) -> SubscribeUpdateAccountInfo { + SubscribeUpdateAccountInfo { + pubkey: message.pubkey.as_ref().into(), + lamports: message.lamports, + owner: message.owner.as_ref().into(), + executable: message.executable, + rent_epoch: message.rent_epoch, + data: data_slice.get_slice(&message.data), + write_version: message.write_version, + txn_signature: message.txn_signature.map(|s| s.as_ref().into()), + } + } + + fn as_subscribe_update_transaction( + message: &MessageTransactionInfo, + ) -> SubscribeUpdateTransactionInfo { + SubscribeUpdateTransactionInfo { + signature: message.signature.as_ref().into(), + is_vote: message.is_vote, + transaction: Some(message.transaction.clone()), + meta: Some(message.meta.clone()), + index: message.index as u64, + } + } + + fn as_subscribe_update_entry(message: &MessageEntry) -> SubscribeUpdateEntry { + SubscribeUpdateEntry { + slot: message.slot, + index: message.index as u64, + 
num_hashes: message.num_hashes, + hash: message.hash.to_bytes().to_vec(), + executed_transaction_count: message.executed_transaction_count, + starting_transaction_index: message.starting_transaction_index, + } + } + + pub fn as_subscribe_update(&self) -> SubscribeUpdate { + let message = match &self.message { + FilteredUpdateOneof::Account(msg) => UpdateOneof::Account(SubscribeUpdateAccount { + account: Some(Self::as_subscribe_update_account( + msg.account.as_ref(), + &msg.data_slice, + )), + slot: msg.slot, + is_startup: msg.is_startup, + }), + FilteredUpdateOneof::Slot(msg) => UpdateOneof::Slot(SubscribeUpdateSlot { + slot: msg.slot, + parent: msg.parent, + status: msg.status as i32, + dead_error: msg.dead_error.clone(), + }), + FilteredUpdateOneof::Transaction(msg) => { + UpdateOneof::Transaction(SubscribeUpdateTransaction { + transaction: Some(Self::as_subscribe_update_transaction( + msg.transaction.as_ref(), + )), + slot: msg.slot, + }) + } + FilteredUpdateOneof::TransactionStatus(msg) => { + UpdateOneof::TransactionStatus(SubscribeUpdateTransactionStatus { + slot: msg.slot, + signature: msg.transaction.signature.as_ref().into(), + is_vote: msg.transaction.is_vote, + index: msg.transaction.index as u64, + err: msg.transaction.meta.err.clone(), + }) + } + FilteredUpdateOneof::Block(msg) => UpdateOneof::Block(SubscribeUpdateBlock { + slot: msg.meta.slot, + blockhash: msg.meta.blockhash.clone(), + rewards: msg.meta.rewards.clone(), + block_time: msg.meta.block_time, + block_height: msg.meta.block_height, + parent_slot: msg.meta.parent_slot, + parent_blockhash: msg.meta.parent_blockhash.clone(), + executed_transaction_count: msg.meta.executed_transaction_count, + transactions: msg + .transactions + .iter() + .map(|tx| Self::as_subscribe_update_transaction(tx.as_ref())) + .collect(), + updated_account_count: msg.updated_account_count, + accounts: msg + .accounts + .iter() + .map(|acc| { + Self::as_subscribe_update_account(acc.as_ref(), &msg.accounts_data_slice) + 
}) + .collect(), + entries_count: msg.meta.entries_count, + entries: msg + .entries + .iter() + .map(|entry| Self::as_subscribe_update_entry(entry.as_ref())) + .collect(), + }), + FilteredUpdateOneof::Ping => UpdateOneof::Ping(SubscribeUpdatePing {}), + FilteredUpdateOneof::Pong(msg) => UpdateOneof::Pong(*msg), + FilteredUpdateOneof::BlockMeta(msg) => UpdateOneof::BlockMeta(msg.block_meta.clone()), + FilteredUpdateOneof::Entry(msg) => { + UpdateOneof::Entry(Self::as_subscribe_update_entry(&msg.0)) + } + }; + + SubscribeUpdate { + filters: self + .filters + .iter() + .map(|name| name.as_ref().to_string()) + .collect(), + update_oneof: Some(message), + created_at: Some(self.created_at), + } + } + + pub fn from_subscribe_update(update: SubscribeUpdate) -> Result { + let created_at = update.created_at.ok_or("create_at should be defined")?; + + let message = match update.update_oneof.ok_or("update should be defined")? { + UpdateOneof::Account(msg) => { + let account = MessageAccount::from_update_oneof(msg, created_at)?; + FilteredUpdateOneof::Account(FilteredUpdateAccount { + account: account.account, + slot: account.slot, + is_startup: account.is_startup, + data_slice: FilterAccountsDataSlice::default(), + }) + } + UpdateOneof::Slot(msg) => { + let slot = MessageSlot::from_update_oneof(&msg, created_at)?; + FilteredUpdateOneof::Slot(FilteredUpdateSlot(slot)) + } + UpdateOneof::Transaction(msg) => { + let tx = MessageTransaction::from_update_oneof(msg, created_at)?; + FilteredUpdateOneof::Transaction(FilteredUpdateTransaction { + transaction: tx.transaction, + slot: tx.slot, + }) + } + UpdateOneof::TransactionStatus(msg) => { + FilteredUpdateOneof::TransactionStatus(FilteredUpdateTransactionStatus { + transaction: Arc::new(MessageTransactionInfo { + signature: Signature::try_from(msg.signature.as_slice()) + .map_err(|_| "invalid signature length")?, + is_vote: msg.is_vote, + transaction: confirmed_block::Transaction::default(), + meta: 
confirmed_block::TransactionStatusMeta { + err: msg.err, + ..confirmed_block::TransactionStatusMeta::default() + }, + index: msg.index as usize, + account_keys: HashSet::new(), + }), + slot: msg.slot, + }) + } + UpdateOneof::Block(msg) => { + let block = MessageBlock::from_update_oneof(msg, created_at)?; + FilteredUpdateOneof::Block(Box::new(FilteredUpdateBlock { + meta: block.meta, + transactions: block.transactions, + updated_account_count: block.updated_account_count, + accounts: block.accounts, + accounts_data_slice: FilterAccountsDataSlice::default(), + entries: block.entries, + })) + } + UpdateOneof::Ping(_) => FilteredUpdateOneof::Ping, + UpdateOneof::Pong(msg) => FilteredUpdateOneof::Pong(msg), + UpdateOneof::BlockMeta(msg) => { + let block_meta = MessageBlockMeta::from_update_oneof(msg, created_at); + FilteredUpdateOneof::BlockMeta(Arc::new(block_meta)) + } + UpdateOneof::Entry(msg) => { + let entry = MessageEntry::from_update_oneof(&msg, created_at)?; + FilteredUpdateOneof::Entry(FilteredUpdateEntry(Arc::new(entry))) + } + }; + + Ok(Self { + filters: update.filters.into_iter().map(FilterName::new).collect(), + message, + created_at, + }) + } +} + +pub type FilteredUpdateFilters = SmallVec<[FilterName; 4]>; + +#[derive(Debug, Clone, PartialEq)] +pub enum FilteredUpdateOneof { + Account(FilteredUpdateAccount), // 2 + Slot(FilteredUpdateSlot), // 3 + Transaction(FilteredUpdateTransaction), // 4 + TransactionStatus(FilteredUpdateTransactionStatus), // 10 + Block(Box), // 5 + Ping, // 6 + Pong(SubscribeUpdatePong), // 9 + BlockMeta(Arc), // 7 + Entry(FilteredUpdateEntry), // 8 +} + +impl FilteredUpdateOneof { + pub fn account(message: &MessageAccount, data_slice: FilterAccountsDataSlice) -> Self { + Self::Account(FilteredUpdateAccount { + slot: message.slot, + account: Arc::clone(&message.account), + is_startup: message.is_startup, + data_slice, + }) + } + + pub const fn slot(message: MessageSlot) -> Self { + Self::Slot(FilteredUpdateSlot(message)) + } + + pub 
fn transaction(message: &MessageTransaction) -> Self { + Self::Transaction(FilteredUpdateTransaction { + transaction: Arc::clone(&message.transaction), + slot: message.slot, + }) + } + + pub fn transaction_status(message: &MessageTransaction) -> Self { + Self::TransactionStatus(FilteredUpdateTransactionStatus { + transaction: Arc::clone(&message.transaction), + slot: message.slot, + }) + } + + pub const fn block(message: Box) -> Self { + Self::Block(message) + } + + pub const fn ping() -> Self { + Self::Ping + } + + pub const fn pong(id: i32) -> Self { + Self::Pong(SubscribeUpdatePong { id }) + } + + pub const fn block_meta(message: Arc) -> Self { + Self::BlockMeta(message) + } + + pub const fn entry(message: Arc) -> Self { + Self::Entry(FilteredUpdateEntry(message)) + } +} + +impl prost::Message for FilteredUpdateOneof { + fn encode_raw(&self, buf: &mut impl BufMut) { + match self { + Self::Account(msg) => message::encode(2u32, msg, buf), + Self::Slot(msg) => message::encode(3u32, msg, buf), + Self::Transaction(msg) => message::encode(4u32, msg, buf), + Self::TransactionStatus(msg) => message::encode(10u32, msg, buf), + Self::Block(msg) => message::encode(5u32, msg, buf), + Self::Ping => { + encode_key(6u32, WireType::LengthDelimited, buf); + encode_varint(0, buf); + } + Self::Pong(msg) => message::encode(9u32, msg, buf), + Self::BlockMeta(msg) => message::encode(7u32, &msg.block_meta, buf), + Self::Entry(msg) => message::encode(8u32, msg, buf), + } + } + + fn encoded_len(&self) -> usize { + match self { + Self::Account(msg) => message::encoded_len(2u32, msg), + Self::Slot(msg) => message::encoded_len(3u32, msg), + Self::Transaction(msg) => message::encoded_len(4u32, msg), + Self::TransactionStatus(msg) => message::encoded_len(10u32, msg), + Self::Block(msg) => message::encoded_len(5u32, msg), + Self::Ping => key_len(6u32) + encoded_len_varint(0), + Self::Pong(msg) => message::encoded_len(9u32, msg), + Self::BlockMeta(msg) => message::encoded_len(7u32, 
&msg.block_meta), + Self::Entry(msg) => message::encoded_len(8u32, msg), + } + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateAccount { + pub account: Arc, + pub slot: u64, + pub is_startup: bool, + pub data_slice: FilterAccountsDataSlice, +} + +impl prost::Message for FilteredUpdateAccount { + fn encode_raw(&self, buf: &mut impl BufMut) { + Self::account_encode_raw(1u32, &self.account, &self.data_slice, buf); + if self.slot != 0u64 { + ::prost::encoding::uint64::encode(2u32, &self.slot, buf); + } + if self.is_startup { + ::prost::encoding::bool::encode(3u32, &self.is_startup, buf); + } + } + + fn encoded_len(&self) -> usize { + prost_field_encoded_len( + 1u32, + Self::account_encoded_len(&self.account, &self.data_slice), + ) + if self.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(2u32, &self.slot) + } else { + 0 + } + if self.is_startup { + ::prost::encoding::bool::encoded_len(3u32, &self.is_startup) + } else { + 0 + } + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +impl FilteredUpdateAccount { + fn account_encode_raw( + tag: u32, + account: &MessageAccountInfo, + data_slice: &FilterAccountsDataSlice, + buf: &mut impl BufMut, + ) { + encode_key(tag, WireType::LengthDelimited, buf); + encode_varint(Self::account_encoded_len(account, data_slice) as u64, buf); + + prost_bytes_encode_raw(1u32, account.pubkey.as_ref(), buf); + if account.lamports != 0u64 { + ::prost::encoding::uint64::encode(2u32, &account.lamports, buf); + } + prost_bytes_encode_raw(3u32, account.owner.as_ref(), buf); + if account.executable { + 
::prost::encoding::bool::encode(4u32, &account.executable, buf); + } + if account.rent_epoch != 0u64 { + ::prost::encoding::uint64::encode(5u32, &account.rent_epoch, buf); + } + data_slice.slice_encode_raw(6u32, &account.data, buf); + if account.write_version != 0u64 { + ::prost::encoding::uint64::encode(7u32, &account.write_version, buf); + } + if let Some(value) = &account.txn_signature { + prost_bytes_encode_raw(8u32, value.as_ref(), buf); + } + } + + fn account_encoded_len( + account: &MessageAccountInfo, + data_slice: &FilterAccountsDataSlice, + ) -> usize { + let data_len = data_slice.get_slice_len(&account.data); + + prost_bytes_encoded_len(1u32, account.pubkey.as_ref()) + + if account.lamports != 0u64 { + ::prost::encoding::uint64::encoded_len(2u32, &account.lamports) + } else { + 0 + } + + prost_bytes_encoded_len(3u32, account.owner.as_ref()) + + if account.executable { + ::prost::encoding::bool::encoded_len(4u32, &account.executable) + } else { + 0 + } + + if account.rent_epoch != 0u64 { + ::prost::encoding::uint64::encoded_len(5u32, &account.rent_epoch) + } else { + 0 + } + + if data_len != 0 { + prost_field_encoded_len(6u32, data_len) + } else { + 0 + } + + if account.write_version != 0u64 { + ::prost::encoding::uint64::encoded_len(7u32, &account.write_version) + } else { + 0 + } + + account + .txn_signature + .map_or(0, |sig| prost_bytes_encoded_len(8u32, sig.as_ref())) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateSlot(MessageSlot); + +impl Deref for FilteredUpdateSlot { + type Target = MessageSlot; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for FilteredUpdateSlot { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl prost::Message for FilteredUpdateSlot { + fn encode_raw(&self, buf: &mut impl BufMut) { + let status = SlotStatusProto::from(self.status) as i32; + if self.slot != 0u64 { + ::prost::encoding::uint64::encode(1u32, &self.slot, buf); + } + if let 
::core::option::Option::Some(ref value) = self.parent { + ::prost::encoding::uint64::encode(2u32, value, buf); + } + if status != SlotStatusProto::default() as i32 { + ::prost::encoding::int32::encode(3u32, &status, buf); + } + if let Some(error) = &self.dead_error { + ::prost::encoding::string::encode(4u32, error, buf); + } + } + + fn encoded_len(&self) -> usize { + let status = SlotStatusProto::from(self.status) as i32; + + (if self.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(1u32, &self.slot) + } else { + 0 + }) + self.parent.as_ref().map_or(0, |value| { + ::prost::encoding::uint64::encoded_len(2u32, value) + }) + if status != SlotStatusProto::default() as i32 { + ::prost::encoding::int32::encoded_len(3u32, &status) + } else { + 0 + } + if let Some(error) = &self.dead_error { + ::prost::encoding::string::encoded_len(4u32, error) + } else { + 0 + } + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateTransaction { + pub transaction: Arc, + pub slot: u64, +} + +impl prost::Message for FilteredUpdateTransaction { + fn encode_raw(&self, buf: &mut impl BufMut) { + Self::tx_encode_raw(1u32, &self.transaction, buf); + if self.slot != 0u64 { + ::prost::encoding::uint64::encode(2u32, &self.slot, buf); + } + } + + fn encoded_len(&self) -> usize { + prost_field_encoded_len(1u32, Self::tx_encoded_len(&self.transaction)) + + if self.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(2u32, &self.slot) + } else { + 0 + } + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +impl FilteredUpdateTransaction { + fn tx_encode_raw(tag: u32, tx: 
&MessageTransactionInfo, buf: &mut impl BufMut) { + encode_key(tag, WireType::LengthDelimited, buf); + encode_varint(Self::tx_encoded_len(tx) as u64, buf); + + let index = tx.index as u64; + + prost_bytes_encode_raw(1u32, tx.signature.as_ref(), buf); + if tx.is_vote { + ::prost::encoding::bool::encode(2u32, &tx.is_vote, buf); + } + message::encode(3u32, &tx.transaction, buf); + message::encode(4u32, &tx.meta, buf); + if index != 0u64 { + ::prost::encoding::uint64::encode(5u32, &index, buf); + } + } + + fn tx_encoded_len(tx: &MessageTransactionInfo) -> usize { + let index = tx.index as u64; + + prost_bytes_encoded_len(1u32, tx.signature.as_ref()) + + if tx.is_vote { + ::prost::encoding::bool::encoded_len(2u32, &tx.is_vote) + } else { + 0 + } + + message::encoded_len(3u32, &tx.transaction) + + message::encoded_len(4u32, &tx.meta) + + if index != 0u64 { + ::prost::encoding::uint64::encoded_len(5u32, &index) + } else { + 0 + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateTransactionStatus { + pub transaction: Arc, + pub slot: u64, +} + +impl prost::Message for FilteredUpdateTransactionStatus { + fn encode_raw(&self, buf: &mut impl BufMut) { + if self.slot != 0u64 { + ::prost::encoding::uint64::encode(1u32, &self.slot, buf); + } + let tx = &self.transaction; + prost_bytes_encode_raw(2u32, tx.signature.as_ref(), buf); + if tx.is_vote { + ::prost::encoding::bool::encode(3u32, &tx.is_vote, buf); + } + let index = tx.index as u64; + if index != 0u64 { + ::prost::encoding::uint64::encode(4u32, &index, buf); + } + if let Some(msg) = &tx.meta.err { + message::encode(5u32, msg, buf) + } + } + + fn encoded_len(&self) -> usize { + let tx = &self.transaction; + let index = tx.index as u64; + + (if self.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(1u32, &self.slot) + } else { + 0 + }) + prost_bytes_encoded_len(2u32, tx.signature.as_ref()) + + if tx.is_vote { + ::prost::encoding::bool::encoded_len(3u32, &tx.is_vote) + } else { + 0 + } + + if 
index != 0u64 { + ::prost::encoding::uint64::encoded_len(4u32, &index) + } else { + 0 + } + + tx.meta + .err + .as_ref() + .map_or(0, |msg| message::encoded_len(5u32, msg)) + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateBlock { + pub meta: Arc, + pub transactions: Vec>, + pub updated_account_count: u64, + pub accounts: Vec>, + pub accounts_data_slice: FilterAccountsDataSlice, + pub entries: Vec>, +} + +impl prost::Message for FilteredUpdateBlock { + fn encode_raw(&self, buf: &mut impl BufMut) { + if self.meta.slot != 0u64 { + ::prost::encoding::uint64::encode(1u32, &self.meta.slot, buf); + } + if !self.meta.blockhash.is_empty() { + ::prost::encoding::string::encode(2u32, &self.meta.blockhash, buf); + } + if let Some(msg) = &self.meta.rewards { + message::encode(3u32, msg, buf); + } + if let Some(msg) = &self.meta.block_time { + message::encode(4u32, msg, buf); + } + if let Some(msg) = &self.meta.block_height { + message::encode(5u32, msg, buf); + } + for tx in &self.transactions { + FilteredUpdateTransaction::tx_encode_raw(6u32, tx.as_ref(), buf); + } + if self.meta.parent_slot != 0u64 { + ::prost::encoding::uint64::encode(7u32, &self.meta.parent_slot, buf); + } + if !self.meta.parent_blockhash.is_empty() { + ::prost::encoding::string::encode(8u32, &self.meta.parent_blockhash, buf); + } + if self.meta.executed_transaction_count != 0u64 { + ::prost::encoding::uint64::encode(9u32, &self.meta.executed_transaction_count, buf); + } + if self.updated_account_count != 0u64 { + ::prost::encoding::uint64::encode(10u32, &self.updated_account_count, buf); + } + for account in &self.accounts { + FilteredUpdateAccount::account_encode_raw( + 11u32, + account.as_ref(), + &self.accounts_data_slice, + buf, + ); + } + if 
self.meta.entries_count != 0u64 { + ::prost::encoding::uint64::encode(12u32, &self.meta.entries_count, buf); + } + for entry in &self.entries { + encode_key(13u32, WireType::LengthDelimited, buf); + encode_varint( + FilteredUpdateEntry::entry_encoded_len(entry.as_ref()) as u64, + buf, + ); + FilteredUpdateEntry::entry_encode_raw(entry, buf); + } + } + + fn encoded_len(&self) -> usize { + (if self.meta.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(1u32, &self.meta.slot) + } else { + 0 + }) + if !self.meta.blockhash.is_empty() { + ::prost::encoding::string::encoded_len(2u32, &self.meta.blockhash) + } else { + 0 + } + self + .meta + .rewards + .as_ref() + .map_or(0, |msg| message::encoded_len(3u32, msg)) + + self + .meta + .block_time + .as_ref() + .map_or(0, |msg| message::encoded_len(4u32, msg)) + + self + .meta + .block_height + .as_ref() + .map_or(0, |msg| message::encoded_len(5u32, msg)) + + prost_repeated_encoded_len_map!(6u32, self.transactions, |tx| { + FilteredUpdateTransaction::tx_encoded_len(tx.as_ref()) + }) + + if self.meta.parent_slot != 0u64 { + ::prost::encoding::uint64::encoded_len(7u32, &self.meta.parent_slot) + } else { + 0 + } + + if !self.meta.parent_blockhash.is_empty() { + ::prost::encoding::string::encoded_len(8u32, &self.meta.parent_blockhash) + } else { + 0 + } + + if self.meta.executed_transaction_count != 0u64 { + ::prost::encoding::uint64::encoded_len(9u32, &self.meta.executed_transaction_count) + } else { + 0 + } + + if self.updated_account_count != 0u64 { + ::prost::encoding::uint64::encoded_len(10u32, &self.updated_account_count) + } else { + 0 + } + + prost_repeated_encoded_len_map!(11u32, self.accounts, |account| { + FilteredUpdateAccount::account_encoded_len( + account.as_ref(), + &self.accounts_data_slice, + ) + }) + + if self.meta.entries_count != 0u64 { + ::prost::encoding::uint64::encoded_len(12u32, &self.meta.entries_count) + } else { + 0 + } + + prost_repeated_encoded_len_map!(13u32, self.entries, |entry| { + 
FilteredUpdateEntry::entry_encoded_len(entry) + }) + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct FilteredUpdateEntry(Arc); + +impl prost::Message for FilteredUpdateEntry { + fn encode_raw(&self, buf: &mut impl BufMut) { + Self::entry_encode_raw(&self.0, buf) + } + + fn encoded_len(&self) -> usize { + Self::entry_encoded_len(&self.0) + } + + fn merge_field( + &mut self, + _tag: u32, + _wire_type: WireType, + _buf: &mut impl Buf, + _ctx: DecodeContext, + ) -> Result<(), DecodeError> { + unimplemented!() + } + + fn clear(&mut self) { + unimplemented!() + } +} + +impl FilteredUpdateEntry { + fn entry_encode_raw(entry: &MessageEntry, buf: &mut impl BufMut) { + let index = entry.index as u64; + + if entry.slot != 0u64 { + ::prost::encoding::uint64::encode(1u32, &entry.slot, buf); + } + if index != 0u64 { + ::prost::encoding::uint64::encode(2u32, &index, buf); + } + if entry.num_hashes != 0u64 { + ::prost::encoding::uint64::encode(3u32, &entry.num_hashes, buf); + } + prost_bytes_encode_raw(4u32, entry.hash.as_ref(), buf); + if entry.executed_transaction_count != 0u64 { + ::prost::encoding::uint64::encode(5u32, &entry.executed_transaction_count, buf); + } + if entry.starting_transaction_index != 0u64 { + ::prost::encoding::uint64::encode(6u32, &entry.starting_transaction_index, buf); + } + } + + fn entry_encoded_len(entry: &MessageEntry) -> usize { + let index = entry.index as u64; + + (if entry.slot != 0u64 { + ::prost::encoding::uint64::encoded_len(1u32, &entry.slot) + } else { + 0 + }) + if index != 0u64 { + ::prost::encoding::uint64::encoded_len(2u32, &index) + } else { + 0 + } + if entry.num_hashes != 0u64 { + ::prost::encoding::uint64::encoded_len(3u32, &entry.num_hashes) + } else { + 0 + } + prost_bytes_encoded_len(4u32, 
entry.hash.as_ref()) + + if entry.executed_transaction_count != 0u64 { + ::prost::encoding::uint64::encoded_len(5u32, &entry.executed_transaction_count) + } else { + 0 + } + + if entry.starting_transaction_index != 0u64 { + ::prost::encoding::uint64::encoded_len(6u32, &entry.starting_transaction_index) + } else { + 0 + } + } +} + +#[cfg(any(test, feature = "plugin-bench"))] +pub mod tests { + #![cfg_attr(feature = "plugin-bench", allow(dead_code))] + #![cfg_attr(feature = "plugin-bench", allow(unused_imports))] + use { + super::{FilteredUpdate, FilteredUpdateBlock, FilteredUpdateFilters, FilteredUpdateOneof}, + crate::{ + convert_to, + geyser::{SubscribeUpdate, SubscribeUpdateBlockMeta}, + plugin::{ + filter::{name::FilterName, FilterAccountsDataSlice}, + message::{ + MessageAccount, MessageAccountInfo, MessageBlockMeta, MessageEntry, + MessageSlot, MessageTransaction, MessageTransactionInfo, SlotStatus, + }, + }, + }, + prost::Message as _, + prost_011::Message as _, + prost_types::Timestamp, + solana_hash::Hash, + solana_message::SimpleAddressLoader, + solana_pubkey::Pubkey, + solana_signature::Signature, + solana_storage_proto::convert::generated, + solana_transaction::sanitized::{MessageHash, SanitizedTransaction}, + solana_transaction_status::{ConfirmedBlock, TransactionWithStatusMeta}, + std::{ + collections::{HashMap, HashSet}, + fs, + ops::Range, + str::FromStr, + sync::Arc, + time::SystemTime, + }, + }; + + pub fn create_message_filters(names: &[&str]) -> FilteredUpdateFilters { + let mut filters = FilteredUpdateFilters::new(); + for name in names { + filters.push(FilterName::new(*name)); + } + filters + } + + pub fn create_account_data_slice() -> Vec { + [ + vec![], + vec![Range { start: 0, end: 0 }], + vec![Range { start: 2, end: 3 }], + vec![Range { start: 1, end: 3 }, Range { start: 5, end: 10 }], + ] + .into_iter() + .map(Arc::new) + .map(FilterAccountsDataSlice::new_unchecked) + .collect() + } + + pub fn create_accounts_raw() -> Vec> { + let pubkey = 
Pubkey::from_str("28Dncoh8nmzXYEGLUcBA5SUw5WDwDBn15uUCwrWBbyuu").unwrap(); + let owner = Pubkey::from_str("5jrPJWVGrFvQ2V9wRZC3kHEZhxo9pmMir15x73oHT6mn").unwrap(); + let txn_signature = Signature::from_str("4V36qYhukXcLFuvhZaudSoJpPaFNB7d5RqYKjL2xiSKrxaBfEajqqL4X6viZkEvHJ8XcTJsqVjZxFegxhN7EC9V5").unwrap(); + + let mut accounts = vec![]; + for lamports in [0, 8123] { + for executable in [true, false] { + for rent_epoch in [0, 4242] { + for data in [ + vec![], + [42; 165].to_vec(), + [42; 1024].to_vec(), + [42; 2 * 1024 * 1024].to_vec(), + ] { + for write_version in [0, 1] { + for txn_signature in [None, Some(txn_signature)] { + accounts.push(Arc::new(MessageAccountInfo { + pubkey, + lamports, + owner, + executable, + rent_epoch, + data: data.clone(), + write_version, + txn_signature, + })); + } + } + } + } + } + } + accounts + } + + pub fn create_accounts() -> Vec<(MessageAccount, FilterAccountsDataSlice)> { + let mut vec = vec![]; + for account in create_accounts_raw() { + for slot in [0, 42] { + for is_startup in [true, false] { + for data_slice in create_account_data_slice() { + let msg = MessageAccount { + account: Arc::clone(&account), + slot, + is_startup, + created_at: Timestamp::from(SystemTime::now()), + }; + vec.push((msg, data_slice)); + } + } + } + } + vec + } + + pub fn create_entries() -> Vec> { + [ + MessageEntry { + slot: 299888121, + index: 42, + num_hashes: 128, + hash: Hash::new_from_array([98; 32]), + executed_transaction_count: 32, + starting_transaction_index: 1000, + created_at: Timestamp::from(SystemTime::now()), + }, + MessageEntry { + slot: 299888121, + index: 0, + num_hashes: 16, + hash: Hash::new_from_array([42; 32]), + executed_transaction_count: 32, + starting_transaction_index: 1000, + created_at: Timestamp::from(SystemTime::now()), + }, + ] + .into_iter() + .map(Arc::new) + .collect() + } + + pub fn load_predefined() -> Vec { + fs::read_dir("./fixtures/blocks") + .expect("failed to read `blocks` dir") + .map(|entry| { + let path = 
entry.expect("failed to read `blocks` dir entry").path(); + let data = fs::read(path).expect("failed to read block"); + generated::ConfirmedBlock::decode(data.as_slice()) + .expect("failed to decode block") + .try_into() + .expect("failed to convert decoded block") + }) + .collect() + } + + pub fn load_predefined_blockmeta() -> Vec> { + load_predefined_blocks() + .into_iter() + .map(|block| (block.meta.blockhash.clone(), block.meta)) + .collect::>() + .into_values() + .collect() + } + + pub fn load_predefined_transactions() -> Vec> { + load_predefined_blocks() + .into_iter() + .flat_map(|block| block.transactions.into_iter().map(|tx| (tx.signature, tx))) + .collect::>() + .into_values() + .collect() + } + + pub fn load_predefined_blocks() -> Vec { + load_predefined() + .into_iter() + .flat_map(|block| { + let transactions = block + .transactions + .into_iter() + .enumerate() + .map(|(index, tx)| { + let TransactionWithStatusMeta::Complete(tx) = tx else { + panic!("tx with missed meta"); + }; + let transaction = SanitizedTransaction::try_create( + tx.transaction.clone(), + MessageHash::Compute, + None, + SimpleAddressLoader::Disabled, + &HashSet::new(), + ) + .expect("failed to create tx"); + MessageTransactionInfo { + signature: tx.transaction.signatures[0], + is_vote: true, + transaction: convert_to::create_transaction(&transaction), + meta: convert_to::create_transaction_meta(&tx.meta), + index, + account_keys: HashSet::new(), + } + }) + .map(Arc::new) + .collect::>(); + + let entries = create_entries(); + + let slot = block.parent_slot + 1; + let block_meta1 = MessageBlockMeta { + block_meta: SubscribeUpdateBlockMeta { + parent_slot: block.parent_slot, + slot, + parent_blockhash: block.previous_blockhash, + blockhash: block.blockhash, + rewards: Some(convert_to::create_rewards_obj( + &block.rewards, + block.num_partitions, + )), + block_time: block.block_time.map(convert_to::create_timestamp), + block_height: 
block.block_height.map(convert_to::create_block_height), + executed_transaction_count: transactions.len() as u64, + entries_count: entries.len() as u64, + }, + created_at: Timestamp::from(SystemTime::now()), + }; + let mut block_meta2 = block_meta1.clone(); + block_meta2.rewards = + Some(convert_to::create_rewards_obj(&block.rewards, Some(42))); + + let block_meta1 = Arc::new(block_meta1); + let block_meta2 = Arc::new(block_meta2); + + let accounts = create_accounts_raw(); + create_account_data_slice() + .into_iter() + .flat_map(move |data_slice| { + vec![ + FilteredUpdateBlock { + meta: Arc::clone(&block_meta1), + transactions: transactions.clone(), + updated_account_count: accounts.len() as u64, + accounts: accounts.clone(), + accounts_data_slice: data_slice.clone(), + entries: entries.clone(), + }, + FilteredUpdateBlock { + meta: Arc::clone(&block_meta2), + transactions: transactions.clone(), + updated_account_count: accounts.len() as u64, + accounts: accounts.clone(), + accounts_data_slice: data_slice, + entries: entries.clone(), + }, + ] + }) + }) + .collect() + } + + fn encode_decode_cmp(filters: &[&str], message: FilteredUpdateOneof) { + let msg = FilteredUpdate { + filters: create_message_filters(filters), + message, + created_at: Timestamp::from(SystemTime::now()), + }; + let update = msg.as_subscribe_update(); + assert_eq!(msg.encoded_len(), update.encoded_len()); + assert_eq!( + SubscribeUpdate::decode(msg.encode_to_vec().as_slice()).expect("failed to decode"), + update + ); + assert_eq!( + FilteredUpdate::from_subscribe_update(update.clone()) + .map(|msg| msg.as_subscribe_update()), + Ok(update) + ); + } + + #[test] + fn test_message_account() { + for (msg, data_slice) in create_accounts() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::account(&msg, data_slice)); + } + } + + #[test] + fn test_message_slot() { + for slot in [0, 42] { + for parent in [None, Some(0), Some(42)] { + for status in [ + SlotStatus::Processed, + SlotStatus::Confirmed, + 
SlotStatus::Finalized, + SlotStatus::FirstShredReceived, + SlotStatus::Completed, + SlotStatus::CreatedBank, + SlotStatus::Dead, + ] { + encode_decode_cmp( + &["123"], + FilteredUpdateOneof::slot(MessageSlot { + slot, + parent, + status, + dead_error: None, + created_at: Timestamp::from(SystemTime::now()), + }), + ) + } + encode_decode_cmp( + &["123"], + FilteredUpdateOneof::slot(MessageSlot { + slot, + parent, + status: SlotStatus::Dead, + dead_error: Some("123".to_owned()), + created_at: Timestamp::from(SystemTime::now()), + }), + ) + } + } + } + + #[test] + fn test_message_transaction() { + for transaction in load_predefined_transactions() { + let msg = MessageTransaction { + transaction, + slot: 42, + created_at: Timestamp::from(SystemTime::now()), + }; + encode_decode_cmp(&["123"], FilteredUpdateOneof::transaction(&msg)); + encode_decode_cmp(&["123"], FilteredUpdateOneof::transaction_status(&msg)); + } + } + + #[test] + fn test_message_block() { + for block in load_predefined_blocks() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::block(Box::new(block))); + } + } + + #[test] + fn test_message_ping() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::Ping) + } + + #[test] + fn test_message_pong() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::pong(0)); + encode_decode_cmp(&["123"], FilteredUpdateOneof::pong(42)); + } + + #[test] + fn test_message_blockmeta() { + for block_meta in load_predefined_blockmeta() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::block_meta(block_meta)); + } + } + + #[test] + fn test_message_entry() { + for entry in create_entries() { + encode_decode_cmp(&["123"], FilteredUpdateOneof::entry(entry)); + } + } +} diff --git a/vendor/laserstream-core-proto/src/plugin/filter/mod.rs b/vendor/laserstream-core-proto/src/plugin/filter/mod.rs new file mode 100644 index 00000000..a3323f2a --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/filter/mod.rs @@ -0,0 +1,7 @@ +#[allow(clippy::module_inception)] +mod filter; 
+pub mod limits; +pub mod message; +pub mod name; + +pub use filter::{Filter, FilterAccountsDataSlice, FilterError, FilterResult}; diff --git a/vendor/laserstream-core-proto/src/plugin/filter/name.rs b/vendor/laserstream-core-proto/src/plugin/filter/name.rs new file mode 100644 index 00000000..a9f9b846 --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/filter/name.rs @@ -0,0 +1,102 @@ +use std::{ + borrow::Borrow, + collections::HashSet, + ops::Deref, + sync::Arc, + time::{Duration, Instant}, +}; + +#[derive(Debug, thiserror::Error)] +pub enum FilterNameError { + #[error("oversized filter name (max allowed size {limit}), found {size}")] + Oversized { limit: usize, size: usize }, +} + +pub type FilterNameResult = Result; + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct FilterName(Arc); + +impl AsRef for FilterName { + #[inline] + fn as_ref(&self) -> &str { + &self.0 + } +} + +impl Deref for FilterName { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Borrow for FilterName { + #[inline] + fn borrow(&self) -> &str { + &self.0[..] 
+ } +} + +impl FilterName { + pub fn new(name: impl Into) -> Self { + Self(Arc::new(name.into())) + } + + pub fn is_uniq(&self) -> bool { + Arc::strong_count(&self.0) == 1 + } +} + +#[derive(Debug)] +pub struct FilterNames { + name_size_limit: usize, + names: HashSet, + names_size_limit: usize, + cleanup_ts: Instant, + cleanup_interval: Duration, +} + +impl FilterNames { + pub fn new( + name_size_limit: usize, + names_size_limit: usize, + cleanup_interval: Duration, + ) -> Self { + Self { + name_size_limit, + names: HashSet::with_capacity(names_size_limit), + names_size_limit, + cleanup_ts: Instant::now(), + cleanup_interval, + } + } + + pub fn try_clean(&mut self) { + if self.names.len() > self.names_size_limit + && self.cleanup_ts.elapsed() > self.cleanup_interval + { + self.names.retain(|name| !name.is_uniq()); + self.cleanup_ts = Instant::now(); + } + } + + pub fn get(&mut self, name: &str) -> FilterNameResult { + match self.names.get(name) { + Some(name) => Ok(name.clone()), + None => { + if name.len() > self.name_size_limit { + Err(FilterNameError::Oversized { + limit: self.name_size_limit, + size: name.len(), + }) + } else { + let name = FilterName::new(name); + self.names.insert(name.clone()); + Ok(name) + } + } + } + } +} diff --git a/vendor/laserstream-core-proto/src/plugin/message.rs b/vendor/laserstream-core-proto/src/plugin/message.rs new file mode 100644 index 00000000..c21e9df2 --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/message.rs @@ -0,0 +1,587 @@ +use { + crate::{ + convert_to, + geyser::{ + subscribe_update::UpdateOneof, CommitmentLevel as CommitmentLevelProto, + SlotStatus as SlotStatusProto, SubscribeUpdateAccount, SubscribeUpdateAccountInfo, + SubscribeUpdateBlock, SubscribeUpdateBlockMeta, SubscribeUpdateEntry, + SubscribeUpdateSlot, SubscribeUpdateTransaction, SubscribeUpdateTransactionInfo, + }, + solana::storage::confirmed_block, + }, + agave_geyser_plugin_interface::geyser_plugin_interface::{ + ReplicaAccountInfoV3, 
ReplicaBlockInfoV4, ReplicaEntryInfoV2, ReplicaTransactionInfoV2, + SlotStatus as GeyserSlotStatus, + }, + prost_types::Timestamp, + solana_clock::Slot, + solana_hash::{Hash, HASH_BYTES}, + solana_pubkey::Pubkey, + solana_signature::Signature, + std::{ + collections::HashSet, + ops::{Deref, DerefMut}, + sync::Arc, + time::SystemTime, + }, +}; + +type FromUpdateOneofResult = Result; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum CommitmentLevel { + Processed, + Confirmed, + Finalized, +} + +impl From for CommitmentLevelProto { + fn from(commitment: CommitmentLevel) -> Self { + match commitment { + CommitmentLevel::Processed => Self::Processed, + CommitmentLevel::Confirmed => Self::Confirmed, + CommitmentLevel::Finalized => Self::Finalized, + } + } +} + +impl From for CommitmentLevel { + fn from(status: CommitmentLevelProto) -> Self { + match status { + CommitmentLevelProto::Processed => Self::Processed, + CommitmentLevelProto::Confirmed => Self::Confirmed, + CommitmentLevelProto::Finalized => Self::Finalized, + } + } +} + +impl CommitmentLevel { + pub const fn as_str(&self) -> &'static str { + match self { + Self::Processed => "processed", + Self::Confirmed => "confirmed", + Self::Finalized => "finalized", + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub enum SlotStatus { + Processed, + Confirmed, + Finalized, + FirstShredReceived, + Completed, + CreatedBank, + Dead, +} + +impl From<&GeyserSlotStatus> for SlotStatus { + fn from(status: &GeyserSlotStatus) -> Self { + match status { + GeyserSlotStatus::Processed => Self::Processed, + GeyserSlotStatus::Confirmed => Self::Confirmed, + GeyserSlotStatus::Rooted => Self::Finalized, + GeyserSlotStatus::FirstShredReceived => Self::FirstShredReceived, + GeyserSlotStatus::Completed => Self::Completed, + GeyserSlotStatus::CreatedBank => Self::CreatedBank, + GeyserSlotStatus::Dead(_error) => Self::Dead, + } + } +} + +impl From for SlotStatus { + fn from(status: 
SlotStatusProto) -> Self { + match status { + SlotStatusProto::SlotProcessed => Self::Processed, + SlotStatusProto::SlotConfirmed => Self::Confirmed, + SlotStatusProto::SlotFinalized => Self::Finalized, + SlotStatusProto::SlotFirstShredReceived => Self::FirstShredReceived, + SlotStatusProto::SlotCompleted => Self::Completed, + SlotStatusProto::SlotCreatedBank => Self::CreatedBank, + SlotStatusProto::SlotDead => Self::Dead, + } + } +} + +impl From for SlotStatusProto { + fn from(status: SlotStatus) -> Self { + match status { + SlotStatus::Processed => Self::SlotProcessed, + SlotStatus::Confirmed => Self::SlotConfirmed, + SlotStatus::Finalized => Self::SlotFinalized, + SlotStatus::FirstShredReceived => Self::SlotFirstShredReceived, + SlotStatus::Completed => Self::SlotCompleted, + SlotStatus::CreatedBank => Self::SlotCreatedBank, + SlotStatus::Dead => Self::SlotDead, + } + } +} + +impl PartialEq for CommitmentLevel { + fn eq(&self, other: &SlotStatus) -> bool { + match self { + Self::Processed if *other == SlotStatus::Processed => true, + Self::Confirmed if *other == SlotStatus::Confirmed => true, + Self::Finalized if *other == SlotStatus::Finalized => true, + _ => false, + } + } +} + +impl SlotStatus { + pub const fn as_str(&self) -> &'static str { + match self { + Self::Processed => "processed", + Self::Confirmed => "confirmed", + Self::Finalized => "finalized", + Self::FirstShredReceived => "first_shread_received", + Self::Completed => "completed", + Self::CreatedBank => "created_bank", + Self::Dead => "dead", + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageSlot { + pub slot: Slot, + pub parent: Option, + pub status: SlotStatus, + pub dead_error: Option, + pub created_at: Timestamp, +} + +impl MessageSlot { + pub fn from_geyser(slot: Slot, parent: Option, status: &GeyserSlotStatus) -> Self { + Self { + slot, + parent, + status: status.into(), + dead_error: if let GeyserSlotStatus::Dead(error) = status { + Some(error.clone()) + } else { + None + 
}, + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub fn from_update_oneof( + msg: &SubscribeUpdateSlot, + created_at: Timestamp, + ) -> FromUpdateOneofResult { + Ok(Self { + slot: msg.slot, + parent: msg.parent, + status: SlotStatusProto::try_from(msg.status) + .map_err(|_| "failed to parse slot status")? + .into(), + dead_error: msg.dead_error.clone(), + created_at, + }) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageAccountInfo { + pub pubkey: Pubkey, + pub lamports: u64, + pub owner: Pubkey, + pub executable: bool, + pub rent_epoch: u64, + pub data: Vec, + pub write_version: u64, + pub txn_signature: Option, +} + +impl MessageAccountInfo { + pub fn from_geyser(info: &ReplicaAccountInfoV3<'_>) -> Self { + Self { + pubkey: Pubkey::try_from(info.pubkey).expect("valid Pubkey"), + lamports: info.lamports, + owner: Pubkey::try_from(info.owner).expect("valid Pubkey"), + executable: info.executable, + rent_epoch: info.rent_epoch, + data: info.data.into(), + write_version: info.write_version, + txn_signature: info.txn.map(|txn| *txn.signature()), + } + } + + pub fn from_update_oneof(msg: SubscribeUpdateAccountInfo) -> FromUpdateOneofResult { + Ok(Self { + pubkey: Pubkey::try_from(msg.pubkey.as_slice()).map_err(|_| "invalid pubkey length")?, + lamports: msg.lamports, + owner: Pubkey::try_from(msg.owner.as_slice()).map_err(|_| "invalid owner length")?, + executable: msg.executable, + rent_epoch: msg.rent_epoch, + data: msg.data, + write_version: msg.write_version, + txn_signature: msg + .txn_signature + .map(|sig| { + Signature::try_from(sig.as_slice()).map_err(|_| "invalid signature length") + }) + .transpose()?, + }) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageAccount { + pub account: Arc, + pub slot: Slot, + pub is_startup: bool, + pub created_at: Timestamp, +} + +impl MessageAccount { + pub fn from_geyser(info: &ReplicaAccountInfoV3<'_>, slot: Slot, is_startup: bool) -> Self { + Self { + account: 
Arc::new(MessageAccountInfo::from_geyser(info)), + slot, + is_startup, + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub fn from_update_oneof( + msg: SubscribeUpdateAccount, + created_at: Timestamp, + ) -> FromUpdateOneofResult { + Ok(Self { + account: Arc::new(MessageAccountInfo::from_update_oneof( + msg.account.ok_or("account message should be defined")?, + )?), + slot: msg.slot, + is_startup: msg.is_startup, + created_at, + }) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageTransactionInfo { + pub signature: Signature, + pub is_vote: bool, + pub transaction: confirmed_block::Transaction, + pub meta: confirmed_block::TransactionStatusMeta, + pub index: usize, + pub account_keys: HashSet, +} + +impl MessageTransactionInfo { + pub fn from_geyser(info: &ReplicaTransactionInfoV2<'_>) -> Self { + let account_keys = info + .transaction + .message() + .account_keys() + .iter() + .copied() + .collect(); + + Self { + signature: *info.signature, + is_vote: info.is_vote, + transaction: convert_to::create_transaction(info.transaction), + meta: convert_to::create_transaction_meta(info.transaction_status_meta), + index: info.index, + account_keys, + } + } + + pub fn from_update_oneof(msg: SubscribeUpdateTransactionInfo) -> FromUpdateOneofResult { + Ok(Self { + signature: Signature::try_from(msg.signature.as_slice()) + .map_err(|_| "invalid signature length")?, + is_vote: msg.is_vote, + transaction: msg + .transaction + .ok_or("transaction message should be defined")?, + meta: msg.meta.ok_or("meta message should be defined")?, + index: msg.index as usize, + account_keys: HashSet::new(), + }) + } + + pub fn fill_account_keys(&mut self) -> FromUpdateOneofResult<()> { + let mut account_keys = HashSet::new(); + + // static + if let Some(pubkeys) = self + .transaction + .message + .as_ref() + .map(|msg| msg.account_keys.as_slice()) + { + for pubkey in pubkeys { + account_keys.insert( + Pubkey::try_from(pubkey.as_slice()).map_err(|_| "invalid pubkey 
length")?, + ); + } + } + + // dynamic + for pubkey in self.meta.loaded_writable_addresses.iter() { + account_keys + .insert(Pubkey::try_from(pubkey.as_slice()).map_err(|_| "invalid pubkey length")?); + } + for pubkey in self.meta.loaded_readonly_addresses.iter() { + account_keys + .insert(Pubkey::try_from(pubkey.as_slice()).map_err(|_| "invalid pubkey length")?); + } + + self.account_keys = account_keys; + Ok(()) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageTransaction { + pub transaction: Arc, + pub slot: u64, + pub created_at: Timestamp, +} + +impl MessageTransaction { + pub fn from_geyser(info: &ReplicaTransactionInfoV2<'_>, slot: Slot) -> Self { + Self { + transaction: Arc::new(MessageTransactionInfo::from_geyser(info)), + slot, + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub fn from_update_oneof( + msg: SubscribeUpdateTransaction, + created_at: Timestamp, + ) -> FromUpdateOneofResult { + Ok(Self { + transaction: Arc::new(MessageTransactionInfo::from_update_oneof( + msg.transaction + .ok_or("transaction message should be defined")?, + )?), + slot: msg.slot, + created_at, + }) + } +} + +#[derive(Debug, Clone, Copy, PartialEq)] +pub struct MessageEntry { + pub slot: u64, + pub index: usize, + pub num_hashes: u64, + pub hash: Hash, + pub executed_transaction_count: u64, + pub starting_transaction_index: u64, + pub created_at: Timestamp, +} + +impl MessageEntry { + pub fn from_geyser(info: &ReplicaEntryInfoV2) -> Self { + Self { + slot: info.slot, + index: info.index, + num_hashes: info.num_hashes, + hash: Hash::new_from_array(<[u8; HASH_BYTES]>::try_from(info.hash).unwrap()), + executed_transaction_count: info.executed_transaction_count, + starting_transaction_index: info + .starting_transaction_index + .try_into() + .expect("failed convert usize to u64"), + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub fn from_update_oneof( + msg: &SubscribeUpdateEntry, + created_at: Timestamp, + ) -> FromUpdateOneofResult { 
+ Ok(Self { + slot: msg.slot, + index: msg.index as usize, + num_hashes: msg.num_hashes, + hash: Hash::new_from_array( + <[u8; HASH_BYTES]>::try_from(msg.hash.as_slice()) + .map_err(|_| "invalid hash length")?, + ), + executed_transaction_count: msg.executed_transaction_count, + starting_transaction_index: msg.starting_transaction_index, + created_at, + }) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageBlockMeta { + pub block_meta: SubscribeUpdateBlockMeta, + pub created_at: Timestamp, +} + +impl Deref for MessageBlockMeta { + type Target = SubscribeUpdateBlockMeta; + + fn deref(&self) -> &Self::Target { + &self.block_meta + } +} + +impl DerefMut for MessageBlockMeta { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.block_meta + } +} + +impl MessageBlockMeta { + pub fn from_geyser(info: &ReplicaBlockInfoV4<'_>) -> Self { + Self { + block_meta: SubscribeUpdateBlockMeta { + parent_slot: info.parent_slot, + slot: info.slot, + parent_blockhash: info.parent_blockhash.to_string(), + blockhash: info.blockhash.to_string(), + rewards: Some(convert_to::create_rewards_obj( + &info.rewards.rewards, + info.rewards.num_partitions, + )), + block_time: info.block_time.map(convert_to::create_timestamp), + block_height: info.block_height.map(convert_to::create_block_height), + executed_transaction_count: info.executed_transaction_count, + entries_count: info.entry_count, + }, + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub const fn from_update_oneof( + block_meta: SubscribeUpdateBlockMeta, + created_at: Timestamp, + ) -> Self { + Self { + block_meta, + created_at, + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct MessageBlock { + pub meta: Arc, + pub transactions: Vec>, + pub updated_account_count: u64, + pub accounts: Vec>, + pub entries: Vec>, + pub created_at: Timestamp, +} + +impl MessageBlock { + pub fn new( + meta: Arc, + transactions: Vec>, + accounts: Vec>, + entries: Vec>, + ) -> Self { + Self { + meta, + 
transactions, + updated_account_count: accounts.len() as u64, + accounts, + entries, + created_at: Timestamp::from(SystemTime::now()), + } + } + + pub fn from_update_oneof( + msg: SubscribeUpdateBlock, + created_at: Timestamp, + ) -> FromUpdateOneofResult { + Ok(Self { + meta: Arc::new(MessageBlockMeta { + block_meta: SubscribeUpdateBlockMeta { + slot: msg.slot, + blockhash: msg.blockhash, + rewards: msg.rewards, + block_time: msg.block_time, + block_height: msg.block_height, + parent_slot: msg.parent_slot, + parent_blockhash: msg.parent_blockhash, + executed_transaction_count: msg.executed_transaction_count, + entries_count: msg.entries_count, + }, + created_at, + }), + transactions: msg + .transactions + .into_iter() + .map(|tx| MessageTransactionInfo::from_update_oneof(tx).map(Arc::new)) + .collect::, _>>()?, + updated_account_count: msg.updated_account_count, + accounts: msg + .accounts + .into_iter() + .map(|account| MessageAccountInfo::from_update_oneof(account).map(Arc::new)) + .collect::, _>>()?, + entries: msg + .entries + .iter() + .map(|entry| MessageEntry::from_update_oneof(entry, created_at).map(Arc::new)) + .collect::, _>>()?, + created_at, + }) + } +} + +#[derive(Debug, Clone, PartialEq)] +pub enum Message { + Slot(MessageSlot), + Account(MessageAccount), + Transaction(MessageTransaction), + Entry(Arc), + BlockMeta(Arc), + Block(Arc), +} + +impl Message { + #[allow(clippy::missing_const_for_fn)] + pub fn get_slot(&self) -> u64 { + match self { + Self::Slot(msg) => msg.slot, + Self::Account(msg) => msg.slot, + Self::Transaction(msg) => msg.slot, + Self::Entry(msg) => msg.slot, + Self::BlockMeta(msg) => msg.slot, + Self::Block(msg) => msg.meta.slot, + } + } + + pub fn from_update_oneof( + oneof: UpdateOneof, + created_at: Timestamp, + ) -> FromUpdateOneofResult { + Ok(match oneof { + UpdateOneof::Account(msg) => { + Self::Account(MessageAccount::from_update_oneof(msg, created_at)?) 
+ } + UpdateOneof::Slot(msg) => Self::Slot(MessageSlot::from_update_oneof(&msg, created_at)?), + UpdateOneof::Transaction(msg) => { + Self::Transaction(MessageTransaction::from_update_oneof(msg, created_at)?) + } + UpdateOneof::TransactionStatus(_) => { + return Err("TransactionStatus message is not supported") + } + UpdateOneof::Block(msg) => { + Self::Block(Arc::new(MessageBlock::from_update_oneof(msg, created_at)?)) + } + UpdateOneof::Ping(_) => return Err("Ping message is not supported"), + UpdateOneof::Pong(_) => return Err("Pong message is not supported"), + UpdateOneof::BlockMeta(msg) => Self::BlockMeta(Arc::new( + MessageBlockMeta::from_update_oneof(msg, created_at), + )), + UpdateOneof::Entry(msg) => { + Self::Entry(Arc::new(MessageEntry::from_update_oneof(&msg, created_at)?)) + } + }) + } +} diff --git a/vendor/laserstream-core-proto/src/plugin/mod.rs b/vendor/laserstream-core-proto/src/plugin/mod.rs new file mode 100644 index 00000000..7cac8b9b --- /dev/null +++ b/vendor/laserstream-core-proto/src/plugin/mod.rs @@ -0,0 +1,8 @@ +pub mod filter; +pub mod message; + +pub mod proto { + #![allow(clippy::clone_on_ref_ptr)] + #![allow(clippy::missing_const_for_fn)] + tonic::include_proto!("geyser.Geyser"); +} diff --git a/vendor/yellowstone-grpc-client/Cargo.toml b/vendor/yellowstone-grpc-client/Cargo.toml new file mode 100644 index 00000000..fcd0686c --- /dev/null +++ b/vendor/yellowstone-grpc-client/Cargo.toml @@ -0,0 +1,82 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. 
[package]
edition = "2021"
name = "yellowstone-grpc-client"
version = "12.2.0"
authors = ["Triton One"]
build = false
publish = true
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Yellowstone gRPC Geyser Simple Client"
homepage = "https://triton.one"
readme = "README.md"
keywords = ["solana"]
license = "Apache-2.0"
repository = "https://github.com/rpcpool/yellowstone-grpc"

[features]
# Forwarded to the proto crate: expose account data as raw bytes.
account-data-as-bytes = ["yellowstone-grpc-proto/account-data-as-bytes"]

[lib]
name = "yellowstone_grpc_client"
path = "src/lib.rs"

[dependencies.bytes]
version = "1.10.1"

[dependencies.futures]
version = "0.3.24"

[dependencies.hyper-util]
version = "0.1.7"

[dependencies.thiserror]
version = "2.0.16"

[dependencies.tokio]
version = "1.47.1"

[dependencies.tonic]
version = "0.14.0"
features = ["tls-native-roots"]

[dependencies.tonic-health]
version = "0.14.0"

[dependencies.tower]
version = "0.4"
features = ["util"]

[dependencies.yellowstone-grpc-proto]
version = "12.1.0"
features = [
    "tonic",
    "tonic-compression",
]
default-features = false

[dev-dependencies.tokio]
version = "1.47.1"
features = [
    "rt-multi-thread",
    "macros",
]

# Vendored copy keeps the upstream lint policy.
[lints.clippy]
clone_on_ref_ptr = "deny"
missing_const_for_fn = "deny"
trivially_copy_pass_by_ref = "deny"
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 Grafana Labs + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/yellowstone-grpc-client/README.md b/vendor/yellowstone-grpc-client/README.md new file mode 100644 index 00000000..b92b019b --- /dev/null +++ b/vendor/yellowstone-grpc-client/README.md @@ -0,0 +1,3 @@ +# Simple gRPC Client to Yellowstone gRPC Geyser + +See usage example in [repository](https://github.com/rpcpool/yellowstone-grpc). 
diff --git a/vendor/yellowstone-grpc-client/src/lib.rs b/vendor/yellowstone-grpc-client/src/lib.rs new file mode 100644 index 00000000..941dec1e --- /dev/null +++ b/vendor/yellowstone-grpc-client/src/lib.rs @@ -0,0 +1,571 @@ +pub use tonic::{service::Interceptor, transport::ClientTlsConfig}; +use { + bytes::Bytes, + futures::{ + channel::mpsc, + sink::{Sink, SinkExt}, + stream::Stream, + }, + std::time::Duration, + tonic::{ + codec::{CompressionEncoding, Streaming}, + metadata::{errors::InvalidMetadataValue, AsciiMetadataValue, MetadataValue}, + service::interceptor::InterceptedService, + transport::channel::{Channel, Endpoint}, + Request, Response, Status, + }, + tonic_health::pb::{health_client::HealthClient, HealthCheckRequest, HealthCheckResponse}, + yellowstone_grpc_proto::prelude::{ + geyser_client::GeyserClient, CommitmentLevel, GetBlockHeightRequest, + GetBlockHeightResponse, GetLatestBlockhashRequest, GetLatestBlockhashResponse, + GetSlotRequest, GetSlotResponse, GetVersionRequest, GetVersionResponse, + IsBlockhashValidRequest, IsBlockhashValidResponse, PingRequest, PongResponse, + SubscribeDeshredRequest, SubscribeReplayInfoRequest, SubscribeReplayInfoResponse, + SubscribeRequest, SubscribeUpdate, SubscribeUpdateDeshred, + }, +}; + +#[cfg(unix)] +use {std::path::PathBuf, tokio::net::UnixStream, tonic::transport::Uri}; + +#[derive(Debug, Clone)] +pub struct InterceptorXToken { + pub x_token: Option, + pub x_request_snapshot: bool, +} + +impl Interceptor for InterceptorXToken { + fn call(&mut self, mut request: Request<()>) -> Result, Status> { + if let Some(x_token) = self.x_token.clone() { + request.metadata_mut().insert("x-token", x_token); + } + if self.x_request_snapshot { + request + .metadata_mut() + .insert("x-request-snapshot", MetadataValue::from_static("true")); + } + Ok(request) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum GeyserGrpcClientError { + #[error("gRPC status: {0}")] + TonicStatus(#[from] Status), + #[error("Failed to send 
subscribe request: {0}")] + SubscribeSendError(#[from] mpsc::SendError), +} + +pub type GeyserGrpcClientResult = Result; + +#[derive(Clone)] +pub struct GeyserGrpcClient { + pub health: HealthClient>, + pub geyser: GeyserClient>, +} + +impl GeyserGrpcClient<()> { + pub fn build_from_shared( + endpoint: impl Into, + ) -> GeyserGrpcBuilderResult { + Ok(GeyserGrpcBuilder::new(Endpoint::from_shared(endpoint)?)) + } + + pub fn build_from_static(endpoint: &'static str) -> GeyserGrpcBuilder { + GeyserGrpcBuilder::new(Endpoint::from_static(endpoint)) + } +} + +impl GeyserGrpcClient { + pub const fn new( + health: HealthClient>, + geyser: GeyserClient>, + ) -> Self { + Self { health, geyser } + } + + // Health + pub async fn health_check(&mut self) -> GeyserGrpcClientResult { + let request = HealthCheckRequest { + service: "geyser.Geyser".to_owned(), + }; + let response = self.health.check(request).await?; + Ok(response.into_inner()) + } + + pub async fn health_watch( + &mut self, + ) -> GeyserGrpcClientResult>> { + let request = HealthCheckRequest { + service: "geyser.Geyser".to_owned(), + }; + let response = self.health.watch(request).await?; + Ok(response.into_inner()) + } + + // Subscribe + pub async fn subscribe( + &mut self, + ) -> GeyserGrpcClientResult<( + impl Sink, + impl Stream>, + )> { + self.subscribe_with_request(None).await + } + + pub async fn subscribe_with_request( + &mut self, + request: Option, + ) -> GeyserGrpcClientResult<( + impl Sink + use, + impl Stream> + use, + )> { + let (mut subscribe_tx, subscribe_rx) = mpsc::unbounded(); + if let Some(request) = request { + subscribe_tx + .send(request) + .await + .map_err(GeyserGrpcClientError::SubscribeSendError)?; + } + let response: Response> = + self.geyser.subscribe(subscribe_rx).await?; + Ok((subscribe_tx, response.into_inner())) + } + + pub async fn subscribe_once( + &mut self, + request: SubscribeRequest, + ) -> GeyserGrpcClientResult>> { + self.subscribe_with_request(Some(request)) + .await + 
.map(|(_sink, stream)| stream) + } + + // Subscribe Deshred + pub async fn subscribe_deshred( + &mut self, + ) -> GeyserGrpcClientResult<( + impl Sink, + impl Stream>, + )> { + self.subscribe_deshred_with_request(None).await + } + + pub async fn subscribe_deshred_with_request( + &mut self, + request: Option, + ) -> GeyserGrpcClientResult<( + impl Sink + use, + impl Stream> + use, + )> { + let (mut subscribe_tx, subscribe_rx) = mpsc::unbounded(); + if let Some(request) = request { + subscribe_tx + .send(request) + .await + .map_err(GeyserGrpcClientError::SubscribeSendError)?; + } + let response: Response> = + self.geyser.subscribe_deshred(subscribe_rx).await?; + Ok((subscribe_tx, response.into_inner())) + } + + pub async fn subscribe_deshred_once( + &mut self, + request: SubscribeDeshredRequest, + ) -> GeyserGrpcClientResult>> { + self.subscribe_deshred_with_request(Some(request)) + .await + .map(|(_sink, stream)| stream) + } + + // RPC calls + pub async fn subscribe_replay_info( + &mut self, + ) -> GeyserGrpcClientResult { + let message = SubscribeReplayInfoRequest {}; + let request = tonic::Request::new(message); + let response = self.geyser.subscribe_replay_info(request).await?; + Ok(response.into_inner()) + } + + pub async fn ping(&mut self, count: i32) -> GeyserGrpcClientResult { + let message = PingRequest { count }; + let request = tonic::Request::new(message); + let response = self.geyser.ping(request).await?; + Ok(response.into_inner()) + } + + pub async fn get_latest_blockhash( + &mut self, + commitment: Option, + ) -> GeyserGrpcClientResult { + let request = tonic::Request::new(GetLatestBlockhashRequest { + commitment: commitment.map(|value| value as i32), + }); + let response = self.geyser.get_latest_blockhash(request).await?; + Ok(response.into_inner()) + } + + pub async fn get_block_height( + &mut self, + commitment: Option, + ) -> GeyserGrpcClientResult { + let request = tonic::Request::new(GetBlockHeightRequest { + commitment: commitment.map(|value| 
value as i32), + }); + let response = self.geyser.get_block_height(request).await?; + Ok(response.into_inner()) + } + + pub async fn get_slot( + &mut self, + commitment: Option, + ) -> GeyserGrpcClientResult { + let request = tonic::Request::new(GetSlotRequest { + commitment: commitment.map(|value| value as i32), + }); + let response = self.geyser.get_slot(request).await?; + Ok(response.into_inner()) + } + + pub async fn is_blockhash_valid( + &mut self, + blockhash: String, + commitment: Option, + ) -> GeyserGrpcClientResult { + let request = tonic::Request::new(IsBlockhashValidRequest { + blockhash, + commitment: commitment.map(|value| value as i32), + }); + let response = self.geyser.is_blockhash_valid(request).await?; + Ok(response.into_inner()) + } + + pub async fn get_version(&mut self) -> GeyserGrpcClientResult { + let request = tonic::Request::new(GetVersionRequest {}); + let response = self.geyser.get_version(request).await?; + Ok(response.into_inner()) + } +} + +#[derive(Debug, thiserror::Error)] +pub enum GeyserGrpcBuilderError { + #[error("Failed to parse x-token: {0}")] + MetadataValueError(#[from] InvalidMetadataValue), + #[error("gRPC transport error: {0}")] + TonicError(#[from] tonic::transport::Error), +} + +pub type GeyserGrpcBuilderResult = Result; + +#[derive(Debug)] +pub struct GeyserGrpcBuilder { + pub endpoint: Endpoint, + pub x_token: Option, + pub x_request_snapshot: bool, + pub send_compressed: Option, + pub accept_compressed: Option, + pub max_decoding_message_size: Option, + pub max_encoding_message_size: Option, +} + +impl GeyserGrpcBuilder { + // Create new builder + const fn new(endpoint: Endpoint) -> Self { + Self { + endpoint, + x_token: None, + x_request_snapshot: false, + send_compressed: None, + accept_compressed: None, + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + + pub fn from_shared(endpoint: impl Into) -> GeyserGrpcBuilderResult { + Ok(Self::new(Endpoint::from_shared(endpoint)?)) + } + + pub 
fn from_static(endpoint: &'static str) -> Self { + Self::new(Endpoint::from_static(endpoint)) + } + + // Create client + fn build( + self, + channel: Channel, + ) -> GeyserGrpcBuilderResult> { + let interceptor = InterceptorXToken { + x_token: self.x_token, + x_request_snapshot: self.x_request_snapshot, + }; + + let mut geyser = GeyserClient::with_interceptor(channel.clone(), interceptor.clone()); + if let Some(encoding) = self.send_compressed { + geyser = geyser.send_compressed(encoding); + } + if let Some(encoding) = self.accept_compressed { + geyser = geyser.accept_compressed(encoding); + } + if let Some(limit) = self.max_decoding_message_size { + geyser = geyser.max_decoding_message_size(limit); + } + if let Some(limit) = self.max_encoding_message_size { + geyser = geyser.max_encoding_message_size(limit); + } + + Ok(GeyserGrpcClient::new( + HealthClient::with_interceptor(channel, interceptor), + geyser, + )) + } + + pub async fn connect( + self, + ) -> GeyserGrpcBuilderResult> { + let channel = self.endpoint.connect().await?; + self.build(channel) + } + + pub fn connect_lazy( + self, + ) -> GeyserGrpcBuilderResult> { + let channel = self.endpoint.connect_lazy(); + self.build(channel) + } + + /// Connect to a gRPC server over a Unix Domain Socket. + /// + /// The `path` is the filesystem path to the socket (e.g. "/tmp/yellowstone.sock"). + /// tonic requires a dummy HTTP URI for the channel, but the actual transport + /// goes through the UDS connector. + #[cfg(unix)] + pub async fn connect_uds( + self, + path: impl Into, + ) -> GeyserGrpcBuilderResult> { + let path = path.into(); + + // tonic needs an Endpoint to hang config off of, but the URI is ignored + // by the connector — all traffic goes through the UnixStream. 
+ let channel = Endpoint::from_static("http://[::]:0") + .connect_with_connector(tower::service_fn(move |_: Uri| { + let path = path.clone(); + async move { + let stream = UnixStream::connect(path).await?; + Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new(stream)) + } + })) + .await?; + + self.build(channel) + } + + // Set x-token + pub fn x_token(self, x_token: Option) -> GeyserGrpcBuilderResult + where + T: TryInto, + { + Ok(Self { + x_token: x_token.map(|x_token| x_token.try_into()).transpose()?, + ..self + }) + } + + // Include `x-request-snapshot` + pub fn set_x_request_snapshot(self, value: bool) -> Self { + Self { + x_request_snapshot: value, + ..self + } + } + + // Endpoint options + pub fn connect_timeout(self, dur: Duration) -> Self { + Self { + endpoint: self.endpoint.connect_timeout(dur), + ..self + } + } + + pub fn buffer_size(self, sz: impl Into>) -> Self { + Self { + endpoint: self.endpoint.buffer_size(sz), + ..self + } + } + + pub fn http2_adaptive_window(self, enabled: bool) -> Self { + Self { + endpoint: self.endpoint.http2_adaptive_window(enabled), + ..self + } + } + + pub fn http2_keep_alive_interval(self, interval: Duration) -> Self { + Self { + endpoint: self.endpoint.http2_keep_alive_interval(interval), + ..self + } + } + + pub fn initial_connection_window_size(self, sz: impl Into>) -> Self { + Self { + endpoint: self.endpoint.initial_connection_window_size(sz), + ..self + } + } + + pub fn initial_stream_window_size(self, sz: impl Into>) -> Self { + Self { + endpoint: self.endpoint.initial_stream_window_size(sz), + ..self + } + } + + pub fn keep_alive_timeout(self, duration: Duration) -> Self { + Self { + endpoint: self.endpoint.keep_alive_timeout(duration), + ..self + } + } + + pub fn keep_alive_while_idle(self, enabled: bool) -> Self { + Self { + endpoint: self.endpoint.keep_alive_while_idle(enabled), + ..self + } + } + + pub fn tcp_keepalive(self, tcp_keepalive: Option) -> Self { + Self { + endpoint: 
self.endpoint.tcp_keepalive(tcp_keepalive), + ..self + } + } + + pub fn tcp_nodelay(self, enabled: bool) -> Self { + Self { + endpoint: self.endpoint.tcp_nodelay(enabled), + ..self + } + } + + pub fn timeout(self, dur: Duration) -> Self { + Self { + endpoint: self.endpoint.timeout(dur), + ..self + } + } + + pub fn tls_config(self, tls_config: ClientTlsConfig) -> GeyserGrpcBuilderResult { + Ok(Self { + endpoint: self.endpoint.tls_config(tls_config)?, + ..self + }) + } + + // Geyser options + pub fn send_compressed(self, encoding: CompressionEncoding) -> Self { + Self { + send_compressed: Some(encoding), + ..self + } + } + + pub fn accept_compressed(self, encoding: CompressionEncoding) -> Self { + Self { + accept_compressed: Some(encoding), + ..self + } + } + + pub fn max_decoding_message_size(self, limit: usize) -> Self { + Self { + max_decoding_message_size: Some(limit), + ..self + } + } + + pub fn max_encoding_message_size(self, limit: usize) -> Self { + Self { + max_encoding_message_size: Some(limit), + ..self + } + } +} + +#[cfg(test)] +mod tests { + use super::GeyserGrpcClient; + + #[tokio::test] + async fn test_channel_https_success() { + let endpoint = "https://ams17.rpcpool.com:443"; + let x_token = "1000000000000000000000000007"; + + let res = GeyserGrpcClient::build_from_shared(endpoint); + assert!(res.is_ok()); + + let res = res.unwrap().x_token(Some(x_token)); + assert!(res.is_ok()); + + let res = res.unwrap().connect_lazy(); + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_channel_http_success() { + let endpoint = "http://127.0.0.1:10000"; + let x_token = "1234567891012141618202224268"; + + let res = GeyserGrpcClient::build_from_shared(endpoint); + assert!(res.is_ok()); + + let res = res.unwrap().x_token(Some(x_token)); + assert!(res.is_ok()); + + let res = res.unwrap().connect_lazy(); + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_channel_empty_token_some() { + let endpoint = "http://127.0.0.1:10000"; + let x_token = ""; + 
+ let res = GeyserGrpcClient::build_from_shared(endpoint); + assert!(res.is_ok()); + + let res = res.unwrap().x_token(Some(x_token)); + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_channel_invalid_token_none() { + let endpoint = "http://127.0.0.1:10000"; + + let res = GeyserGrpcClient::build_from_shared(endpoint); + assert!(res.is_ok()); + + let res = res.unwrap().x_token::(None); + assert!(res.is_ok()); + + let res = res.unwrap().connect_lazy(); + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_channel_invalid_uri() { + let endpoint = "sites/files/images/picture.png"; + + let res = GeyserGrpcClient::build_from_shared(endpoint); + assert_eq!( + format!("{:?}", res), + "Err(TonicError(tonic::transport::Error(InvalidUri, InvalidUri(InvalidFormat))))" + .to_owned() + ); + } +}