diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 54e9b9a4fb..e3df5c7262 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -59,7 +59,21 @@ jobs: steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - - run: cargo check --no-default-features -p revm --features=${{ matrix.features }} + - run: cargo check --no-default-features -p revm --features=${{ matrix.features }} + + feature-checks: + name: features + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: taiki-e/install-action@cargo-hack + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: cargo hack + run: cargo hack check --feature-powerset --depth 1 clippy: name: clippy @@ -95,7 +109,7 @@ jobs: with: components: rustfmt - run: cargo fmt --all --check - + # Check crates correctly propagate features feature-propagation: runs-on: ubuntu-latest diff --git a/.github/workflows/ethereum-tests.yml b/.github/workflows/ethereum-tests.yml index 9e0e211b98..8bd366d24b 100644 --- a/.github/workflows/ethereum-tests.yml +++ b/.github/workflows/ethereum-tests.yml @@ -22,13 +22,6 @@ jobs: - name: Checkout sources uses: actions/checkout@v4 - - name: Checkout ethereum/tests - uses: actions/checkout@v4 - with: - repository: ethereum/legacytests - path: legacytests - submodules: recursive - - name: Install toolchain uses: dtolnay/rust-toolchain@stable @@ -41,9 +34,4 @@ jobs: - name: Run tests run: | - cross run --target ${{matrix.target}} --profile ${{ matrix.profile }} \ - ${{ matrix.target != 'i686-unknown-linux-gnu' && '--features gmp' || '' }} \ - -p revme -- statetest \ - legacytests/Cancun/GeneralStateTests/ \ - legacytests/Constantinople/GeneralStateTests/ ./scripts/run-tests.sh clean cross ${{ matrix.profile }} ${{ matrix.target }} diff --git a/.gitmodules b/.gitmodules index abaa1ea075..e69de29bb2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "legacytests"] - path = legacytests - url = https://github.com/ethereum/legacytests.git diff --git a/CHANGELOG.md b/CHANGELOG.md index afc456c1b7..4ef1d8d6c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,96 @@ Because this is workspace with multi libraries, tags will be simplified, and with this document you can match version of project with git tag. + +# v83 +date: 23.07.2025 + +Fusaka devnet-3 support. Performance regresion fixes. + +`revm-primitives`: 20.0.0 -> 20.1.0 (✓ API compatible changes) +`revm-bytecode`: 6.0.1 -> 6.1.0 (✓ API compatible changes) +`revm-database-interface`: 7.0.1 -> 7.0.2 (✓ API compatible changes) +`revm-context-interface`: 8.0.1 -> 9.0.0 (⚠ API breaking changes) +`revm-context`: 8.0.3 -> 8.0.4 (✓ API compatible changes) +`revm-interpreter`: 23.0.2 -> 24.0.0 (⚠ API breaking changes) +`revm-precompile`: 24.0.1 -> 25.0.0 (⚠ API breaking changes) +`revm-handler`: 8.0.3 -> 8.1.0 (✓ API compatible changes) +`revm-inspector`: 8.0.3 -> 8.1.0 (✓ API compatible changes) +`revm`: 27.0.3 -> 27.1.0 (✓ API compatible changes) +`revme`: 7.0.4 -> 7.1.0 (✓ API compatible changes) +`op-revm`: 8.0.3 -> 8.1.0 (✓ API compatible changes) +`revm-state`: 7.0.1 -> 7.0.2 +`revm-database`: 7.0.1 -> 7.0.2 +`revm-statetest-types`: 8.0.4 -> 8.0.5 + +# v82 +date 14.07.2025 + +Fix for inspector not calling `step_end`. 
+ +* `revm-context`: 8.0.2 -> 8.0.3 (✓ API compatible changes) +* `revm-interpreter`: 23.0.1 -> 23.0.2 (✓ API compatible changes) +* `revm-precompile`: 24.0.0 -> 24.0.1 (✓ API compatible changes) +* `revm-handler`: 8.0.2 -> 8.0.3 (✓ API compatible changes) +* `revm-inspector`: 8.0.2 -> 8.0.3 (✓ API compatible changes) +* `revme`: 7.0.3 -> 7.0.4 (✓ API compatible changes) +* `op-revm`: 8.0.2 -> 8.0.3 (✓ API compatible changes) +* `custom_precompile_journal`: 0.1.0 +* `revm`: 27.0.2 -> 27.0.3 +* `revm-statetest-types`: 8.0.3 -> 8.0.4 + +# v81 +date: 03.07.2025 + +Fix inspector step_end panic for opcode fn. + +* `revm-bytecode`: 6.0.0 -> 6.0.1 (✓ API compatible changes) +* `revm-handler`: 8.0.1 -> 8.0.2 (✓ API compatible changes) +* `revm-inspector`: 8.0.1 -> 8.0.2 (✓ API compatible changes) +* `revme`: 7.0.2 -> 7.0.3 (✓ API compatible changes) +* `custom_precompile_journal`: 0.1.0 +* `revm-state`: 7.0.0 -> 7.0.1 +* `revm-database-interface`: 7.0.0 -> 7.0.1 +* `revm-context-interface`: 8.0.0 -> 8.0.1 +* `revm-context`: 8.0.1 -> 8.0.2 +* `revm-database`: 7.0.0 -> 7.0.1 +* `revm-interpreter`: 23.0.0 -> 23.0.1 +* `revm`: 27.0.1 -> 27.0.2 +* `revm-statetest-types`: 8.0.2 -> 8.0.3 +* `op-revm`: 8.0.1 -> 8.0.2 + +# v80 +date 01.07.2025 + +Fix `build` and `build_fill` for OpTransactionBuilder + +* `revm-context`: 8.0.0 -> 8.0.1 (✓ API compatible changes) +* `revm-handler`: 8.0.0 -> 8.0.1 (✓ API compatible changes) +* `revm-inspector`: 8.0.0 -> 8.0.1 (✓ API compatible changes) +* `revm`: 27.0.0 -> 27.0.1 (✓ API compatible changes) +* `op-revm`: 8.0.0 -> 8.0.1 (✓ API compatible changes) + +# v79 +date: 01.07.2025 + +Fix for bytecode eq operation. + +* `revm-bytecode`: 5.0.0 -> 6.0.0 (⚠ API breaking changes) +* `revm-state`: 6.0.0 -> 7.0.0 (✓ API compatible changes) +* `revm-database-interface`: 6.0.0 -> 7.0.0 (✓ API compatible changes) +* `revm-context-interface`: 7.0.1 -> 8.0.0 (⚠ API breaking changes) +* `revm-context`: 7.0.1 -> 8.0.0 (✓ API compatible changes) +* `revm-interpreter`: 22.0.1 -> 23.0.0 (✓ API compatible changes) +* `revm-precompile`: 23.0.0 -> 24.0.0 (✓ API compatible changes) +* `revm-handler`: 7.0.1 -> 8.0.0 (⚠ API breaking changes) +* `revm-inspector`: 7.0.1 -> 8.0.0 (✓ API compatible changes) +* `revm`: 26.0.1 -> 27.0.0 (✓ API compatible changes) +* `revm-statetest-types`: 8.0.1 -> 8.0.2 (✓ API compatible changes) +* `revme`: 7.0.1 -> 7.0.2 (✓ API compatible changes) +* `op-revm`: 7.0.1 -> 8.0.0 (⚠ API breaking changes) +* `revm-database`: 6.0.0 -> 7.0.0 + # v78 -date 20.05.2025 +date: 20.05.2025 Quick fix for not calling `frame_stack.clear()` https://github.com/bluealloy/revm/pull/2656 diff --git a/Cargo.lock b/Cargo.lock index 390ff12c51..09958e91f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,6 +1560,14 @@ dependencies = [ "memchr", ] +[[package]] +name = "custom_precompile_journal" +version = "0.1.0" +dependencies = [ + "anyhow", + "revm", +] + [[package]] name = "darling" version = "0.20.11" @@ -3026,7 +3034,7 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-revm" -version = "7.0.1" +version = "8.1.0" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -3765,7 +3773,7 @@ dependencies = [ [[package]] name = "revm" -version = "26.0.1" +version = "27.1.0" dependencies = [ "revm-bytecode", "revm-context", @@ -3784,7 +3792,7 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "5.0.0" +version = "6.1.0" dependencies = [ "bitvec", "once_cell", @@ -3796,7 +3804,7 @@ dependencies = [ [[package]] name = 
"revm-context" -version = "7.0.1" +version = "8.0.4" dependencies = [ "cfg-if", "derive-where", @@ -3811,7 +3819,7 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "7.0.1" +version = "9.0.0" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -3825,7 +3833,7 @@ dependencies = [ [[package]] name = "revm-database" -version = "6.0.0" +version = "7.0.2" dependencies = [ "alloy-eips", "alloy-provider", @@ -3843,10 +3851,11 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "6.0.0" +version = "7.0.2" dependencies = [ "anyhow", "auto_impl", + "either", "revm-primitives", "revm-state", "rstest", @@ -3856,7 +3865,7 @@ dependencies = [ [[package]] name = "revm-handler" -version = "7.0.1" +version = "8.1.0" dependencies = [ "alloy-eip7702", "alloy-provider", @@ -3878,7 +3887,7 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "7.0.1" +version = "8.1.0" dependencies = [ "auto_impl", "either", @@ -3895,7 +3904,7 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "22.0.1" +version = "24.0.0" dependencies = [ "bincode 2.0.1", "revm-bytecode", @@ -3906,7 +3915,7 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "23.0.0" +version = "25.0.0" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -3914,6 +3923,7 @@ dependencies = [ "ark-ff 0.5.0", "ark-serialize 0.5.0", "ark-std 0.5.0", + "arrayref", "aurora-engine-modexp", "blst", "c-kzg", @@ -3936,7 +3946,7 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.0.0" +version = "20.1.0" dependencies = [ "alloy-primitives", "num_enum", @@ -3945,7 +3955,7 @@ dependencies = [ [[package]] name = "revm-state" -version = "6.0.0" +version = "7.0.2" dependencies = [ "bitflags", "revm-bytecode", @@ -3955,16 +3965,18 @@ dependencies = [ [[package]] name = "revm-statetest-types" -version = "8.0.1" +version = "8.0.5" dependencies = [ + "k256", "revm", "serde", "serde_json", + "thiserror", ] [[package]] name = "revme" -version = "7.0.1" +version = "7.1.0" dependencies = [ "alloy-rlp", "alloy-sol-types", @@ -4438,6 +4450,16 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.10.7", + "sha2-asm", +] + +[[package]] +name = "sha2-asm" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" +dependencies = [ + "cc", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index dfff6509c0..b096e420d4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,31 +33,32 @@ members = [ "examples/erc20_gas", "examples/my_evm", "examples/custom_opcodes", + "examples/custom_precompile_journal", ] resolver = "2" default-members = ["crates/revm"] [workspace.dependencies] # revm -revm = { path = "crates/revm", version = "26.0.1", default-features = false } -primitives = { path = "crates/primitives", package = "revm-primitives", version = "20.0.0", default-features = false } -bytecode = { path = "crates/bytecode", package = "revm-bytecode", version = "5.0.0", default-features = false } -database = { path = "crates/database", package = "revm-database", version = "6.0.0", default-features = false } -database-interface = { path = "crates/database/interface", package = "revm-database-interface", version = "6.0.0", default-features = false } -state = { path = "crates/state", package = "revm-state", version = "6.0.0", default-features = false } -interpreter = { path = "crates/interpreter", package = "revm-interpreter", version = "22.0.1", default-features = false } -inspector = { 
path = "crates/inspector", package = "revm-inspector", version = "7.0.1", default-features = false } -precompile = { path = "crates/precompile", package = "revm-precompile", version = "23.0.0", default-features = false } -statetest-types = { path = "crates/statetest-types", package = "revm-statetest-types", version = "8.0.1", default-features = false } -context = { path = "crates/context", package = "revm-context", version = "7.0.1", default-features = false } -context-interface = { path = "crates/context/interface", package = "revm-context-interface", version = "7.0.1", default-features = false } -handler = { path = "crates/handler", package = "revm-handler", version = "7.0.1", default-features = false } -op-revm = { path = "crates/op-revm", package = "op-revm", version = "7.0.1", default-features = false } +revm = { path = "crates/revm", version = "27.1.0", default-features = false } +primitives = { path = "crates/primitives", package = "revm-primitives", version = "20.1.0", default-features = false } +bytecode = { path = "crates/bytecode", package = "revm-bytecode", version = "6.1.0", default-features = false } +database = { path = "crates/database", package = "revm-database", version = "7.0.2", default-features = false } +database-interface = { path = "crates/database/interface", package = "revm-database-interface", version = "7.0.2", default-features = false } +state = { path = "crates/state", package = "revm-state", version = "7.0.2", default-features = false } +interpreter = { path = "crates/interpreter", package = "revm-interpreter", version = "24.0.0", default-features = false } +inspector = { path = "crates/inspector", package = "revm-inspector", version = "8.1.0", default-features = false } +precompile = { path = "crates/precompile", package = "revm-precompile", version = "25.0.0", default-features = false } +statetest-types = { path = "crates/statetest-types", package = "revm-statetest-types", version = "8.0.5", default-features = false } +context = { path = "crates/context", package = "revm-context", version = "8.0.4", default-features = false } +context-interface = { path = "crates/context/interface", package = "revm-context-interface", version = "9.0.0", default-features = false } +handler = { path = "crates/handler", package = "revm-handler", version = "8.1.0", default-features = false } +op-revm = { path = "crates/op-revm", package = "op-revm", version = "8.1.0", default-features = false } # alloy alloy-eip2930 = { version = "0.2.1", default-features = false } alloy-eip7702 = { version = "0.6.1", default-features = false } -alloy-primitives = { version = "1.2.0", default-features = false, features = [] } +alloy-primitives = { version = "1.2.0", default-features = false } # alloy in examples, revme or feature flagged. alloy-rlp = { version = "0.3.12", default-features = false } diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md index 2423e1dbc1..d1bb31cd23 100644 --- a/MIGRATION_GUIDE.md +++ b/MIGRATION_GUIDE.md @@ -1,3 +1,20 @@ +# v82 tag (revm v27.1.0) from v81 tag (revm v27.0.3) + +* `ContextTr` gained `Host` supertrait. + * Previously Host was implemented for any T that has ContextTr, this restricts specializations. + https://github.com/bluealloy/revm/issues/2732 + * `Host` is moved to `revm-context-interface` + * If you custom struct that implement `ContextTr` you would need to manually implement `Host` trait, in most cases no action needed. +* In `revm-interpreter`, fn `cast_slice_to_u256` was removed and `push_slice` fn is added to `StackTrait`. 
+* `PrecompileOutput` now contains revert flag. + * It is safe to put to false. +* In `kzg` and `blake2` modules few internal functions were made private or removed. + +# v80 tag (revm v27.0.0) -> v81 tag ( revm v27.0.1) + +* Inspector fn `step_end` is now called even if Inspector `step` sets the action. Previously this was not the case. + * https://github.com/bluealloy/revm/pull/2687 + * this additionally fixes panic bug where `bytecode.opcode()` would panic in `step_end` # v70 tag (revm v22.0.2) -> v71 tag ( revm v23.0.0) diff --git a/bins/revme/CHANGELOG.md b/bins/revme/CHANGELOG.md index ff5f95367f..3932b96718 100644 --- a/bins/revme/CHANGELOG.md +++ b/bins/revme/CHANGELOG.md @@ -6,6 +6,43 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [7.1.0](https://github.com/bluealloy/revm/compare/revme-v7.0.4...revme-v7.1.0) - 2025-07-23 + +### Added + +- count inspector and bench test ([#2730](https://github.com/bluealloy/revm/pull/2730)) + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) + +### Other + +- back to hashbrown map in revme ([#2770](https://github.com/bluealloy/revm/pull/2770)) +- back to better map ([#2768](https://github.com/bluealloy/revm/pull/2768)) +- bump develop statetests to devnet-3 ([#2754](https://github.com/bluealloy/revm/pull/2754)) +- add clz_50 codspeed ([#2743](https://github.com/bluealloy/revm/pull/2743)) + +## [7.0.4](https://github.com/bluealloy/revm/compare/revme-v7.0.3...revme-v7.0.4) - 2025-07-14 + +### Other + +- incorrect StorageKey and StorageValue parameter order in burntpix benchmark ([#2704](https://github.com/bluealloy/revm/pull/2704)) + +## [7.0.3](https://github.com/bluealloy/revm/compare/revme-v7.0.2...revme-v7.0.3) - 2025-07-03 + +### Other + +- update Cargo.lock dependencies + +## [7.0.2](https://github.com/bluealloy/revm/compare/revme-v7.0.1...revme-v7.0.2) - 2025-06-30 + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- statetest runner cleanup ([#2665](https://github.com/bluealloy/revm/pull/2665)) +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/revme-v7.0.0...revme-v7.0.1) - 2025-06-20 ### Other diff --git a/bins/revme/Cargo.toml b/bins/revme/Cargo.toml index 842f54ecde..20c416ed6e 100644 --- a/bins/revme/Cargo.toml +++ b/bins/revme/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revme" description = "Rust Ethereum Virtual Machine Executable" -version = "7.0.1" +version = "7.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -10,13 +10,7 @@ repository.workspace = true [dependencies] # revm -revm = { workspace = true, features = [ - "std", - "c-kzg", - "blst", - "serde-json", - "hashbrown", -] } +revm = { workspace = true, features = ["std", "c-kzg", "blst", "hashbrown"] } primitives.workspace = true database.workspace = true database-interface.workspace = true @@ -24,7 +18,7 @@ state.workspace = true bytecode = { workspace = true, features = ["std", "parse"] } context.workspace = true context-interface.workspace = true -inspector = { workspace = true, features = ["std", "serde-json"] } +inspector = { workspace = true, features = ["std", "tracer"] } statetest-types.workspace = true criterion.workspace = true diff --git a/bins/revme/src/cmd/bench/analysis.rs b/bins/revme/src/cmd/bench/analysis.rs index 6b0a45322c..e8fdb3704f 100644 --- a/bins/revme/src/cmd/bench/analysis.rs +++ 
b/bins/revme/src/cmd/bench/analysis.rs @@ -15,12 +15,12 @@ pub fn run(criterion: &mut Criterion) { let context = Context::mainnet() .with_db(BenchmarkDB::new_bytecode(bytecode)) .modify_cfg_chained(|c| c.disable_nonce_check = true); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - data: bytes!("8035F0CE"), - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .data(bytes!("8035F0CE")) + .build() + .unwrap(); let mut evm = context.build_mainnet(); criterion.bench_function("analysis", |b| { b.iter_batched( diff --git a/bins/revme/src/cmd/bench/burntpix.rs b/bins/revme/src/cmd/bench/burntpix.rs index 3a191d064b..1e65627fe4 100644 --- a/bins/revme/src/cmd/bench/burntpix.rs +++ b/bins/revme/src/cmd/bench/burntpix.rs @@ -41,13 +41,13 @@ pub fn run(criterion: &mut Criterion) { .modify_cfg_chained(|c| c.disable_nonce_check = true) .build_mainnet(); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BURNTPIX_MAIN_ADDRESS), - data: run_call_data.clone().into(), - gas_limit: u64::MAX, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BURNTPIX_MAIN_ADDRESS)) + .data(run_call_data.clone().into()) + .gas_limit(u64::MAX) + .build() + .unwrap(); criterion.bench_function("burntpix", |b| { b.iter_batched( @@ -163,8 +163,8 @@ fn init_db() -> CacheDB { cache_db .insert_account_storage( BURNTPIX_MAIN_ADDRESS, - StorageValue::from(2), - StorageKey::from_be_bytes(*STORAGE_TWO), + StorageKey::from(2), + StorageValue::from_be_bytes(*STORAGE_TWO), ) .unwrap(); diff --git a/bins/revme/src/cmd/bench/gas_cost_estimator.rs b/bins/revme/src/cmd/bench/gas_cost_estimator.rs index 5348386a1e..d596ac1dab 100644 --- a/bins/revme/src/cmd/bench/gas_cost_estimator.rs +++ b/bins/revme/src/cmd/bench/gas_cost_estimator.rs @@ -26,12 +26,12 @@ pub fn run(criterion: &mut Criterion) { .modify_cfg_chained(|c| c.disable_nonce_check = true) .build_mainnet(); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - gas_limit: 1_000_000_000, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(1_000_000_000) + .build() + .unwrap(); criterion.bench_function(name, |b| { b.iter_batched( diff --git a/bins/revme/src/cmd/bench/gas_cost_estimator_sample.csv b/bins/revme/src/cmd/bench/gas_cost_estimator_sample.csv index 3086610bc9..29eb985378 100644 --- a/bins/revme/src/cmd/bench/gas_cost_estimator_sample.csv +++ b/bins/revme/src/cmd/bench/gas_cost_estimator_sample.csv @@ -135,4 +135,5 @@ SWAP12_50,SWAP12,50,600360036003600360036003600360036003600360036003600360036003 
SWAP13_50,SWAP13,50,600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360039c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c9c SWAP14_50,SWAP14,50,6003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360039d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d SWAP15_50,SWAP15,50,6003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360039e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e9e -SWAP16_50,SWAP16,50,60036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360039f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f \ No newline at end of file 
+SWAP16_50,SWAP16,50,60036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360039f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f9f +CLZ_50,CLZ,50,6000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006000600060006003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360036003600360031e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e501e5050505050505050505050 \ No newline at end of file diff --git a/bins/revme/src/cmd/bench/snailtracer.rs b/bins/revme/src/cmd/bench/snailtracer.rs index e9713364c3..0228b3df4c 100644 --- a/bins/revme/src/cmd/bench/snailtracer.rs +++ b/bins/revme/src/cmd/bench/snailtracer.rs @@ -1,10 +1,11 @@ use context::TxEnv; use criterion::Criterion; use database::{BenchmarkDB, BENCH_CALLER, BENCH_TARGET}; +use inspector::CountInspector; use revm::{ bytecode::Bytecode, primitives::{bytes, hex, Bytes, TxKind}, - Context, ExecuteEvm, MainBuilder, MainContext, + Context, ExecuteEvm, InspectEvm, MainBuilder, MainContext, }; pub fn run(criterion: &mut Criterion) { @@ -13,15 +14,16 @@ pub fn run(criterion: &mut Criterion) { let mut evm = Context::mainnet() .with_db(BenchmarkDB::new_bytecode(bytecode.clone())) .modify_cfg_chained(|c| c.disable_nonce_check = true) - .build_mainnet(); + .build_mainnet() + .with_inspector(CountInspector::new()); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - data: bytes!("30627b7c"), - gas_limit: 1_000_000_000, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .data(bytes!("30627b7c")) + .gas_limit(1_000_000_000) + .build() + .unwrap(); criterion.bench_function("snailtracer", |b| { b.iter_batched( @@ -35,6 +37,19 @@ pub fn run(criterion: &mut Criterion) { criterion::BatchSize::SmallInput, ); }); + + criterion.bench_function("analysis-inspector", |b| { + b.iter_batched( + || { + 
// create a transaction input + tx.clone() + }, + |input| { + let _ = evm.inspect_one_tx(input); + }, + criterion::BatchSize::SmallInput, + ); + }); } const BYTES: &str = include_str!("snailtracer.hex"); diff --git a/bins/revme/src/cmd/bench/transfer.rs b/bins/revme/src/cmd/bench/transfer.rs index 14bddbaf87..0914eff60d 100644 --- a/bins/revme/src/cmd/bench/transfer.rs +++ b/bins/revme/src/cmd/bench/transfer.rs @@ -14,14 +14,14 @@ pub fn run(criterion: &mut Criterion) { .modify_cfg_chained(|cfg| cfg.disable_nonce_check = true) .build_mainnet(); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - value: U256::from(1), - gas_price: 1, - gas_priority_fee: None, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .value(U256::from(1)) + .gas_price(1) + .gas_priority_fee(None) + .build() + .unwrap(); evm.ctx.tx = tx.clone(); diff --git a/bins/revme/src/cmd/bench/transfer_multi.rs b/bins/revme/src/cmd/bench/transfer_multi.rs index 3a69af2e6d..9c3e85eefb 100644 --- a/bins/revme/src/cmd/bench/transfer_multi.rs +++ b/bins/revme/src/cmd/bench/transfer_multi.rs @@ -33,21 +33,20 @@ pub fn run(criterion: &mut Criterion) { .modify_cfg_chained(|cfg| cfg.disable_nonce_check = true) .build_mainnet(); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - value: U256::from(1), - gas_price: 0, - gas_priority_fee: None, - gas_limit: 30_000, - ..Default::default() - }; - let target = U256::from(10000); - let mut txs = vec![tx.clone(); 1000]; + let mut txs = Vec::with_capacity(1000); - for (i, tx_mut) in txs.iter_mut().enumerate() { - tx_mut.kind = TxKind::Call((target + U256::from(i)).into_address()); + for i in 0..1000 { + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call((target + U256::from(i)).into_address())) + .value(U256::from(1)) + .gas_price(0) + .gas_priority_fee(None) + .gas_limit(30_000) + .build() + .unwrap(); + txs.push(tx); } criterion.bench_function("transact_commit_1000txs", |b| { diff --git a/bins/revme/src/cmd/evmrunner.rs b/bins/revme/src/cmd/evmrunner.rs index 6fdd476a3c..8fda0fac16 100644 --- a/bins/revme/src/cmd/evmrunner.rs +++ b/bins/revme/src/cmd/evmrunner.rs @@ -87,13 +87,13 @@ impl Cmd { .with_db(db) .build_mainnet_with_inspector(TracerEip3155::new(Box::new(std::io::stdout()))); - let tx = TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - data: input, - nonce, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .data(input) + .nonce(nonce) + .build() + .unwrap(); if self.bench { let mut criterion = criterion::Criterion::default() diff --git a/bins/revme/src/cmd/statetest/merkle_trie.rs b/bins/revme/src/cmd/statetest/merkle_trie.rs index e294c4e4be..c6b6418787 100644 --- a/bins/revme/src/cmd/statetest/merkle_trie.rs +++ b/bins/revme/src/cmd/statetest/merkle_trie.rs @@ -1,10 +1,28 @@ +use std::convert::Infallible; + use alloy_rlp::{RlpEncodable, RlpMaxEncodedLen}; -use database::PlainAccount; +use context::result::{EVMError, ExecutionResult, HaltReason, InvalidTransaction}; +use database::{EmptyDB, PlainAccount, State}; use hash_db::Hasher; use plain_hasher::PlainHasher; use revm::primitives::{keccak256, Address, Log, B256, U256}; use triehash::sec_trie_root; +pub struct TestValidationResult { + pub logs_root: B256, + pub state_root: B256, +} + +pub fn compute_test_roots( + exec_result: &Result, EVMError>, + db: &State, +) -> TestValidationResult { + 
TestValidationResult { + logs_root: log_rlp_hash(exec_result.as_ref().map(|r| r.logs()).unwrap_or_default()), + state_root: state_merkle_trie_root(db.cache.trie_account()), + } +} + pub fn log_rlp_hash(logs: &[Log]) -> B256 { let mut out = Vec::with_capacity(alloy_rlp::list_length(logs)); alloy_rlp::encode_list(logs, &mut out); diff --git a/bins/revme/src/cmd/statetest/runner.rs b/bins/revme/src/cmd/statetest/runner.rs index 45b56ed2d9..f989daa614 100644 --- a/bins/revme/src/cmd/statetest/runner.rs +++ b/bins/revme/src/cmd/statetest/runner.rs @@ -1,29 +1,20 @@ -use super::{ - merkle_trie::{log_rlp_hash, state_merkle_trie_root}, - utils::recover_address, -}; -use context::either::Either; +use crate::cmd::statetest::merkle_trie::{compute_test_roots, TestValidationResult}; use database::State; use indicatif::{ProgressBar, ProgressDrawTarget}; use inspector::{inspectors::TracerEip3155, InspectCommitEvm}; use primitives::U256; use revm::{ - bytecode::Bytecode, context::{block::BlockEnv, cfg::CfgEnv, tx::TxEnv}, context_interface::{ - block::calc_excess_blob_gas, result::{EVMError, ExecutionResult, HaltReason, InvalidTransaction}, Cfg, }, database_interface::EmptyDB, - primitives::{ - eip4844::TARGET_BLOB_GAS_PER_BLOCK_CANCUN, hardfork::SpecId, keccak256, Bytes, TxKind, B256, - }, + primitives::{hardfork::SpecId, Bytes, B256}, Context, ExecuteCommitEvm, MainBuilder, MainContext, }; use serde_json::json; -use statetest_types::{SpecName, Test, TestSuite}; - +use statetest_types::{SpecName, Test, TestSuite, TestUnit}; use std::{ convert::Infallible, fmt::Debug, @@ -38,6 +29,7 @@ use std::{ use thiserror::Error; use walkdir::{DirEntry, WalkDir}; +/// Error that occurs during test execution #[derive(Debug, Error)] #[error("Path: {path}\nName: {name}\nError: {kind}")] pub struct TestError { @@ -46,6 +38,7 @@ pub struct TestError { pub kind: TestErrorKind, } +/// Specific kind of error that occurred during test execution #[derive(Debug, Error)] pub enum TestErrorKind { #[error("logs root mismatch: got {got}, expected {expected}")] @@ -74,6 +67,9 @@ pub enum TestErrorKind { NoJsonFiles, } +/// Find all JSON test files in the given path +/// If path is a file, returns it in a vector +/// If path is a directory, recursively finds all .json files pub fn find_all_json_tests(path: &Path) -> Vec { if path.is_file() { vec![path.to_path_buf()] @@ -87,6 +83,8 @@ pub fn find_all_json_tests(path: &Path) -> Vec { } } +/// Check if a test should be skipped based on its filename +/// Some tests are known to be problematic or take too long fn skip_test(path: &Path) -> bool { let name = path.file_name().unwrap().to_str().unwrap(); @@ -119,6 +117,98 @@ fn skip_test(path: &Path) -> bool { ) } +struct TestExecutionContext<'a> { + name: &'a str, + unit: &'a TestUnit, + test: &'a Test, + cfg: &'a CfgEnv, + block: &'a BlockEnv, + tx: &'a TxEnv, + cache_state: &'a database::CacheState, + elapsed: &'a Arc>, + trace: bool, + print_json_outcome: bool, +} + +struct DebugContext<'a> { + name: &'a str, + path: &'a str, + index: usize, + test: &'a Test, + cfg: &'a CfgEnv, + block: &'a BlockEnv, + tx: &'a TxEnv, + cache_state: &'a database::CacheState, + error: &'a TestErrorKind, +} + +fn build_json_output( + test: &Test, + test_name: &str, + exec_result: &Result, EVMError>, + validation: &TestValidationResult, + spec: SpecId, + error: Option, +) -> serde_json::Value { + json!({ + "stateRoot": validation.state_root, + "logsRoot": validation.logs_root, + "output": exec_result.as_ref().ok().and_then(|r| 
r.output().cloned()).unwrap_or_default(), + "gasUsed": exec_result.as_ref().ok().map(|r| r.gas_used()).unwrap_or_default(), + "pass": error.is_none(), + "errorMsg": error.unwrap_or_default(), + "evmResult": format_evm_result(exec_result), + "postLogsHash": validation.logs_root, + "fork": spec, + "test": test_name, + "d": test.indexes.data, + "g": test.indexes.gas, + "v": test.indexes.value, + }) +} + +fn format_evm_result( + exec_result: &Result, EVMError>, +) -> String { + match exec_result { + Ok(r) => match r { + ExecutionResult::Success { reason, .. } => format!("Success: {reason:?}"), + ExecutionResult::Revert { .. } => "Revert".to_string(), + ExecutionResult::Halt { reason, .. } => format!("Halt: {reason:?}"), + }, + Err(e) => e.to_string(), + } +} + +fn validate_exception( + test: &Test, + exec_result: &Result, EVMError>, +) -> Result { + match (&test.expect_exception, exec_result) { + (None, Ok(_)) => Ok(false), // No exception expected, execution succeeded + (Some(_), Err(_)) => Ok(true), // Exception expected and occurred + _ => Err(TestErrorKind::UnexpectedException { + expected_exception: test.expect_exception.clone(), + got_exception: exec_result.as_ref().err().map(|e| e.to_string()), + }), + } +} + +fn validate_output( + expected_output: Option<&Bytes>, + actual_result: &ExecutionResult, +) -> Result<(), TestErrorKind> { + if let Some((expected, actual)) = expected_output.zip(actual_result.output()) { + if expected != actual { + return Err(TestErrorKind::UnexpectedOutput { + expected_output: Some(expected.clone()), + got_output: actual_result.output().cloned(), + }); + } + } + Ok(()) +} + fn check_evm_execution( test: &Test, expected_output: Option<&Bytes>, @@ -128,97 +218,71 @@ fn check_evm_execution( spec: SpecId, print_json_outcome: bool, ) -> Result<(), TestErrorKind> { - let logs_root = log_rlp_hash(exec_result.as_ref().map(|r| r.logs()).unwrap_or_default()); - let state_root = state_merkle_trie_root(db.cache.trie_account()); + let validation = compute_test_roots(exec_result, db); - let print_json_output = |error: Option| { + let print_json = |error: Option<&TestErrorKind>| { if print_json_outcome { - let json = json!({ - "stateRoot": state_root, - "logsRoot": logs_root, - "output": exec_result.as_ref().ok().and_then(|r| r.output().cloned()).unwrap_or_default(), - "gasUsed": exec_result.as_ref().ok().map(|r| r.gas_used()).unwrap_or_default(), - "pass": error.is_none(), - "errorMsg": error.unwrap_or_default(), - "evmResult": match exec_result { - Ok(r) => match r { - ExecutionResult::Success { reason, .. } => format!("Success: {reason:?}"), - ExecutionResult::Revert { .. } => "Revert".to_string(), - ExecutionResult::Halt { reason, .. } => format!("Halt: {reason:?}"), - }, - Err(e) => e.to_string(), - }, - "postLogsHash": logs_root, - "fork": spec, - "test": test_name, - "d": test.indexes.data, - "g": test.indexes.gas, - "v": test.indexes.value, - }); + let json = build_json_output( + test, + test_name, + exec_result, + &validation, + spec, + error.map(|e| e.to_string()), + ); eprintln!("{json}"); } }; - // If we expect exception revm should return error from execution. - // So we do not check logs and state root. - // - // Note that some tests that have exception and run tests from before state clear - // would touch the caller account and make it appear in state root calculation. - // This is not something that we would expect as invalid tx should not touch state. 
- // but as this is a cleanup of invalid tx it is not properly defined and in the end - // it does not matter. - // Test where this happens: `tests/GeneralStateTests/stTransactionTest/NoSrcAccountCreate.json` - // and you can check that we have only two "hash" values for before and after state clear. - match (&test.expect_exception, exec_result) { - // Do nothing - (None, Ok(result)) => { - // Check output - if let Some((expected_output, output)) = expected_output.zip(result.output()) { - if expected_output != output { - let kind = TestErrorKind::UnexpectedOutput { - expected_output: Some(expected_output.clone()), - got_output: result.output().cloned(), - }; - print_json_output(Some(kind.to_string())); - return Err(kind); - } - } - } - // Return okay, exception is expected. - (Some(_), Err(_)) => return Ok(()), - _ => { - let kind = TestErrorKind::UnexpectedException { - expected_exception: test.expect_exception.clone(), - got_exception: exec_result.clone().err().map(|e| e.to_string()), - }; - print_json_output(Some(kind.to_string())); - return Err(kind); - } + // Check if exception handling is correct + let exception_expected = validate_exception(test, exec_result).inspect_err(|e| { + print_json(Some(e)); + })?; + + // If exception was expected and occurred, we're done + if exception_expected { + print_json(None); + return Ok(()); + } + + // Validate output if execution succeeded + if let Ok(result) = exec_result { + validate_output(expected_output, result).inspect_err(|e| { + print_json(Some(e)); + })?; } - if logs_root != test.logs { - let kind = TestErrorKind::LogsRootMismatch { - got: logs_root, + // Validate logs root + if validation.logs_root != test.logs { + let error = TestErrorKind::LogsRootMismatch { + got: validation.logs_root, expected: test.logs, }; - print_json_output(Some(kind.to_string())); - return Err(kind); + print_json(Some(&error)); + return Err(error); } - if state_root != test.hash { - let kind = TestErrorKind::StateRootMismatch { - got: state_root, + // Validate state root + if validation.state_root != test.hash { + let error = TestErrorKind::StateRootMismatch { + got: validation.state_root, expected: test.hash, }; - print_json_output(Some(kind.to_string())); - return Err(kind); + print_json(Some(&error)); + return Err(error); } - print_json_output(None); - + print_json(None); Ok(()) } +/// Execute a single test suite file containing multiple tests +/// +/// # Arguments +/// * `path` - Path to the JSON test file +/// * `elapsed` - Shared counter for total execution time +/// * `trace` - Whether to enable EVM tracing +/// * `print_json_outcome` - Whether to print JSON formatted results pub fn execute_test_suite( path: &Path, elapsed: &Arc>, @@ -238,25 +302,11 @@ pub fn execute_test_suite( })?; for (name, unit) in suite.0 { - // Create database and insert cache - let mut cache_state = database::CacheState::new(false); - for (address, info) in unit.pre { - let code_hash = keccak256(&info.code); - let bytecode = Bytecode::new_raw_checked(info.code.clone()) - .unwrap_or(Bytecode::new_legacy(info.code)); - let acc_info = revm::state::AccountInfo { - balance: info.balance, - code_hash, - code: Some(bytecode), - nonce: info.nonce, - }; - cache_state.insert_account_with_storage(address, acc_info, info.storage); - } + // Prepare initial state + let cache_state = unit.state(); + // Setup base configuration let mut cfg = CfgEnv::default(); - let mut block = BlockEnv::default(); - let mut tx = TxEnv::default(); - // For mainnet cfg.chain_id = unit .env .current_chain_id @@ 
-264,313 +314,295 @@ pub fn execute_test_suite( .try_into() .unwrap_or(1); - // Block env - block.number = unit.env.current_number; - block.beneficiary = unit.env.current_coinbase; - block.timestamp = unit.env.current_timestamp; - block.gas_limit = unit.env.current_gas_limit.try_into().unwrap_or(u64::MAX); - block.basefee = unit - .env - .current_base_fee - .unwrap_or_default() - .try_into() - .unwrap_or(u64::MAX); - block.difficulty = unit.env.current_difficulty; - // After the Merge prevrandao replaces mix_hash field in block and replaced difficulty opcode in EVM. - block.prevrandao = unit.env.current_random; - - // Tx env - tx.caller = if let Some(address) = unit.transaction.sender { - address - } else { - recover_address(unit.transaction.secret_key.as_slice()).ok_or_else(|| TestError { - name: name.clone(), - path: path.clone(), - kind: TestErrorKind::UnknownPrivateKey(unit.transaction.secret_key), - })? - }; - tx.gas_price = unit - .transaction - .gas_price - .or(unit.transaction.max_fee_per_gas) - .unwrap_or_default() - .try_into() - .unwrap_or(u128::MAX); - tx.gas_priority_fee = unit - .transaction - .max_priority_fee_per_gas - .map(|b| u128::try_from(b).expect("max priority fee less than u128::MAX")); - // EIP-4844 - tx.blob_hashes = unit.transaction.blob_versioned_hashes.clone(); - tx.max_fee_per_blob_gas = unit - .transaction - .max_fee_per_blob_gas - .map(|b| u128::try_from(b).expect("max fee less than u128::MAX")) - .unwrap_or(u128::MAX); - // Post and execution - for (spec_name, tests) in unit.post { - // Constantinople was immediately extended by Petersburg. - // There isn't any production Constantinople transaction - // so we don't support it and skip right to Petersburg. - if spec_name == SpecName::Constantinople { + for (spec_name, tests) in &unit.post { + // Skip Constantinople spec + if *spec_name == SpecName::Constantinople { continue; } cfg.spec = spec_name.to_spec_id(); - // set default max blobs number to be 9 for prague - if cfg.spec.is_enabled_in(SpecId::PRAGUE) { + // Configure max blobs per spec + if cfg.spec.is_enabled_in(SpecId::OSAKA) { + cfg.set_max_blobs_per_tx(6); + } else if cfg.spec.is_enabled_in(SpecId::PRAGUE) { cfg.set_max_blobs_per_tx(9); } else { cfg.set_max_blobs_per_tx(6); } - // EIP-4844 - if let Some(current_excess_blob_gas) = unit.env.current_excess_blob_gas { - block.set_blob_excess_gas_and_price( - current_excess_blob_gas.to(), - cfg.blob_base_fee_update_fraction(), - ); - } else if let (Some(parent_blob_gas_used), Some(parent_excess_blob_gas)) = ( - unit.env.parent_blob_gas_used, - unit.env.parent_excess_blob_gas, - ) { - block.set_blob_excess_gas_and_price( - calc_excess_blob_gas( - parent_blob_gas_used.to(), - parent_excess_blob_gas.to(), - unit.env - .parent_target_blobs_per_block - .map(|i| i.to()) - .unwrap_or(TARGET_BLOB_GAS_PER_BLOCK_CANCUN), - ), - cfg.blob_base_fee_update_fraction(), - ); - } - - if cfg.spec.is_enabled_in(SpecId::MERGE) && block.prevrandao.is_none() { - // If spec is merge and prevrandao is not set, set it to default - block.prevrandao = Some(B256::default()); - } - - for (index, test) in tests.into_iter().enumerate() { - let Some(tx_type) = unit.transaction.tx_type(test.indexes.data) else { - if test.expect_exception.is_some() { - continue; - } else { - panic!("Invalid transaction type without expected exception"); + // Setup block environment for this spec + let block = unit.block_env(&cfg); + + for (index, test) in tests.iter().enumerate() { + // Setup transaction environment + let tx = match test.tx_env(&unit) { 
+ Ok(tx) => tx, + Err(_) if test.expect_exception.is_some() => continue, + Err(_) => { + return Err(TestError { + name: name.clone(), + path: path.clone(), + kind: TestErrorKind::UnknownPrivateKey(unit.transaction.secret_key), + }); } }; - tx.tx_type = tx_type as u8; - - tx.gas_limit = unit.transaction.gas_limit[test.indexes.gas].saturating_to(); - tx.data = unit - .transaction - .data - .get(test.indexes.data) - .unwrap() - .clone(); - - tx.nonce = u64::try_from(unit.transaction.nonce).unwrap(); - tx.value = unit.transaction.value[test.indexes.value]; - - tx.access_list = unit - .transaction - .access_lists - .get(test.indexes.data) - .cloned() - .flatten() - .unwrap_or_default(); - - tx.authorization_list = unit - .transaction - .authorization_list - .clone() - .map(|auth_list| { - auth_list - .into_iter() - .map(|i| Either::Left(i.into())) - .collect::>() - }) - .unwrap_or_default(); - - let to = match unit.transaction.to { - Some(add) => TxKind::Call(add), - None => TxKind::Create, - }; - tx.kind = to; - - let mut cache = cache_state.clone(); - cache.set_state_clear_flag(cfg.spec.is_enabled_in(SpecId::SPURIOUS_DRAGON)); - let mut state = database::State::builder() - .with_cached_prestate(cache) - .with_bundle_update() - .build(); - - let evm_context = Context::mainnet() - .with_block(&block) - .with_tx(&tx) - .with_cfg(&cfg) - .with_db(&mut state); - - // Do the deed - let timer = Instant::now(); - let (db, exec_result) = if trace { - let mut evm = evm_context.build_mainnet_with_inspector( - TracerEip3155::buffered(stderr()).without_summary(), - ); - let res = evm.inspect_tx_commit(&tx); - let db = evm.ctx.journaled_state.database; - (db, res) - } else { - let mut evm = evm_context.build_mainnet(); - let res = evm.transact_commit(&tx); - let db = evm.ctx.journaled_state.database; - (db, res) - }; - *elapsed.lock().unwrap() += timer.elapsed(); - let spec = cfg.spec(); - // Dump state and traces if test failed - let output = check_evm_execution( - &test, - unit.out.as_ref(), - &name, - &exec_result, - db, - spec, + + // Execute the test + let result = execute_single_test(TestExecutionContext { + name: &name, + unit: &unit, + test, + cfg: &cfg, + block: &block, + tx: &tx, + cache_state: &cache_state, + elapsed, + trace, print_json_outcome, - ); - let Err(e) = output else { - continue; - }; + }); + + if let Err(e) = result { + // Handle error with debug trace if needed + static FAILED: AtomicBool = AtomicBool::new(false); + if print_json_outcome || FAILED.swap(true, Ordering::SeqCst) { + return Err(TestError { + name: name.clone(), + path: path.clone(), + kind: e, + }); + } + + // Re-run with trace for debugging + debug_failed_test(DebugContext { + name: &name, + path: &path, + index, + test, + cfg: &cfg, + block: &block, + tx: &tx, + cache_state: &cache_state, + error: &e, + }); - // Print only once or if we are already in trace mode, just return error - // If trace is true that print_json_outcome will be also true. 
- static FAILED: AtomicBool = AtomicBool::new(false); - if print_json_outcome || FAILED.swap(true, Ordering::SeqCst) { return Err(TestError { - name: name.clone(), path: path.clone(), + name: name.clone(), kind: e, }); } - - // Re-build to run with tracing - let mut cache = cache_state.clone(); - cache.set_state_clear_flag(cfg.spec.is_enabled_in(SpecId::SPURIOUS_DRAGON)); - let mut state = database::State::builder() - .with_cached_prestate(cache) - .with_bundle_update() - .build(); - - println!("\nTraces:"); - - let mut evm = Context::mainnet() - .with_db(&mut state) - .with_block(&block) - .with_tx(&tx) - .with_cfg(&cfg) - .build_mainnet_with_inspector( - TracerEip3155::buffered(stderr()).without_summary(), - ); - - let _ = evm.inspect_tx_commit(&tx); - - println!("\nExecution result: {exec_result:#?}"); - println!("\nExpected exception: {:?}", test.expect_exception); - println!("\nState before: {cache_state:#?}"); - println!( - "\nState after: {:#?}", - evm.ctx.journaled_state.database.cache - ); - println!("\nSpecification: {:?}", cfg.spec); - println!("\nTx: {tx:#?}"); - println!("Block: {block:#?}"); - println!("Cfg: {cfg:#?}"); - println!("\nTest name: {name:?} (index: {index}, path: {path:?}) failed:\n{e}"); - - return Err(TestError { - path: path.clone(), - name: name.clone(), - kind: e, - }); } } } Ok(()) } -pub fn run( - test_files: Vec, - mut single_thread: bool, +fn execute_single_test(ctx: TestExecutionContext) -> Result<(), TestErrorKind> { + // Prepare state + let mut cache = ctx.cache_state.clone(); + cache.set_state_clear_flag(ctx.cfg.spec.is_enabled_in(SpecId::SPURIOUS_DRAGON)); + let mut state = database::State::builder() + .with_cached_prestate(cache) + .with_bundle_update() + .build(); + + let evm_context = Context::mainnet() + .with_block(ctx.block) + .with_tx(ctx.tx) + .with_cfg(ctx.cfg) + .with_db(&mut state); + + // Execute + let timer = Instant::now(); + let (db, exec_result) = if ctx.trace { + let mut evm = evm_context + .build_mainnet_with_inspector(TracerEip3155::buffered(stderr()).without_summary()); + let res = evm.inspect_tx_commit(ctx.tx); + let db = evm.ctx.journaled_state.database; + (db, res) + } else { + let mut evm = evm_context.build_mainnet(); + let res = evm.transact_commit(ctx.tx); + let db = evm.ctx.journaled_state.database; + (db, res) + }; + *ctx.elapsed.lock().unwrap() += timer.elapsed(); + + // Check results + check_evm_execution( + ctx.test, + ctx.unit.out.as_ref(), + ctx.name, + &exec_result, + db, + ctx.cfg.spec(), + ctx.print_json_outcome, + ) +} + +fn debug_failed_test(ctx: DebugContext) { + println!("\nTraces:"); + + // Re-run with tracing + let mut cache = ctx.cache_state.clone(); + cache.set_state_clear_flag(ctx.cfg.spec.is_enabled_in(SpecId::SPURIOUS_DRAGON)); + let mut state = database::State::builder() + .with_cached_prestate(cache) + .with_bundle_update() + .build(); + + let mut evm = Context::mainnet() + .with_db(&mut state) + .with_block(ctx.block) + .with_tx(ctx.tx) + .with_cfg(ctx.cfg) + .build_mainnet_with_inspector(TracerEip3155::buffered(stderr()).without_summary()); + + let exec_result = evm.inspect_tx_commit(ctx.tx); + + println!("\nExecution result: {exec_result:#?}"); + println!("\nExpected exception: {:?}", ctx.test.expect_exception); + println!("\nState before: {:#?}", ctx.cache_state); + println!( + "\nState after: {:#?}", + evm.ctx.journaled_state.database.cache + ); + println!("\nSpecification: {:?}", ctx.cfg.spec); + println!("\nTx: {:#?}", ctx.tx); + println!("Block: {:#?}", ctx.block); + println!("Cfg: {:#?}", 
ctx.cfg); + println!( + "\nTest name: {:?} (index: {}, path: {:?}) failed:\n{}", + ctx.name, ctx.index, ctx.path, ctx.error + ); +} + +#[derive(Clone, Copy)] +struct TestRunnerConfig { + single_thread: bool, trace: bool, - mut print_outcome: bool, + print_outcome: bool, keep_going: bool, -) -> Result<(), TestError> { - // Trace implies print_outcome - if trace { - print_outcome = true; +} + +impl TestRunnerConfig { + fn new(single_thread: bool, trace: bool, print_outcome: bool, keep_going: bool) -> Self { + // Trace implies print_outcome + let print_outcome = print_outcome || trace; + // print_outcome or trace implies single_thread + let single_thread = single_thread || print_outcome; + + Self { + single_thread, + trace, + print_outcome, + keep_going, + } } - // `print_outcome` or trace implies single_thread - if print_outcome { - single_thread = true; +} + +#[derive(Clone)] +struct TestRunnerState { + n_errors: Arc, + console_bar: Arc, + queue: Arc)>>, + elapsed: Arc>, +} + +impl TestRunnerState { + fn new(test_files: Vec) -> Self { + let n_files = test_files.len(); + Self { + n_errors: Arc::new(AtomicUsize::new(0)), + console_bar: Arc::new(ProgressBar::with_draw_target( + Some(n_files as u64), + ProgressDrawTarget::stdout(), + )), + queue: Arc::new(Mutex::new((0usize, test_files))), + elapsed: Arc::new(Mutex::new(Duration::ZERO)), + } } - let n_files = test_files.len(); - let n_errors = Arc::new(AtomicUsize::new(0)); - let console_bar = Arc::new(ProgressBar::with_draw_target( - Some(n_files as u64), - ProgressDrawTarget::stdout(), - )); - let queue = Arc::new(Mutex::new((0usize, test_files))); - let elapsed = Arc::new(Mutex::new(std::time::Duration::ZERO)); + fn next_test(&self) -> Option { + let (current_idx, queue) = &mut *self.queue.lock().unwrap(); + let idx = *current_idx; + let test_path = queue.get(idx).cloned()?; + *current_idx = idx + 1; + Some(test_path) + } +} - let num_threads = match (single_thread, std::thread::available_parallelism()) { - (true, _) | (false, Err(_)) => 1, - (false, Ok(n)) => n.get(), - }; - let num_threads = num_threads.min(n_files); - let mut handles = Vec::with_capacity(num_threads); - for i in 0..num_threads { - let queue = queue.clone(); - let n_errors = n_errors.clone(); - let console_bar = console_bar.clone(); - let elapsed = elapsed.clone(); +fn run_test_worker(state: TestRunnerState, config: TestRunnerConfig) -> Result<(), TestError> { + loop { + if !config.keep_going && state.n_errors.load(Ordering::SeqCst) > 0 { + return Ok(()); + } + + let Some(test_path) = state.next_test() else { + return Ok(()); + }; - let thread = std::thread::Builder::new().name(format!("runner-{i}")); + let result = execute_test_suite( + &test_path, + &state.elapsed, + config.trace, + config.print_outcome, + ); - let f = move || loop { - if !keep_going && n_errors.load(Ordering::SeqCst) > 0 { - return Ok(()); + state.console_bar.inc(1); + + if let Err(err) = result { + state.n_errors.fetch_add(1, Ordering::SeqCst); + if !config.keep_going { + return Err(err); } + } + } +} - let (_index, test_path) = { - let (current_idx, queue) = &mut *queue.lock().unwrap(); - let prev_idx = *current_idx; - let Some(test_path) = queue.get(prev_idx).cloned() else { - return Ok(()); - }; - *current_idx = prev_idx + 1; - (prev_idx, test_path) - }; +fn determine_thread_count(single_thread: bool, n_files: usize) -> usize { + match (single_thread, std::thread::available_parallelism()) { + (true, _) | (false, Err(_)) => 1, + (false, Ok(n)) => n.get().min(n_files), + } +} - let result = 
execute_test_suite(&test_path, &elapsed, trace, print_outcome); +/// Run all test files in parallel or single-threaded mode +/// +/// # Arguments +/// * `test_files` - List of test files to execute +/// * `single_thread` - Force single-threaded execution +/// * `trace` - Enable EVM execution tracing +/// * `print_outcome` - Print test outcomes in JSON format +/// * `keep_going` - Continue running tests even if some fail +pub fn run( + test_files: Vec, + single_thread: bool, + trace: bool, + print_outcome: bool, + keep_going: bool, +) -> Result<(), TestError> { + let config = TestRunnerConfig::new(single_thread, trace, print_outcome, keep_going); + let n_files = test_files.len(); + let state = TestRunnerState::new(test_files); + let num_threads = determine_thread_count(config.single_thread, n_files); - // Increment after the test is done. - console_bar.inc(1); + // Spawn worker threads + let mut handles = Vec::with_capacity(num_threads); + for i in 0..num_threads { + let state = state.clone(); - if let Err(err) = result { - n_errors.fetch_add(1, Ordering::SeqCst); - if !keep_going { - return Err(err); - } - } - }; - handles.push(thread.spawn(f).unwrap()); + let thread = std::thread::Builder::new() + .name(format!("runner-{i}")) + .spawn(move || run_test_worker(state, config)) + .unwrap(); + + handles.push(thread); } - // join all threads before returning an error + // Collect results from all threads let mut thread_errors = Vec::new(); for (i, handle) in handles.into_iter().enumerate() { match handle.join() { @@ -578,20 +610,23 @@ pub fn run( Ok(Err(e)) => thread_errors.push(e), Err(_) => thread_errors.push(TestError { name: format!("thread {i} panicked"), - path: "".to_string(), + path: String::new(), kind: TestErrorKind::Panic, }), } } - console_bar.finish(); + state.console_bar.finish(); + + // Print summary println!( "Finished execution. Total CPU time: {:.6}s", - elapsed.lock().unwrap().as_secs_f64() + state.elapsed.lock().unwrap().as_secs_f64() ); - let n_errors = n_errors.load(Ordering::SeqCst); + let n_errors = state.n_errors.load(Ordering::SeqCst); let n_thread_errors = thread_errors.len(); + if n_errors == 0 && n_thread_errors == 0 { println!("All tests passed!"); Ok(()) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e18eb9de3a..b84fe78c5d 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,7 +3,9 @@ - [Introduction](./../../README.md) - [Awesome Revm](./awesome.md) - [Architecture](./architecture.md) +- [Inspector - EVM Tracing](./inspector.md) +- [External State Transitions](./external_state_transitions.md) - [Dev section](./dev.md) - [Revme](./revme.md) - [Release procedure](./release_procedure.md) -- [Contact](./contact.md) \ No newline at end of file +- [Contact](./contact.md) diff --git a/book/src/architecture.md b/book/src/architecture.md index eb7a840a8d..9832b1f3cf 100644 --- a/book/src/architecture.md +++ b/book/src/architecture.md @@ -15,6 +15,13 @@ REVM works in `no_std` environments which means it can be used in zero-knowledge # Execution API +The Execution API provides the primary interface for running Ethereum transactions and interacting with the EVM. Whether you're building a blockchain client, testing framework, or analysis tool, this API offers multiple execution modes to suit your needs. 
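+
+Before the breakdown of the individual patterns below, a minimal sketch of the two most common entry points (hedged: it assumes a prepared database `db`, an `inspector`, and a `tx: TxEnv` built via `TxEnv::builder()`; check the `ExecuteCommitEvm` and `InspectEvm` traits for the exact signatures):
+
+```rust,ignore
+// Execution with commit: run the transaction and persist the resulting state changes.
+let mut evm = Context::mainnet().with_db(db).build_mainnet();
+let _result = evm.transact_commit(&tx);
+
+// Execution with inspection: attach an inspector to receive tracing callbacks.
+let mut evm = Context::mainnet().with_db(db).build_mainnet().with_inspector(inspector);
+let _result = evm.inspect_tx(tx);
+```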
+ +The API is designed around four key execution patterns: +- **Basic execution**: Run transactions and get results +- **Execution with commit**: Run transactions and automatically persist state changes +- **Execution with inspection**: Run transactions with detailed tracing and observation + [`Evm`](https://docs.rs/revm-context/1.0.0/revm_context/evm/struct.Evm.html) the main structure for executing mainnet ethereum transaction is built with a [`Context`](https://docs.rs/revm-context/latest/revm_context/context/struct.Context.html) and a builder, code for it looks like this: ```rust,ignore @@ -47,6 +54,19 @@ let mut evm = Context::mainnet().with_block(block).build_mainnet().with_inspecto let _ = evm.inspect_tx(tx); ``` +## Inspector - EVM Execution Tracing + +The [`Inspector`](https://docs.rs/revm-inspector/latest/revm_inspector/trait.Inspector.html) trait is REVM's powerful mechanism for observing EVM execution. It provides hooks into every aspect of transaction execution, enabling sophisticated debugging, tracing and custom tooling. + +Key capabilities include: +- **Step-by-step execution tracing**: Hook into every opcode before and after execution +- **State monitoring**: Track stack, memory, and storage changes in real-time +- **Call and creation tracing**: Observe contract interactions and deployments +- **Event capture**: Record logs, self-destructs, and other EVM events +- **Execution override**: Optionally modify execution flow and outcomes + +The Inspector is ideal for building debuggers, gas analyzers, security tools, testing frameworks, and any application that needs deep visibility into EVM execution. For detailed usage examples and advanced features, see the [Inspector documentation](./inspector.md). + # EVM Framework To learn how to build your own custom EVM: diff --git a/book/src/external_state_transitions.md b/book/src/external_state_transitions.md new file mode 100644 index 0000000000..131b1b012f --- /dev/null +++ b/book/src/external_state_transitions.md @@ -0,0 +1,40 @@ +# External State Transitions (EIP-4788 & EIP-2935) + +Some Ethereum Improvement Proposals (EIPs) require state transitions that are not triggered by regular user transactions, but are instead performed by the client using special system calls (such as `transact_system_call`). These transitions are part of the EVM state changes, but are initiated by the client at specific block boundaries (pre- or post-block hooks), as required by the EIP. + +- [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) +- [EIP-2935: Add `blockHash` and `blockNumber` to the EVM](https://eips.ethereum.org/EIPS/eip-2935) + +## What are external state transitions? + +External state transitions refer to updates to the Ethereum state that are not performed by regular user transactions, but are instead performed by the client using system calls at block boundaries. These are typically required for EIPs that introduce new system contracts or require special state updates at block boundaries. + +## EIP-4788: Beacon block root in the EVM + +EIP-4788 requires that the root of each beacon chain block is committed to the execution layer and made available in the EVM via a special contract. This is achieved by the client calling a system contract at a fixed address (`0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02`) with the beacon root as input, at the start of each block. The contract maintains a ring buffer of recent roots. + +- The system call is performed by the client, not by EVM transaction execution. 
+- If the contract does not exist, the call fails silently. +- See [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788) for full details. +- Example implementation in Reth: [reth#4457](https://github.com/paradigmxyz/reth/pull/4457) + +## EIP-2935: Add blockHash and blockNumber to the EVM + +EIP-2935 introduces a system contract that stores recent block hashes, allowing contracts to query them. The client is responsible for updating this contract at each block, by calling a system contract at a fixed address (`0x0000F90827F1C53a10cb7A02335B175320002935`) with the new block hash. + +- The system call is performed by the client, not by EVM transaction execution. +- See [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) for full details. +- Example implementation in Reth: [reth#7818](https://github.com/paradigmxyz/reth/pull/7818) + +## How does this affect REVM users? + +- To perform these block state transitions, the client or test harness should use the system call mechanism (`transact_system_call`) provided by REVM. +- REVM itself does not automatically perform these transitions; it expects the client to initiate them at the appropriate block boundaries, as specified by the EIPs. +- If you are building a full Ethereum client or a test harness, you are responsible for performing these system calls at the appropriate block boundaries, as specified in the EIPs. +- If you are only using REVM for transaction execution, you may need to ensure that the state of these system contracts is kept up to date externally. + +## References +- [EIP-4788: Beacon block root in the EVM](https://eips.ethereum.org/EIPS/eip-4788) +- [EIP-2935: Add blockHash and blockNumber to the EVM](https://eips.ethereum.org/EIPS/eip-2935) +- [reth#4457: EIP-4788 implementation](https://github.com/paradigmxyz/reth/pull/4457) +- [reth#7818: EIP-2935 implementation](https://github.com/paradigmxyz/reth/pull/7818) diff --git a/book/src/inspector.md b/book/src/inspector.md new file mode 100644 index 0000000000..8388132e0f --- /dev/null +++ b/book/src/inspector.md @@ -0,0 +1,138 @@ +# Inspector - EVM Execution Tracing + +The Inspector trait is REVM's powerful mechanism for observing and tracing EVM execution. It provides hooks into every aspect of transaction execution, making it ideal for building debuggers, analyzers, and custom tooling. + +## What is the Inspector? + +The [`Inspector`](https://docs.rs/revm-inspector/latest/revm_inspector/trait.Inspector.html) trait defines callbacks that are invoked during EVM execution. 
It allows you to: + +- **Step through execution**: Hook into every opcode before and after execution +- **Monitor state changes**: Track stack, memory, and storage modifications +- **Trace calls**: Observe contract calls, creations, and their outcomes +- **Capture events**: Record logs, self-destructs, and other EVM events +- **Override behavior**: Optionally modify execution flow and results + +## Core Inspector Methods + +```rust,ignore +pub trait Inspector { + // Opcode-level tracing + fn step(&mut self, interp: &mut Interpreter, context: &mut CTX) {} + fn step_end(&mut self, interp: &mut Interpreter, context: &mut CTX) {} + + // Call and creation tracing + fn call(&mut self, context: &mut CTX, inputs: &mut CallInputs) -> Option { None } + fn call_end(&mut self, context: &mut CTX, inputs: &CallInputs, outcome: &mut CallOutcome) {} + fn create(&mut self, context: &mut CTX, inputs: &mut CreateInputs) -> Option { None } + fn create_end(&mut self, context: &mut CTX, inputs: &CreateInputs, outcome: &mut CreateOutcome) {} + + // Event tracing + fn log(&mut self, interp: &mut Interpreter, context: &mut CTX, log: Log) {} + fn selfdestruct(&mut self, contract: Address, target: Address, value: U256) {} +} +``` + +## Basic Usage + +### 1. Create an Inspector + +```rust,ignore +#[derive(Default)] +struct MyInspector { + gas_used: u64, + call_count: usize, +} + +impl Inspector for MyInspector { + fn step(&mut self, interp: &mut Interpreter, _context: &mut CTX) { + self.gas_used += interp.gas.spent(); + } + + fn call(&mut self, _context: &mut CTX, _inputs: &mut CallInputs) -> Option { + self.call_count += 1; + None // Don't override the call + } +} +``` + +### 2. Use with EVM + +```rust,ignore +let inspector = MyInspector::default(); +let mut evm = Context::mainnet() + .with_db(db) + .build_mainnet_with_inspector(inspector); + +// Execute with inspection +let result = evm.inspect_one_tx(tx)?; +println!("Gas used: {}", evm.inspector.gas_used); +println!("Calls made: {}", evm.inspector.call_count); +``` + +## Advanced Features + +### State Inspection +Access complete interpreter state during execution: + +```rust,ignore +fn step(&mut self, interp: &mut Interpreter, _context: &mut CTX) { + let pc = interp.bytecode.pc(); + let opcode = interp.bytecode.opcode(); + let stack_len = interp.stack.len(); + let memory_size = interp.memory.size(); + + println!("PC: {}, Opcode: 0x{:02x}, Stack: {}, Memory: {}", + pc, opcode, stack_len, memory_size); +} +``` + +### Call Override +Modify execution by returning custom outcomes: + +```rust,ignore +fn call(&mut self, _context: &mut CTX, inputs: &mut CallInputs) -> Option { + if inputs.target_address == SPECIAL_ADDRESS { + // Override this call with custom logic + return Some(CallOutcome::new( + InterpreterResult::new(InstructionResult::Return, Bytes::from("custom")), + 0..0 + )); + } + None // Let normal execution continue +} +``` + +### Event Logging +Capture and process EVM events: + +```rust,ignore +fn log(&mut self, _interp: &mut Interpreter, _ctx: &mut CTX, log: Log) { + println!("LOG emitted from: {:?}", log.address); + println!("Topics: {:?}", log.topics()); + println!("Data: {}", hex::encode(log.data.data)); +} +``` + +## Built-in Inspectors + +REVM provides several ready-to-use inspectors: + +- **`GasInspector`**: Tracks gas consumption throughout execution +- **`TracerEip3155`**: Generates EIP-3155 compatible execution traces +- **`NoOpInspector`**: Default no-operation inspector for when inspection is disabled + +## Performance Considerations + +- Inspector 
callbacks have minimal overhead when not implemented (empty default methods) + - Use inspection judiciously in production - detailed tracing can impact performance + - Consider batching inspector data collection for high-throughput scenarios + + ## Common Use Cases + + - **Debuggers**: Step-by-step execution analysis + - **Gas analyzers**: Detailed gas consumption tracking + - **Security tools**: Detecting suspicious patterns or calls + - **Development tools**: Contract interaction tracing + - **Testing frameworks**: Execution verification and state checking + + The Inspector trait makes REVM one of the most observable EVM implementations available, enabling sophisticated tooling and analysis capabilities. \ No newline at end of file diff --git a/book/src/revme.md b/book/src/revme.md index 7545fee6e1..90cb09f2f0 100644 --- a/book/src/revme.md +++ b/book/src/revme.md @@ -29,5 +29,5 @@ Revm can run statetest type of tests with `revme` using the following command: For running EEST tests, we can use the `./scripts/run-tests.sh.` -For legacy tests, we need to first to download the repo `git clone https://github.com/ethereum/legacytests` and run then run it with `cargo run --release -p revme -- statetest legacytests/Cancun/GeneralStateTests ` +For legacy tests, we first need to download the repo `git clone https://github.com/ethereum/legacytests` and then run it with `cargo run --release -p revme -- statetest legacytests/Cancun/GeneralStateTests ` All statetest that can be run by revme can be found in the `GeneralStateTests` folder. diff --git a/crates/README.md b/crates/README.md index c365455696..8bb52201fc 100644 --- a/crates/README.md +++ b/crates/README.md @@ -5,7 +5,7 @@ Crates version and their description: * ![revm-precompile](https://img.shields.io/crates/v/revm-precompile?label=revm-precompile) Precompiles defined by ethereum * ![revm-database-interface](https://img.shields.io/crates/v/revm-database-interface?label=revm-database-interface) Interfaces for database implementation, database is used to fetch runtime state data (accounts, storages and block hash) * ![revm-database](https://img.shields.io/crates/v/revm-database?label=revm-database) A few structures that implement database interface -* ![revm-bytecode](https://img.shields.io/crates/v/revm-bytecode?label=revm-bytecode) Bytecode legacy analysis and EOF validation. Create contains opcode tables. +* ![revm-bytecode](https://img.shields.io/crates/v/revm-bytecode?label=revm-bytecode) Bytecode legacy analysis and EOF validation. Crate contains opcode tables. * ![revm-state](https://img.shields.io/crates/v/revm-state?label=revm-state) Small crate with accounts and storage types. * ![revm-context-interface](https://img.shields.io/crates/v/revm-context-interface?label=revm-context-interface) traits for Block/Transaction/Cfg/Journal. * ![revm-context](https://img.shields.io/crates/v/revm-context?label=revm-context) default implementation for traits from context interface.
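The inspector chapter above notes that unimplemented callbacks cost nothing because the default method bodies are empty. A minimal sketch of that pattern follows; the `CTX` generic parameter and the import paths are assumptions, since the trait listing above elides its generics.

```rust,ignore
use revm::{inspector::Inspector, interpreter::Interpreter};

/// Hypothetical inspector that only counts executed opcodes.
#[derive(Default)]
struct OpcodeCounter {
    steps: u64,
}

impl<CTX> Inspector<CTX> for OpcodeCounter {
    // Only `step` is overridden; the call/create/log hooks keep their
    // empty default implementations and add no overhead.
    fn step(&mut self, _interp: &mut Interpreter, _context: &mut CTX) {
        self.steps += 1;
    }
}
```

An inspector like this is wired up with `build_mainnet_with_inspector` and `inspect_one_tx`, exactly as in the usage example earlier in the chapter.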
diff --git a/crates/bytecode/CHANGELOG.md b/crates/bytecode/CHANGELOG.md index 9feb63e4f0..9c484b890e 100644 --- a/crates/bytecode/CHANGELOG.md +++ b/crates/bytecode/CHANGELOG.md @@ -7,6 +7,38 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [6.1.0](https://github.com/bluealloy/revm/compare/revm-bytecode-v6.0.1...revm-bytecode-v6.1.0) - 2025-07-23 + +### Added + +- *(bytecode)* add version getter + make versoin dynamic ([#2751](https://github.com/bluealloy/revm/pull/2751)) + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) + +### Other + +- clean up jump map ([#2764](https://github.com/bluealloy/revm/pull/2764)) +- clean up bytecode analysis ([#2763](https://github.com/bluealloy/revm/pull/2763)) +- Fix typo in EIP-7702 bytecode format comment (magic byte) ([#2733](https://github.com/bluealloy/revm/pull/2733)) + +## [6.0.1](https://github.com/bluealloy/revm/compare/revm-bytecode-v6.0.0...revm-bytecode-v6.0.1) - 2025-07-03 + +### Other + +- add PartialEq u8 ([#2688](https://github.com/bluealloy/revm/pull/2688)) + +## [6.0.0](https://github.com/bluealloy/revm/compare/revm-bytecode-v5.0.0...revm-bytecode-v6.0.0) - 2025-06-30 + +### Fixed + +- implement `PartialEq` for `JumpTable` correctly ([#2654](https://github.com/bluealloy/revm/pull/2654)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) + ## [5.0.0](https://github.com/bluealloy/revm/compare/revm-bytecode-v4.1.0...revm-bytecode-v5.0.0) - 2025-06-19 ### Added diff --git a/crates/bytecode/Cargo.toml b/crates/bytecode/Cargo.toml index 78a0d8f48d..78114d50f8 100644 --- a/crates/bytecode/Cargo.toml +++ b/crates/bytecode/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-bytecode" description = "EVM Bytecodes" -version = "5.0.0" +version = "6.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -33,8 +33,16 @@ phf = { workspace = true, features = ["macros"], optional = true } [features] default = ["std", "parse"] -std = ["serde?/std", "primitives/std", "bitvec/std", "once_cell/std", "phf?/std"] +std = [ + "serde?/std", + "primitives/std", + "bitvec/std", + "once_cell/std", + "phf?/std", +] hashbrown = ["primitives/hashbrown"] serde = ["dep:serde", "primitives/serde", "bitvec/serde", "phf?/serde"] -serde-json = ["serde"] parse = ["phf", "paste"] + +# Deprecated, please use `serde` feature instead. +serde-json = ["serde"] diff --git a/crates/bytecode/src/eip7702.rs b/crates/bytecode/src/eip7702.rs index f89c85c15c..f2c7ef8855 100644 --- a/crates/bytecode/src/eip7702.rs +++ b/crates/bytecode/src/eip7702.rs @@ -17,7 +17,7 @@ pub const EIP7702_VERSION: u8 = 0; /// Bytecode of delegated account, specified in EIP-7702 /// /// Format of EIP-7702 bytecode consist of: -/// `0xEF00` (MAGIC) + `0x00` (VERSION) + 20 bytes of address. +/// `0xEF01` (MAGIC) + `0x00` (VERSION) + 20 bytes of address. #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct Eip7702Bytecode { @@ -47,7 +47,7 @@ impl Eip7702Bytecode { Ok(Self { delegated_address: Address::new(raw[3..].try_into().unwrap()), - version: EIP7702_VERSION, + version: raw[2], raw, }) } @@ -75,6 +75,12 @@ impl Eip7702Bytecode { pub fn address(&self) -> Address { self.delegated_address } + + /// Returns the EIP7702 version of the delegated contract. 
+ #[inline] + pub fn version(&self) -> u8 { + self.version + } } /// Bytecode errors diff --git a/crates/bytecode/src/legacy/analysis.rs b/crates/bytecode/src/legacy/analysis.rs index a281077672..193b188642 100644 --- a/crates/bytecode/src/legacy/analysis.rs +++ b/crates/bytecode/src/legacy/analysis.rs @@ -2,21 +2,13 @@ use super::JumpTable; use crate::opcode; use bitvec::{bitvec, order::Lsb0, vec::BitVec}; use primitives::Bytes; -use std::{vec, vec::Vec}; +use std::vec::Vec; -/// Analyze the bytecode to find the jumpdests. Used to create a jump table -/// that is needed for [`crate::LegacyAnalyzedBytecode`]. -/// This function contains a hot loop and should be optimized as much as possible. +/// Analyzes the bytecode for use in [`LegacyAnalyzedBytecode`](crate::LegacyAnalyzedBytecode). /// -/// # Safety +/// See [`LegacyAnalyzedBytecode`](crate::LegacyAnalyzedBytecode) for more details. /// -/// The function uses unsafe pointer arithmetic, but maintains the following invariants: -/// - The iterator never advances beyond the end of the bytecode -/// - All pointer offsets are within bounds of the bytecode -/// - The jump table is never accessed beyond its allocated size -/// -/// Undefined behavior if the bytecode does not end with a valid STOP opcode. Please check -/// [`crate::LegacyAnalyzedBytecode::new`] for details on how the bytecode is validated. +/// Prefer using [`LegacyAnalyzedBytecode::analyze`](crate::LegacyAnalyzedBytecode::analyze) instead. pub fn analyze_legacy(bytecode: Bytes) -> (JumpTable, Bytes) { if bytecode.is_empty() { return (JumpTable::default(), Bytes::from_static(&[opcode::STOP])); @@ -31,38 +23,38 @@ pub fn analyze_legacy(bytecode: Bytes) -> (JumpTable, Bytes) { while iterator < end { opcode = unsafe { *iterator }; - if opcode::JUMPDEST == opcode { + if opcode == opcode::JUMPDEST { // SAFETY: Jumps are max length of the code unsafe { jumps.set_unchecked(iterator.offset_from(start) as usize, true) } - iterator = unsafe { iterator.offset(1) }; + iterator = unsafe { iterator.add(1) }; } else { let push_offset = opcode.wrapping_sub(opcode::PUSH1); if push_offset < 32 { // SAFETY: Iterator access range is checked in the while loop - iterator = unsafe { iterator.offset((push_offset + 2) as isize) }; + iterator = unsafe { iterator.add(push_offset as usize + 2) }; } else { // SAFETY: Iterator access range is checked in the while loop - iterator = unsafe { iterator.offset(1) }; + iterator = unsafe { iterator.add(1) }; } } } - // Calculate padding needed to ensure bytecode ends with STOP - // If we're at the end and last opcode is not STOP, we need 1 more byte - let padding_size = (iterator as usize) - (end as usize) + (opcode != opcode::STOP) as usize; - if padding_size > 0 { - let mut padded_bytecode = Vec::with_capacity(bytecode.len() + padding_size); - padded_bytecode.extend_from_slice(&bytecode); - padded_bytecode.extend(vec![0; padding_size]); - (JumpTable::new(jumps), Bytes::from(padded_bytecode)) + let padding = (iterator as usize) - (end as usize) + (opcode != opcode::STOP) as usize; + let bytecode = if padding > 0 { + let mut padded = Vec::with_capacity(bytecode.len() + padding); + padded.extend_from_slice(&bytecode); + padded.resize(padded.len() + padding, 0); + Bytes::from(padded) } else { - (JumpTable::new(jumps), bytecode) - } + bytecode + }; + + (JumpTable::new(jumps), bytecode) } +#[cfg(test)] mod tests { - #[allow(unused_imports)] - use crate::{legacy::analyze_legacy, opcode}; + use super::*; #[test] fn test_bytecode_ends_with_stop_no_padding_needed() { @@ 
-110,14 +102,14 @@ mod tests { fn test_bytecode_with_jumpdest_at_start() { let bytecode = vec![opcode::JUMPDEST, opcode::PUSH1, 0x01, opcode::STOP]; let (jump_table, _) = analyze_legacy(bytecode.clone().into()); - assert!(jump_table.table[0]); // First byte should be a valid jumpdest + assert!(jump_table.is_valid(0)); // First byte should be a valid jumpdest } #[test] fn test_bytecode_with_jumpdest_after_push() { let bytecode = vec![opcode::PUSH1, 0x01, opcode::JUMPDEST, opcode::STOP]; let (jump_table, _) = analyze_legacy(bytecode.clone().into()); - assert!(jump_table.table[2]); // JUMPDEST should be at position 2 + assert!(jump_table.is_valid(2)); // JUMPDEST should be at position 2 } #[test] @@ -130,8 +122,8 @@ mod tests { opcode::STOP, ]; let (jump_table, _) = analyze_legacy(bytecode.clone().into()); - assert!(jump_table.table[0]); // First JUMPDEST - assert!(jump_table.table[3]); // Second JUMPDEST + assert!(jump_table.is_valid(0)); // First JUMPDEST + assert!(jump_table.is_valid(3)); // Second JUMPDEST } #[test] @@ -145,7 +137,7 @@ mod tests { fn test_bytecode_with_invalid_opcode() { let bytecode = vec![0xFF, opcode::STOP]; // 0xFF is an invalid opcode let (jump_table, _) = analyze_legacy(bytecode.clone().into()); - assert!(!jump_table.table[0]); // Invalid opcode should not be a jumpdest + assert!(!jump_table.is_valid(0)); // Invalid opcode should not be a jumpdest } #[test] @@ -165,9 +157,9 @@ mod tests { ]; let (jump_table, padded_bytecode) = analyze_legacy(bytecode.clone().into()); assert_eq!(padded_bytecode.len(), bytecode.len()); - assert!(!jump_table.table[0]); // PUSH1 - assert!(!jump_table.table[2]); // PUSH2 - assert!(!jump_table.table[5]); // PUSH4 + assert!(!jump_table.is_valid(0)); // PUSH1 + assert!(!jump_table.is_valid(2)); // PUSH2 + assert!(!jump_table.is_valid(5)); // PUSH4 } #[test] @@ -179,6 +171,6 @@ mod tests { opcode::STOP, ]; let (jump_table, _) = analyze_legacy(bytecode.clone().into()); - assert!(!jump_table.table[1]); // JUMPDEST in push data should not be valid + assert!(!jump_table.is_valid(1)); // JUMPDEST in push data should not be valid } } diff --git a/crates/bytecode/src/legacy/analyzed.rs b/crates/bytecode/src/legacy/analyzed.rs index f85211a97a..295ec20baf 100644 --- a/crates/bytecode/src/legacy/analyzed.rs +++ b/crates/bytecode/src/legacy/analyzed.rs @@ -14,11 +14,11 @@ use primitives::Bytes; /// /// # Bytecode Padding /// -/// All legacy bytecode is padded with 33 zero bytes at the end. This padding ensures the -/// bytecode always ends with a valid STOP (0x00) opcode. The reason for 33 bytes padding (and not one byte) -/// is handling the edge cases where a PUSH32 opcode appears at the end of the original -/// bytecode without enough remaining bytes for its immediate data. Original bytecode length -/// is stored in order to be able to copy original bytecode. +/// Legacy bytecode can be padded with up to 33 zero bytes at the end. This padding ensures that: +/// - the bytecode always ends with a valid STOP (0x00) opcode. +/// - there aren't incomplete immediates, meaning we can skip bounds checks in `PUSH*` instructions. +/// +/// The non-padded length is stored in order to be able to copy the original bytecode. /// /// # Gas safety /// @@ -29,11 +29,11 @@ use primitives::Bytes; #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct LegacyAnalyzedBytecode { - /// Bytecode with 33 zero bytes padding + /// The potentially padded bytecode. 
bytecode: Bytes, - /// Original bytes length + /// The original bytecode length. original_len: usize, - /// Jump table + /// The jump table. jump_table: JumpTable, } @@ -49,32 +49,38 @@ impl Default for LegacyAnalyzedBytecode { } impl LegacyAnalyzedBytecode { + /// Analyzes the bytecode. + /// + /// See [`LegacyAnalyzedBytecode`] for more details. + pub fn analyze(bytecode: Bytes) -> Self { + let original_len = bytecode.len(); + let (jump_table, padded_bytecode) = super::analysis::analyze_legacy(bytecode); + Self::new(padded_bytecode, original_len, jump_table) + } + /// Creates new analyzed bytecode. /// + /// Prefer instantiating using [`analyze`](Self::analyze) instead. + /// /// # Panics /// /// * If `original_len` is greater than `bytecode.len()` /// * If jump table length is less than `original_len`. /// * If last bytecode byte is not `0x00` or if bytecode is empty. pub fn new(bytecode: Bytes, original_len: usize, jump_table: JumpTable) -> Self { - if original_len > bytecode.len() { - panic!("original_len is greater than bytecode length"); - } - if original_len > jump_table.len { - panic!( - "jump table length {} is less than original length {}", - jump_table.len, original_len - ); - } - - if bytecode.is_empty() { - panic!("bytecode cannot be empty"); - } - - if bytecode.last() != Some(&opcode::STOP) { - panic!("last bytecode byte should be STOP (0x00)"); - } - + assert!( + original_len <= bytecode.len(), + "original_len is greater than bytecode length" + ); + assert!( + original_len <= jump_table.len(), + "jump table length is less than original length" + ); + assert!(!bytecode.is_empty(), "bytecode cannot be empty"); + assert!( + bytecode.last() == Some(&opcode::STOP), + "last bytecode byte should be STOP (0x00)" + ); Self { bytecode, original_len, @@ -136,7 +142,7 @@ mod tests { } #[test] - #[should_panic(expected = "jump table length 1 is less than original length 2")] + #[should_panic(expected = "jump table length is less than original length")] fn test_panic_on_short_jump_table() { let bytecode = Bytes::from_static(&[opcode::PUSH1, 0x01]); let bytecode = LegacyRawBytecode(bytecode).into_analyzed(); diff --git a/crates/bytecode/src/legacy/jump_map.rs b/crates/bytecode/src/legacy/jump_map.rs index a8d55f4c45..d66dceab28 100644 --- a/crates/bytecode/src/legacy/jump_map.rs +++ b/crates/bytecode/src/legacy/jump_map.rs @@ -1,17 +1,51 @@ use bitvec::vec::BitVec; +use core::{ + cmp::Ordering, + hash::{Hash, Hasher}, +}; use once_cell::race::OnceBox; use primitives::hex; use std::{fmt::Debug, sync::Arc}; -/// A table of valid `jump` destinations. Cheap to clone and memory efficient, one bit per opcode. -#[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +/// A table of valid `jump` destinations. +/// +/// It is immutable, cheap to clone and memory efficient, with one bit per byte in the bytecode. +#[derive(Clone, Eq)] pub struct JumpTable { - /// Actual bit vec - pub table: Arc>, - /// Fast pointer that skips Arc overhead + /// Pointer into `table` to avoid `Arc` overhead on lookup. table_ptr: *const u8, - /// Number of bits in the table - pub len: usize, + /// Number of bits in the table. 
+ len: usize, + /// Actual bit vec + table: Arc>, +} + +// SAFETY: BitVec data is immutable through Arc, pointer won't be invalidated +unsafe impl Send for JumpTable {} +unsafe impl Sync for JumpTable {} + +impl PartialEq for JumpTable { + fn eq(&self, other: &Self) -> bool { + self.table.eq(&other.table) + } +} + +impl Hash for JumpTable { + fn hash(&self, state: &mut H) { + self.table.hash(state); + } +} + +impl PartialOrd for JumpTable { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for JumpTable { + fn cmp(&self, other: &Self) -> Ordering { + self.table.cmp(&other.table) + } } #[cfg(feature = "serde")] @@ -35,10 +69,6 @@ impl<'de> serde::Deserialize<'de> for JumpTable { } } -// SAFETY: BitVec data is immutable through Arc, pointer won't be invalidated -unsafe impl Send for JumpTable {} -unsafe impl Sync for JumpTable {} - impl Debug for JumpTable { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("JumpTable") @@ -77,6 +107,18 @@ impl JumpTable { self.table.as_raw_slice() } + /// Gets the length of the jump map. + #[inline] + pub fn len(&self) -> usize { + self.len + } + + /// Returns true if the jump map is empty. + #[inline] + pub fn is_empty(&self) -> bool { + self.len == 0 + } + /// Constructs a jump map from raw bytes and length. /// /// Bit length represents number of used bits inside slice. @@ -95,16 +137,7 @@ impl JumpTable { ); let mut bitvec = BitVec::from_slice(slice); unsafe { bitvec.set_len(bit_len) }; - - let table = Arc::new(bitvec); - let table_ptr = table.as_raw_slice().as_ptr(); - let len = table.len(); - - Self { - table, - table_ptr, - len, - } + Self::new(bitvec) } /// Checks if `pc` is a valid jump destination. diff --git a/crates/bytecode/src/legacy/raw.rs b/crates/bytecode/src/legacy/raw.rs index 6b4ba85569..20ef7ba885 100644 --- a/crates/bytecode/src/legacy/raw.rs +++ b/crates/bytecode/src/legacy/raw.rs @@ -1,22 +1,18 @@ -use super::{analyze_legacy, LegacyAnalyzedBytecode}; +use super::LegacyAnalyzedBytecode; use core::ops::Deref; use primitives::Bytes; /// Used only as intermediate representation for legacy bytecode. -/// Please check [`LegacyAnalyzedBytecode`] for the main structure that is used in Revm. +/// +/// See [`LegacyAnalyzedBytecode`] for the main structure that is used in Revm. #[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct LegacyRawBytecode(pub Bytes); impl LegacyRawBytecode { - /// Converts the raw bytecode into an analyzed bytecode. - /// - /// It extends the bytecode with 33 zero bytes and analyzes it to find the jumpdests. + /// Analyzes the bytecode, instantiating a [`LegacyAnalyzedBytecode`]. 
pub fn into_analyzed(self) -> LegacyAnalyzedBytecode { - let bytecode = self.0; - let len = bytecode.len(); - let (jump_table, padded_bytecode) = analyze_legacy(bytecode); - LegacyAnalyzedBytecode::new(padded_bytecode, len, jump_table) + LegacyAnalyzedBytecode::analyze(self.0) } } diff --git a/crates/bytecode/src/opcode.rs b/crates/bytecode/src/opcode.rs index 4960132313..774ddb37d7 100644 --- a/crates/bytecode/src/opcode.rs +++ b/crates/bytecode/src/opcode.rs @@ -197,6 +197,12 @@ impl OpCode { } } +impl PartialEq for OpCode { + fn eq(&self, other: &u8) -> bool { + self.get().eq(other) + } +} + /// Information about opcode, such as name, and stack inputs and outputs #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct OpCodeInfo { diff --git a/crates/context/CHANGELOG.md b/crates/context/CHANGELOG.md index ebba9f84cb..70646c5757 100644 --- a/crates/context/CHANGELOG.md +++ b/crates/context/CHANGELOG.md @@ -7,6 +7,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [8.0.4](https://github.com/bluealloy/revm/compare/revm-context-v8.0.3...revm-context-v8.0.4) - 2025-07-23 + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) + +### Other + +- un-Box frames ([#2761](https://github.com/bluealloy/revm/pull/2761)) +- discard generic host implementation ([#2738](https://github.com/bluealloy/revm/pull/2738)) + +## [8.0.3](https://github.com/bluealloy/revm/compare/revm-context-v8.0.2...revm-context-v8.0.3) - 2025-07-14 + +### Fixed + +- fix typo: Rename is_created_globaly to is_created_globally ([#2692](https://github.com/bluealloy/revm/pull/2692)) + +### Other + +- add comprehensive tests for TxEnvBuilder ([#2690](https://github.com/bluealloy/revm/pull/2690)) + +## [8.0.2](https://github.com/bluealloy/revm/compare/revm-context-v8.0.1...revm-context-v8.0.2) - 2025-07-03 + +### Other + +- updated the following local packages: revm-bytecode, revm-state, revm-database-interface, revm-context-interface + +## [8.0.1](https://github.com/bluealloy/revm/compare/revm-context-v7.0.1...revm-context-v8.0.1) - 2025-06-30 + +### Added + +- implement `Transaction` for `Either` ([#2662](https://github.com/bluealloy/revm/pull/2662)) +- optional_eip3541 ([#2661](https://github.com/bluealloy/revm/pull/2661)) + +### Other + +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) +- fix copy-pasted inner doc comments ([#2663](https://github.com/bluealloy/revm/pull/2663)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/revm-context-v7.0.0...revm-context-v7.0.1) - 2025-06-20 ### Fixed diff --git a/crates/context/Cargo.toml b/crates/context/Cargo.toml index 4fcd07255f..b9c01d9305 100644 --- a/crates/context/Cargo.toml +++ b/crates/context/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-context" description = "Revm context crates" -version = "7.0.1" +version = "8.0.4" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -59,6 +59,7 @@ dev = [ "memory_limit", "optional_balance_check", "optional_block_gas_limit", + "optional_eip3541", "optional_eip3607", "optional_no_base_fee", "optional_priority_fee_check", @@ -66,6 +67,7 @@ dev = [ memory_limit = [] optional_balance_check = [] optional_block_gas_limit = [] +optional_eip3541 = [] optional_eip3607 = [] optional_no_base_fee = [] optional_priority_fee_check = [] diff --git a/crates/context/interface/CHANGELOG.md b/crates/context/interface/CHANGELOG.md index 6c9bfe1c30..0cf77dcb7e 100644 --- 
a/crates/context/interface/CHANGELOG.md +++ b/crates/context/interface/CHANGELOG.md @@ -7,6 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [9.0.0](https://github.com/bluealloy/revm/compare/revm-context-interface-v8.0.1...revm-context-interface-v9.0.0) - 2025-07-23 + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) + +### Other + +- un-Box frames ([#2761](https://github.com/bluealloy/revm/pull/2761)) +- discard generic host implementation ([#2738](https://github.com/bluealloy/revm/pull/2738)) + +## [8.0.1](https://github.com/bluealloy/revm/compare/revm-context-interface-v8.0.0...revm-context-interface-v8.0.1) - 2025-07-03 + +### Other + +- updated the following local packages: revm-state, revm-database-interface + +## [8.0.0](https://github.com/bluealloy/revm/compare/revm-context-interface-v7.0.1...revm-context-interface-v8.0.0) - 2025-06-30 + +### Added + +- implement `Transaction` for `Either` ([#2662](https://github.com/bluealloy/revm/pull/2662)) +- optional_eip3541 ([#2661](https://github.com/bluealloy/revm/pull/2661)) + +### Other + +- fix copy-pasted inner doc comments ([#2663](https://github.com/bluealloy/revm/pull/2663)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/revm-context-interface-v7.0.0...revm-context-interface-v7.0.1) - 2025-06-20 ### Fixed diff --git a/crates/context/interface/Cargo.toml b/crates/context/interface/Cargo.toml index 85e47efea2..de26f16d8d 100644 --- a/crates/context/interface/Cargo.toml +++ b/crates/context/interface/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-context-interface" description = "Revm context interface crates" -version = "7.0.1" +version = "9.0.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -54,4 +54,6 @@ serde = [ "database-interface/serde", "either/serde", ] + +# Deprecated, please use `serde` feature instead. serde-json = ["serde"] diff --git a/crates/context/interface/src/cfg.rs b/crates/context/interface/src/cfg.rs index 7d9c33939e..79958a2241 100644 --- a/crates/context/interface/src/cfg.rs +++ b/crates/context/interface/src/cfg.rs @@ -41,6 +41,9 @@ pub trait Cfg { /// Returns whether the EIP-3607 (account clearing) is disabled. fn is_eip3607_disabled(&self) -> bool; + /// Returns whether the EIP-3541 (disallowing new contracts with 0xEF prefix) is disabled. + fn is_eip3541_disabled(&self) -> bool; + /// Returns whether the balance check is disabled. fn is_balance_check_disabled(&self) -> bool; diff --git a/crates/context/interface/src/context.rs b/crates/context/interface/src/context.rs index 432d8a6543..bbf1311a18 100644 --- a/crates/context/interface/src/context.rs +++ b/crates/context/interface/src/context.rs @@ -1,7 +1,7 @@ //! Context trait and related types. pub use crate::journaled_state::StateLoad; use crate::{ - result::FromStringError, Block, Cfg, Database, JournalTr, LocalContextTr, Transaction, + result::FromStringError, Block, Cfg, Database, Host, JournalTr, LocalContextTr, Transaction, }; use auto_impl::auto_impl; use primitives::StorageValue; @@ -15,7 +15,7 @@ use std::string::String; /// /// All function has a `*_mut` variant except the function for [`ContextTr::tx`] and [`ContextTr::block`]. 
#[auto_impl(&mut, Box)] -pub trait ContextTr { +pub trait ContextTr: Host { /// Block type type Block: Block; /// Transaction type diff --git a/crates/interpreter/src/host.rs b/crates/context/interface/src/host.rs similarity index 53% rename from crates/interpreter/src/host.rs rename to crates/context/interface/src/host.rs index 52b3f089c7..636a5eb4dc 100644 --- a/crates/interpreter/src/host.rs +++ b/crates/context/interface/src/host.rs @@ -1,17 +1,18 @@ -use context_interface::{ - context::{ContextTr, SStoreResult, SelfDestructResult, StateLoad}, +//! Host interface for external blockchain state access. + +use crate::{ + context::{SStoreResult, SelfDestructResult, StateLoad}, journaled_state::AccountLoad, - Block, Cfg, Database, JournalTr, Transaction, TransactionType, }; +use auto_impl::auto_impl; use primitives::{Address, Bytes, Log, StorageKey, StorageValue, B256, U256}; -use crate::instructions::utility::IntoU256; - /// Host trait with all methods that are needed by the Interpreter. /// /// This trait is implemented for all types that have `ContextTr` trait. /// /// There are few groups of functions which are Block, Transaction, Config, Database and Journal functions. +#[auto_impl(&mut, Box)] pub trait Host { /* Block */ @@ -88,183 +89,6 @@ pub trait Host { fn load_account_code_hash(&mut self, address: Address) -> Option>; } -impl Host for CTX { - /* Block */ - - fn basefee(&self) -> U256 { - U256::from(self.block().basefee()) - } - - fn blob_gasprice(&self) -> U256 { - U256::from(self.block().blob_gasprice().unwrap_or(0)) - } - - fn gas_limit(&self) -> U256 { - U256::from(self.block().gas_limit()) - } - - fn difficulty(&self) -> U256 { - self.block().difficulty() - } - - fn prevrandao(&self) -> Option { - self.block().prevrandao().map(|r| r.into_u256()) - } - - fn block_number(&self) -> U256 { - self.block().number() - } - - fn timestamp(&self) -> U256 { - U256::from(self.block().timestamp()) - } - - fn beneficiary(&self) -> Address { - self.block().beneficiary() - } - - fn chain_id(&self) -> U256 { - U256::from(self.cfg().chain_id()) - } - - /* Transaction */ - - fn effective_gas_price(&self) -> U256 { - let basefee = self.block().basefee(); - U256::from(self.tx().effective_gas_price(basefee as u128)) - } - - fn caller(&self) -> Address { - self.tx().caller() - } - - fn blob_hash(&self, number: usize) -> Option { - let tx = &self.tx(); - if tx.tx_type() != TransactionType::Eip4844 { - return None; - } - tx.blob_versioned_hashes() - .get(number) - .map(|t| U256::from_be_bytes(t.0)) - } - - /* Config */ - - fn max_initcode_size(&self) -> usize { - self.cfg().max_initcode_size() - } - - /* Database */ - - fn block_hash(&mut self, requested_number: u64) -> Option { - self.db_mut() - .block_hash(requested_number) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /* Journal */ - - fn load_account_delegated(&mut self, address: Address) -> Option> { - let is_eip7702_enabled = self.cfg().is_eip7702_enabled(); - self.journal_mut() - .load_account_delegated(is_eip7702_enabled, address) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Gets balance of `address` and if the account is cold. - fn balance(&mut self, address: Address) -> Option> { - self.journal_mut() - .load_account(address) - .map(|acc| acc.map(|a| a.info.balance)) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Gets code of `address` and if the account is cold. 
- fn load_account_code(&mut self, address: Address) -> Option> { - self.journal_mut() - .code(address) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Gets code hash of `address` and if the account is cold. - fn load_account_code_hash(&mut self, address: Address) -> Option> { - self.journal_mut() - .code_hash(address) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Gets storage value of `address` at `index` and if the account is cold. - fn sload(&mut self, address: Address, index: StorageKey) -> Option> { - self.journal_mut() - .sload(address, index) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Sets storage value of account address at index. - /// - /// Returns [`StateLoad`] with [`SStoreResult`] that contains original/new/old storage value. - fn sstore( - &mut self, - address: Address, - index: StorageKey, - value: StorageValue, - ) -> Option> { - self.journal_mut() - .sstore(address, index, value) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } - - /// Gets the transient storage value of `address` at `index`. - fn tload(&mut self, address: Address, index: StorageKey) -> StorageValue { - self.journal_mut().tload(address, index) - } - - /// Sets the transient storage value of `address` at `index`. - fn tstore(&mut self, address: Address, index: StorageKey, value: StorageValue) { - self.journal_mut().tstore(address, index, value) - } - - /// Emits a log owned by `address` with given `LogData`. - fn log(&mut self, log: Log) { - self.journal_mut().log(log); - } - - /// Marks `address` to be deleted, with funds transferred to `target`. - fn selfdestruct( - &mut self, - address: Address, - target: Address, - ) -> Option> { - self.journal_mut() - .selfdestruct(address, target) - .map_err(|e| { - *self.error() = Err(e.into()); - }) - .ok() - } -} - /// Dummy host that implements [`Host`] trait and returns all default values. #[derive(Debug)] pub struct DummyHost; diff --git a/crates/context/interface/src/lib.rs b/crates/context/interface/src/lib.rs index 7bb7a36927..0e66071368 100644 --- a/crates/context/interface/src/lib.rs +++ b/crates/context/interface/src/lib.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! EVM execution context interface. #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] @@ -8,6 +8,7 @@ extern crate alloc as std; pub mod block; pub mod cfg; pub mod context; +pub mod host; pub mod journaled_state; pub mod local; pub mod result; @@ -18,6 +19,7 @@ pub use cfg::{Cfg, CreateScheme, TransactTo}; pub use context::{ContextError, ContextSetters, ContextTr}; pub use database_interface::{DBErrorMarker, Database}; pub use either; +pub use host::{DummyHost, Host}; pub use journaled_state::JournalTr; pub use local::{FrameStack, FrameToken, LocalContextTr, OutFrame}; pub use transaction::{Transaction, TransactionType}; diff --git a/crates/context/interface/src/local.rs b/crates/context/interface/src/local.rs index 5367ab061f..56c4832842 100644 --- a/crates/context/interface/src/local.rs +++ b/crates/context/interface/src/local.rs @@ -3,12 +3,12 @@ use core::{ cell::{Ref, RefCell}, ops::Range, }; -use std::{boxed::Box, rc::Rc, vec::Vec}; +use std::{rc::Rc, vec::Vec}; /// Non-empty, item-pooling Vec. #[derive(Debug, Clone)] pub struct FrameStack { - stack: Vec>, + stack: Vec, index: Option, } @@ -20,10 +20,9 @@ impl Default for FrameStack { impl FrameStack { /// Creates a new, empty stack. 
It must be initialized with init before use. - #[inline] pub fn new() -> Self { Self { - stack: Vec::with_capacity(1025), + stack: Vec::with_capacity(4), index: None, } } @@ -102,19 +101,19 @@ impl FrameStack { /// A potentially initialized frame. Used when initializing a new frame in the main loop. #[allow(missing_debug_implementations)] pub struct OutFrame<'a, T> { - ptr: *mut Box, + ptr: *mut T, init: bool, lt: core::marker::PhantomData<&'a mut T>, } impl<'a, T> OutFrame<'a, T> { /// Creates a new initialized `OutFrame` from a mutable reference to a type `T`. - pub fn new_init(slot: &'a mut Box) -> Self { + pub fn new_init(slot: &'a mut T) -> Self { unsafe { Self::new_maybe_uninit(slot, true) } } /// Creates a new uninitialized `OutFrame` from a mutable reference to a `MaybeUninit`. - pub fn new_uninit(slot: &'a mut core::mem::MaybeUninit>) -> Self { + pub fn new_uninit(slot: &'a mut core::mem::MaybeUninit) -> Self { unsafe { Self::new_maybe_uninit(slot.as_mut_ptr(), false) } } @@ -125,7 +124,7 @@ impl<'a, T> OutFrame<'a, T> { /// This method is unsafe because it assumes that the pointer is valid and points to a location /// where a type `T` can be stored. It also assumes that the `init` flag correctly reflects whether /// the type `T` has been initialized or not. - pub unsafe fn new_maybe_uninit(ptr: *mut Box, init: bool) -> Self { + pub unsafe fn new_maybe_uninit(ptr: *mut T, init: bool) -> Self { Self { ptr, init, @@ -141,11 +140,12 @@ impl<'a, T> OutFrame<'a, T> { unsafe { &mut *self.ptr } } + #[inline(never)] #[cold] fn do_init(&mut self, f: impl FnOnce() -> T) { unsafe { self.init = true; - self.ptr.write(Box::new(f())); + self.ptr.write(f()); } } diff --git a/crates/context/interface/src/transaction.rs b/crates/context/interface/src/transaction.rs index ae727a8704..091512df3e 100644 --- a/crates/context/interface/src/transaction.rs +++ b/crates/context/interface/src/transaction.rs @@ -2,6 +2,7 @@ mod alloy_types; pub mod eip2930; pub mod eip7702; +mod either; pub mod transaction_type; pub use alloy_types::{ @@ -23,7 +24,7 @@ pub trait TransactionError: Debug + core::error::Error {} /// Main Transaction trait that abstracts and specifies all transaction currently supported by Ethereum /// -/// Access to any associated type is gaited behind [`tx_type`][Transaction::tx_type] function. +/// Access to any associated type is gated behind [`tx_type`][Transaction::tx_type] function. /// /// It can be extended to support new transaction types and only transaction types can be /// deprecated by not returning tx_type. 
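The doc comment above says that access to the `Transaction` trait's type-specific data is gated behind `tx_type`. As a small hypothetical helper (not part of the crate) following that convention, using the same guard as the `Host::blob_hash` implementation elsewhere in this diff:

```rust,ignore
use context_interface::{Transaction, TransactionType};
use primitives::B256;

/// Hypothetical helper: only consult EIP-4844 fields when the transaction
/// actually declares that type, as the tx_type gate requires.
fn first_blob_hash<T: Transaction>(tx: &T) -> Option<B256> {
    if tx.tx_type() != TransactionType::Eip4844 {
        return None;
    }
    tx.blob_versioned_hashes().first().copied()
}
```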
diff --git a/crates/context/interface/src/transaction/either.rs b/crates/context/interface/src/transaction/either.rs new file mode 100644 index 0000000000..46eb43cebe --- /dev/null +++ b/crates/context/interface/src/transaction/either.rs @@ -0,0 +1,127 @@ +use super::Transaction; +use either::Either; +use primitives::{Address, Bytes, TxKind, B256, U256}; + +impl Transaction for Either +where + L: Transaction + 'static, + R: for<'a> Transaction< + AccessListItem<'a> = L::AccessListItem<'a>, + Authorization<'a> = L::Authorization<'a>, + > + 'static, +{ + type AccessListItem<'a> + = L::AccessListItem<'a> + where + Self: 'a; + + type Authorization<'a> + = L::Authorization<'a> + where + Self: 'a; + + fn tx_type(&self) -> u8 { + match self { + Either::Left(l) => l.tx_type(), + Either::Right(r) => r.tx_type(), + } + } + + fn caller(&self) -> Address { + match self { + Either::Left(l) => l.caller(), + Either::Right(r) => r.caller(), + } + } + + fn gas_limit(&self) -> u64 { + match self { + Either::Left(l) => l.gas_limit(), + Either::Right(r) => r.gas_limit(), + } + } + + fn value(&self) -> U256 { + match self { + Either::Left(l) => l.value(), + Either::Right(r) => r.value(), + } + } + + fn input(&self) -> &Bytes { + match self { + Either::Left(l) => l.input(), + Either::Right(r) => r.input(), + } + } + + fn nonce(&self) -> u64 { + match self { + Either::Left(l) => l.nonce(), + Either::Right(r) => r.nonce(), + } + } + + fn kind(&self) -> TxKind { + match self { + Either::Left(l) => l.kind(), + Either::Right(r) => r.kind(), + } + } + + fn chain_id(&self) -> Option { + match self { + Either::Left(l) => l.chain_id(), + Either::Right(r) => r.chain_id(), + } + } + + fn gas_price(&self) -> u128 { + match self { + Either::Left(l) => l.gas_price(), + Either::Right(r) => r.gas_price(), + } + } + + fn access_list(&self) -> Option>> { + match self { + Either::Left(l) => l.access_list().map(Either::Left), + Either::Right(r) => r.access_list().map(Either::Right), + } + } + + fn blob_versioned_hashes(&self) -> &[B256] { + match self { + Either::Left(l) => l.blob_versioned_hashes(), + Either::Right(r) => r.blob_versioned_hashes(), + } + } + + fn max_fee_per_blob_gas(&self) -> u128 { + match self { + Either::Left(l) => l.max_fee_per_blob_gas(), + Either::Right(r) => r.max_fee_per_blob_gas(), + } + } + + fn authorization_list_len(&self) -> usize { + match self { + Either::Left(l) => l.authorization_list_len(), + Either::Right(r) => r.authorization_list_len(), + } + } + + fn authorization_list(&self) -> impl Iterator> { + match self { + Either::Left(l) => Either::Left(l.authorization_list()), + Either::Right(r) => Either::Right(r.authorization_list()), + } + } + + fn max_priority_fee_per_gas(&self) -> Option { + match self { + Either::Left(l) => l.max_priority_fee_per_gas(), + Either::Right(r) => r.max_priority_fee_per_gas(), + } + } +} diff --git a/crates/context/src/cfg.rs b/crates/context/src/cfg.rs index 7b57079cf2..4846549d01 100644 --- a/crates/context/src/cfg.rs +++ b/crates/context/src/cfg.rs @@ -78,6 +78,13 @@ pub struct CfgEnv { /// By default, it is set to `false`. #[cfg(feature = "optional_block_gas_limit")] pub disable_block_gas_limit: bool, + /// EIP-3541 rejects the creation of contracts that starts with 0xEF + /// + /// This is useful for chains that do not implement EIP-3541. + /// + /// By default, it is set to `false`. 
+ #[cfg(feature = "optional_eip3541")] + pub disable_eip3541: bool, /// EIP-3607 rejects transactions from senders with deployed code /// /// In development, it can be desirable to simulate calls from contracts, which this setting allows. @@ -154,6 +161,8 @@ impl CfgEnv { disable_balance_check: false, #[cfg(feature = "optional_block_gas_limit")] disable_block_gas_limit: false, + #[cfg(feature = "optional_eip3541")] + disable_eip3541: false, #[cfg(feature = "optional_eip3607")] disable_eip3607: false, #[cfg(feature = "optional_no_base_fee")] @@ -203,6 +212,8 @@ impl CfgEnv { disable_balance_check: self.disable_balance_check, #[cfg(feature = "optional_block_gas_limit")] disable_block_gas_limit: self.disable_block_gas_limit, + #[cfg(feature = "optional_eip3541")] + disable_eip3541: self.disable_eip3541, #[cfg(feature = "optional_eip3607")] disable_eip3607: self.disable_eip3607, #[cfg(feature = "optional_no_base_fee")] @@ -301,6 +312,16 @@ impl + Copy> Cfg for CfgEnv { .unwrap_or(eip3860::MAX_INITCODE_SIZE) } + fn is_eip3541_disabled(&self) -> bool { + cfg_if::cfg_if! { + if #[cfg(feature = "optional_eip3541")] { + self.disable_eip3541 + } else { + false + } + } + } + fn is_eip3607_disabled(&self) -> bool { cfg_if::cfg_if! { if #[cfg(feature = "optional_eip3607")] { diff --git a/crates/context/src/context.rs b/crates/context/src/context.rs index 96c82152a9..1745d57493 100644 --- a/crates/context/src/context.rs +++ b/crates/context/src/context.rs @@ -1,12 +1,13 @@ //! This module contains [`Context`] struct and implements [`ContextTr`] trait for it. use crate::{block::BlockEnv, cfg::CfgEnv, journal::Journal, tx::TxEnv, LocalContext}; use context_interface::{ - context::{ContextError, ContextSetters}, - Block, Cfg, ContextTr, JournalTr, LocalContextTr, Transaction, + context::{ContextError, ContextSetters, SStoreResult, SelfDestructResult, StateLoad}, + journaled_state::AccountLoad, + Block, Cfg, ContextTr, Host, JournalTr, LocalContextTr, Transaction, TransactionType, }; use database_interface::{Database, DatabaseRef, EmptyDB, WrapDatabaseRef}; use derive_where::derive_where; -use primitives::hardfork::SpecId; +use primitives::{hardfork::SpecId, Address, Bytes, Log, StorageKey, StorageValue, B256, U256}; /// EVM context contains data that EVM needs for execution. 
#[derive_where(Clone, Debug; BLOCK, CFG, CHAIN, TX, DB, JOURNAL, ::Error, LOCAL)] @@ -440,3 +441,189 @@ where f(&mut self.local); } } + +impl< + BLOCK: Block, + TX: Transaction, + CFG: Cfg, + DB: Database, + JOURNAL: JournalTr, + CHAIN, + LOCAL: LocalContextTr, + > Host for Context +{ + /* Block */ + + fn basefee(&self) -> U256 { + U256::from(self.block().basefee()) + } + + fn blob_gasprice(&self) -> U256 { + U256::from(self.block().blob_gasprice().unwrap_or(0)) + } + + fn gas_limit(&self) -> U256 { + U256::from(self.block().gas_limit()) + } + + fn difficulty(&self) -> U256 { + self.block().difficulty() + } + + fn prevrandao(&self) -> Option { + self.block().prevrandao().map(|r| r.into()) + } + + fn block_number(&self) -> U256 { + self.block().number() + } + + fn timestamp(&self) -> U256 { + U256::from(self.block().timestamp()) + } + + fn beneficiary(&self) -> Address { + self.block().beneficiary() + } + + fn chain_id(&self) -> U256 { + U256::from(self.cfg().chain_id()) + } + + /* Transaction */ + + fn effective_gas_price(&self) -> U256 { + let basefee = self.block().basefee(); + U256::from(self.tx().effective_gas_price(basefee as u128)) + } + + fn caller(&self) -> Address { + self.tx().caller() + } + + fn blob_hash(&self, number: usize) -> Option { + let tx = &self.tx(); + if tx.tx_type() != TransactionType::Eip4844 { + return None; + } + tx.blob_versioned_hashes() + .get(number) + .map(|t| U256::from_be_bytes(t.0)) + } + + /* Config */ + + fn max_initcode_size(&self) -> usize { + self.cfg().max_initcode_size() + } + + /* Database */ + + fn block_hash(&mut self, requested_number: u64) -> Option { + self.db_mut() + .block_hash(requested_number) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /* Journal */ + + fn load_account_delegated(&mut self, address: Address) -> Option> { + let is_eip7702_enabled = self.cfg().is_eip7702_enabled(); + self.journal_mut() + .load_account_delegated(is_eip7702_enabled, address) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Gets balance of `address` and if the account is cold. + fn balance(&mut self, address: Address) -> Option> { + self.journal_mut() + .load_account(address) + .map(|acc| acc.map(|a| a.info.balance)) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Gets code of `address` and if the account is cold. + fn load_account_code(&mut self, address: Address) -> Option> { + self.journal_mut() + .code(address) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Gets code hash of `address` and if the account is cold. + fn load_account_code_hash(&mut self, address: Address) -> Option> { + self.journal_mut() + .code_hash(address) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Gets storage value of `address` at `index` and if the account is cold. + fn sload(&mut self, address: Address, index: StorageKey) -> Option> { + self.journal_mut() + .sload(address, index) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Sets storage value of account address at index. + /// + /// Returns [`StateLoad`] with [`SStoreResult`] that contains original/new/old storage value. + fn sstore( + &mut self, + address: Address, + index: StorageKey, + value: StorageValue, + ) -> Option> { + self.journal_mut() + .sstore(address, index, value) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } + + /// Gets the transient storage value of `address` at `index`. 
+ fn tload(&mut self, address: Address, index: StorageKey) -> StorageValue { + self.journal_mut().tload(address, index) + } + + /// Sets the transient storage value of `address` at `index`. + fn tstore(&mut self, address: Address, index: StorageKey, value: StorageValue) { + self.journal_mut().tstore(address, index, value) + } + + /// Emits a log owned by `address` with given `LogData`. + fn log(&mut self, log: Log) { + self.journal_mut().log(log); + } + + /// Marks `address` to be deleted, with funds transferred to `target`. + fn selfdestruct( + &mut self, + address: Address, + target: Address, + ) -> Option> { + self.journal_mut() + .selfdestruct(address, target) + .map_err(|e| { + *self.error() = Err(e.into()); + }) + .ok() + } +} diff --git a/crates/context/src/journal/entry.rs b/crates/context/src/journal/entry.rs index 52b7a4b953..00b5f17219 100644 --- a/crates/context/src/journal/entry.rs +++ b/crates/context/src/journal/entry.rs @@ -39,7 +39,7 @@ pub trait JournalEntryTr { fn nonce_changed(address: Address) -> Self; /// Creates a journal entry for when a new account is created - fn account_created(address: Address, is_created_globaly: bool) -> Self; + fn account_created(address: Address, is_created_globally: bool) -> Self; /// Creates a journal entry for when a storage slot is modified /// Records the previous value for reverting diff --git a/crates/context/src/journal/inner.rs b/crates/context/src/journal/inner.rs index e0c9a16445..0b805e8047 100644 --- a/crates/context/src/journal/inner.rs +++ b/crates/context/src/journal/inner.rs @@ -433,10 +433,10 @@ impl JournalInner { } // set account status to create. - let is_created_globaly = target_acc.mark_created_locally(); + let is_created_globally = target_acc.mark_created_locally(); // this entry will revert set nonce. - last_journal.push(ENTRY::account_created(target_address, is_created_globaly)); + last_journal.push(ENTRY::account_created(target_address, is_created_globally)); target_acc.info.code = None; // EIP-161: State trie clearing (invariant-preserving alternative) if spec_id.is_enabled_in(SPURIOUS_DRAGON) { diff --git a/crates/context/src/lib.rs b/crates/context/src/lib.rs index 554f25a2f4..301f0ae073 100644 --- a/crates/context/src/lib.rs +++ b/crates/context/src/lib.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! EVM execution context. #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/context/src/tx.rs b/crates/context/src/tx.rs index f7e90cc861..b7752a3aea 100644 --- a/crates/context/src/tx.rs +++ b/crates/context/src/tx.rs @@ -274,6 +274,11 @@ impl TxEnvBuilder { self } + /// Get the transaction type + pub fn get_tx_type(&self) -> Option { + self.tx_type + } + /// Set the caller address pub fn caller(mut self, caller: Address) -> Self { self.caller = caller; @@ -378,9 +383,20 @@ impl TxEnvBuilder { self } + /// Insert a list of signed authorizations into the authorization list. + pub fn authorization_list_signed(mut self, auth: Vec) -> Self { + self.authorization_list = auth.into_iter().map(Either::Left).collect(); + self + } + + /// Insert a list of recovered authorizations into the authorization list. + pub fn authorization_list_recovered(mut self, auth: Vec) -> Self { + self.authorization_list = auth.into_iter().map(Either::Right).collect(); + self + } + /// Build the final [`TxEnv`] with default values for missing fields. 
pub fn build_fill(mut self) -> TxEnv { - let tx_type_not_set = self.tx_type.is_some(); if let Some(tx_type) = self.tx_type { match TransactionType::from(tx_type) { TransactionType::Legacy => { @@ -460,7 +476,7 @@ impl TxEnvBuilder { }; // if tx_type is not set, derive it from fields and fix errors. - if tx_type_not_set { + if self.tx_type.is_none() { match tx.derive_tx_type() { Ok(_) => {} Err(DeriveTxTypeError::MissingTargetForEip4844) => { @@ -528,8 +544,8 @@ impl TxEnvBuilder { return Err(DeriveTxTypeError::MissingTargetForEip4844.into()); } } - _ => { - panic!() + TransactionType::Custom => { + // do nothing, custom transaction type is handled by the caller. } } } @@ -552,7 +568,9 @@ impl TxEnvBuilder { }; // Derive tx type from fields, if some fields are wrongly set it will return an error. - tx.derive_tx_type()?; + if self.tx_type.is_none() { + tx.derive_tx_type()?; + } Ok(tx) } @@ -647,6 +665,436 @@ mod tests { tx.effective_gas_price(base_fee) } + #[test] + fn test_tx_env_builder_build_valid_legacy() { + // Legacy transaction + let tx = TxEnvBuilder::new() + .tx_type(Some(0)) + .caller(Address::from([1u8; 20])) + .gas_limit(21000) + .gas_price(20) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .value(U256::from(100)) + .data(Bytes::from(vec![0x01, 0x02])) + .nonce(5) + .chain_id(Some(1)) + .build() + .unwrap(); + + assert_eq!(tx.kind, TxKind::Call(Address::from([2u8; 20]))); + assert_eq!(tx.caller, Address::from([1u8; 20])); + assert_eq!(tx.gas_limit, 21000); + assert_eq!(tx.gas_price, 20); + assert_eq!(tx.value, U256::from(100)); + assert_eq!(tx.data, Bytes::from(vec![0x01, 0x02])); + assert_eq!(tx.nonce, 5); + assert_eq!(tx.chain_id, Some(1)); + assert_eq!(tx.tx_type, TransactionType::Legacy); + } + + #[test] + fn test_tx_env_builder_build_valid_eip2930() { + // EIP-2930 transaction with access list + let access_list = AccessList(vec![AccessListItem { + address: Address::from([3u8; 20]), + storage_keys: vec![B256::from([4u8; 32])], + }]); + let tx = TxEnvBuilder::new() + .tx_type(Some(1)) + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(25) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .access_list(access_list.clone()) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip2930); + assert_eq!(tx.access_list, access_list); + } + + #[test] + fn test_tx_env_builder_build_valid_eip1559() { + // EIP-1559 transaction + let tx = TxEnvBuilder::new() + .tx_type(Some(2)) + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(30) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip1559); + assert_eq!(tx.gas_priority_fee, Some(10)); + } + + #[test] + fn test_tx_env_builder_build_valid_eip4844() { + // EIP-4844 blob transaction + let blob_hashes = vec![B256::from([5u8; 32]), B256::from([6u8; 32])]; + let tx = TxEnvBuilder::new() + .tx_type(Some(3)) + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(30) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .blob_hashes(blob_hashes.clone()) + .max_fee_per_blob_gas(100) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip4844); + assert_eq!(tx.blob_hashes, blob_hashes); + assert_eq!(tx.max_fee_per_blob_gas, 100); + } + + #[test] + fn test_tx_env_builder_build_valid_eip7702() { + // EIP-7702 EOA code transaction + let auth = RecoveredAuthorization::new_unchecked( + Authorization { + chain_id: U256::from(1), + nonce: 0, + address: 
Address::default(), + }, + RecoveredAuthority::Valid(Address::default()), + ); + let auth_list = vec![Either::Right(auth)]; + + let tx = TxEnvBuilder::new() + .tx_type(Some(4)) + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(30) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .authorization_list(auth_list.clone()) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip7702); + assert_eq!(tx.authorization_list.len(), 1); + } + + #[test] + fn test_tx_env_builder_build_create_transaction() { + // Contract creation transaction + let bytecode = Bytes::from(vec![0x60, 0x80, 0x60, 0x40]); + let tx = TxEnvBuilder::new() + .kind(TxKind::Create) + .data(bytecode.clone()) + .gas_limit(100000) + .gas_price(20) + .build() + .unwrap(); + + assert_eq!(tx.kind, TxKind::Create); + assert_eq!(tx.data, bytecode); + } + + #[test] + fn test_tx_env_builder_build_errors_eip1559_missing_priority_fee() { + // EIP-1559 without gas_priority_fee should fail + let result = TxEnvBuilder::new() + .tx_type(Some(2)) + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(30) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build(); + + assert!(matches!( + result, + Err(TxEnvBuildError::MissingGasPriorityFeeForEip1559) + )); + } + + #[test] + fn test_tx_env_builder_build_errors_eip4844_missing_blob_hashes() { + // EIP-4844 without blob hashes should fail + let result = TxEnvBuilder::new() + .tx_type(Some(3)) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build(); + + assert!(matches!( + result, + Err(TxEnvBuildError::MissingBlobHashesForEip4844) + )); + } + + #[test] + fn test_tx_env_builder_build_errors_eip4844_not_call() { + // EIP-4844 with Create should fail + let result = TxEnvBuilder::new() + .tx_type(Some(3)) + .gas_priority_fee(Some(10)) + .blob_hashes(vec![B256::from([5u8; 32])]) + .kind(TxKind::Create) + .build(); + + assert!(matches!( + result, + Err(TxEnvBuildError::MissingTargetForEip4844) + )); + } + + #[test] + fn test_tx_env_builder_build_errors_eip7702_missing_auth_list() { + // EIP-7702 without authorization list should fail + let result = TxEnvBuilder::new() + .tx_type(Some(4)) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build(); + + assert!(matches!( + result, + Err(TxEnvBuildError::MissingAuthorizationListForEip7702) + )); + } + + #[test] + fn test_tx_env_builder_build_errors_eip7702_not_call() { + // EIP-7702 with Create should fail + let auth = RecoveredAuthorization::new_unchecked( + Authorization { + chain_id: U256::from(1), + nonce: 0, + address: Address::default(), + }, + RecoveredAuthority::Valid(Address::default()), + ); + let result = TxEnvBuilder::new() + .tx_type(Some(4)) + .gas_priority_fee(Some(10)) + .authorization_list(vec![Either::Right(auth)]) + .kind(TxKind::Create) + .build(); + + assert!(matches!(result, Err(TxEnvBuildError::DeriveErr(_)))); + } + + #[test] + fn test_tx_env_builder_build_fill_legacy() { + // Legacy transaction with build_fill + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_limit(21000) + .gas_price(20) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Legacy); + assert_eq!(tx.gas_priority_fee, None); + } + + #[test] + fn test_tx_env_builder_build_fill_eip1559_missing_priority_fee() { + // EIP-1559 without gas_priority_fee should be filled with 0 + let tx = TxEnvBuilder::new() + .tx_type(Some(2)) + .caller(Address::from([1u8; 20])) 
+ .gas_limit(50000) + .gas_price(30) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Eip1559); + assert_eq!(tx.gas_priority_fee, Some(0)); + } + + #[test] + fn test_tx_env_builder_build_fill_eip4844_missing_blob_hashes() { + // EIP-4844 without blob hashes should add default blob hash + let tx = TxEnvBuilder::new() + .tx_type(Some(3)) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Eip4844); + assert_eq!(tx.blob_hashes.len(), 1); + assert_eq!(tx.blob_hashes[0], B256::default()); + } + + #[test] + fn test_tx_env_builder_build_fill_eip4844_create_to_call() { + // EIP-4844 with Create should be converted to Call + let tx = TxEnvBuilder::new() + .tx_type(Some(3)) + .gas_priority_fee(Some(10)) + .blob_hashes(vec![B256::from([5u8; 32])]) + .kind(TxKind::Create) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Eip4844); + assert_eq!(tx.kind, TxKind::Call(Address::default())); + } + + #[test] + fn test_tx_env_builder_build_fill_eip7702_missing_auth_list() { + // EIP-7702 without authorization list should add dummy auth + let tx = TxEnvBuilder::new() + .tx_type(Some(4)) + .gas_priority_fee(Some(10)) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Eip7702); + assert_eq!(tx.authorization_list.len(), 1); + } + + #[test] + fn test_tx_env_builder_build_fill_eip7702_create_to_call() { + // EIP-7702 with Create should be converted to Call + let auth = RecoveredAuthorization::new_unchecked( + Authorization { + chain_id: U256::from(1), + nonce: 0, + address: Address::default(), + }, + RecoveredAuthority::Valid(Address::default()), + ); + let tx = TxEnvBuilder::new() + .tx_type(Some(4)) + .gas_priority_fee(Some(10)) + .authorization_list(vec![Either::Right(auth)]) + .kind(TxKind::Create) + .build_fill(); + + assert_eq!(tx.tx_type, TransactionType::Eip7702); + assert_eq!(tx.kind, TxKind::Call(Address::default())); + } + + #[test] + fn test_tx_env_builder_derive_tx_type_legacy() { + // No special fields, should derive Legacy + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_limit(21000) + .gas_price(20) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Legacy); + } + + #[test] + fn test_tx_env_builder_derive_tx_type_eip2930() { + // Access list present, should derive EIP-2930 + let access_list = AccessList(vec![AccessListItem { + address: Address::from([3u8; 20]), + storage_keys: vec![B256::from([4u8; 32])], + }]); + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .access_list(access_list) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip2930); + } + + #[test] + fn test_tx_env_builder_derive_tx_type_eip1559() { + // Gas priority fee present, should derive EIP-1559 + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_priority_fee(Some(10)) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip1559); + } + + #[test] + fn test_tx_env_builder_derive_tx_type_eip4844() { + // Blob hashes present, should derive EIP-4844 + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_priority_fee(Some(10)) + .blob_hashes(vec![B256::from([5u8; 32])]) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip4844); + } + + #[test] + fn test_tx_env_builder_derive_tx_type_eip7702() { + // Authorization list 
present, should derive EIP-7702 + let auth = RecoveredAuthorization::new_unchecked( + Authorization { + chain_id: U256::from(1), + nonce: 0, + address: Address::default(), + }, + RecoveredAuthority::Valid(Address::default()), + ); + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_priority_fee(Some(10)) + .authorization_list(vec![Either::Right(auth)]) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Eip7702); + } + + #[test] + fn test_tx_env_builder_custom_tx_type() { + // Custom transaction type (0xFF) + let tx = TxEnvBuilder::new() + .tx_type(Some(0xFF)) + .caller(Address::from([1u8; 20])) + .build() + .unwrap(); + + assert_eq!(tx.tx_type, TransactionType::Custom); + } + + #[test] + fn test_tx_env_builder_chain_methods() { + // Test method chaining + let tx = TxEnvBuilder::new() + .caller(Address::from([1u8; 20])) + .gas_limit(50000) + .gas_price(25) + .kind(TxKind::Call(Address::from([2u8; 20]))) + .value(U256::from(1000)) + .data(Bytes::from(vec![0x12, 0x34])) + .nonce(10) + .chain_id(Some(5)) + .access_list(AccessList(vec![AccessListItem { + address: Address::from([3u8; 20]), + storage_keys: vec![], + }])) + .gas_priority_fee(Some(5)) + .blob_hashes(vec![B256::from([7u8; 32])]) + .max_fee_per_blob_gas(200) + .build_fill(); + + assert_eq!(tx.caller, Address::from([1u8; 20])); + assert_eq!(tx.gas_limit, 50000); + assert_eq!(tx.gas_price, 25); + assert_eq!(tx.kind, TxKind::Call(Address::from([2u8; 20]))); + assert_eq!(tx.value, U256::from(1000)); + assert_eq!(tx.data, Bytes::from(vec![0x12, 0x34])); + assert_eq!(tx.nonce, 10); + assert_eq!(tx.chain_id, Some(5)); + assert_eq!(tx.access_list.len(), 1); + assert_eq!(tx.gas_priority_fee, Some(5)); + assert_eq!(tx.blob_hashes.len(), 1); + assert_eq!(tx.max_fee_per_blob_gas, 200); + } + #[test] fn test_effective_gas_price() { assert_eq!(90, effective_gas_setup(TransactionType::Legacy, 90, None)); diff --git a/crates/database/CHANGELOG.md b/crates/database/CHANGELOG.md index 45067c84e8..1cfa90d7b8 100644 --- a/crates/database/CHANGELOG.md +++ b/crates/database/CHANGELOG.md @@ -7,6 +7,24 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [7.0.2](https://github.com/bluealloy/revm/compare/revm-database-v7.0.1...revm-database-v7.0.2) - 2025-07-23 + +### Other + +- updated the following local packages: revm-primitives, revm-bytecode, revm-database-interface, revm-state + +## [7.0.1](https://github.com/bluealloy/revm/compare/revm-database-v7.0.0...revm-database-v7.0.1) - 2025-07-03 + +### Other + +- updated the following local packages: revm-bytecode, revm-state, revm-database-interface + +## [7.0.0](https://github.com/bluealloy/revm/compare/revm-database-v6.0.0...revm-database-v7.0.0) - 2025-06-30 + +### Other + +- updated the following local packages: revm-bytecode, revm-state, revm-database-interface + ## [6.0.0](https://github.com/bluealloy/revm/compare/revm-database-v5.0.0...revm-database-v6.0.0) - 2025-06-19 ### Added diff --git a/crates/database/Cargo.toml b/crates/database/Cargo.toml index 377e15440d..97e27554c2 100644 --- a/crates/database/Cargo.toml +++ b/crates/database/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-database" description = "Revm Database implementations" -version = "6.0.0" +version = "7.0.2" authors.workspace = true edition.workspace = true keywords.workspace = true diff --git a/crates/database/interface/CHANGELOG.md b/crates/database/interface/CHANGELOG.md index 
c3408e2a36..1e2d3eb839 100644 --- a/crates/database/interface/CHANGELOG.md +++ b/crates/database/interface/CHANGELOG.md @@ -7,6 +7,28 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [7.0.2](https://github.com/bluealloy/revm/compare/revm-database-interface-v7.0.1...revm-database-interface-v7.0.2) - 2025-07-23 + +### Other + +- impl DatabaseRef for WrapDatabaseRef ([#2726](https://github.com/bluealloy/revm/pull/2726)) + +## [7.0.1](https://github.com/bluealloy/revm/compare/revm-database-interface-v7.0.0...revm-database-interface-v7.0.1) - 2025-07-03 + +### Other + +- updated the following local packages: revm-state + +## [7.0.0](https://github.com/bluealloy/revm/compare/revm-database-interface-v6.0.0...revm-database-interface-v7.0.0) - 2025-06-30 + +### Added + +- implement Database traits for either::Either ([#2673](https://github.com/bluealloy/revm/pull/2673)) + +### Other + +- fix copy-pasted inner doc comments ([#2663](https://github.com/bluealloy/revm/pull/2663)) + ## [6.0.0](https://github.com/bluealloy/revm/compare/revm-database-interface-v5.0.0...revm-database-interface-v6.0.0) - 2025-06-19 ### Added diff --git a/crates/database/interface/Cargo.toml b/crates/database/interface/Cargo.toml index d521defb8b..49ef0ba52c 100644 --- a/crates/database/interface/Cargo.toml +++ b/crates/database/interface/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-database-interface" description = "Revm Database interface" -version = "6.0.0" +version = "7.0.2" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -23,6 +23,7 @@ primitives.workspace = true # misc auto_impl.workspace = true +either.workspace = true # Optional serde = { workspace = true, features = ["derive", "rc"], optional = true } @@ -36,6 +37,16 @@ rstest.workspace = true [features] default = ["std"] -std = ["serde?/std", "primitives/std", "state/std"] -serde = ["dep:serde", "primitives/serde", "state/serde"] +std = [ + "serde?/std", + "primitives/std", + "state/std", + "either/std" +] +serde = [ + "dep:serde", + "primitives/serde", + "state/serde", + "either/serde" +] asyncdb = ["dep:tokio", "tokio/rt-multi-thread"] diff --git a/crates/database/interface/src/either.rs b/crates/database/interface/src/either.rs new file mode 100644 index 0000000000..a22210cb4b --- /dev/null +++ b/crates/database/interface/src/either.rs @@ -0,0 +1,99 @@ +//! Database implementations for `either::Either` type. 
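+//!
+//! These implementations dispatch to whichever variant is active, so code that is
+//! generic over [`Database`] can also accept an `Either` of two database types that
+//! share an error type. Illustrative sketch only (the `lookup` helper below is not
+//! part of this crate):
+//!
+//! ```rust,ignore
+//! fn lookup<L, R>(db: &mut Either<L, R>, address: Address) -> Result<Option<AccountInfo>, L::Error>
+//! where
+//!     L: Database,
+//!     R: Database<Error = L::Error>,
+//! {
+//!     db.basic(address)
+//! }
+//!
+//! let mut db: Either<EmptyDB, EmptyDB> = Either::Left(EmptyDB::default());
+//! assert!(lookup(&mut db, Address::ZERO).unwrap().is_none());
+//! ```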
+ +use crate::{Database, DatabaseCommit, DatabaseRef}; +use either::Either; +use primitives::{Address, HashMap, StorageKey, StorageValue, B256}; +use state::{Account, AccountInfo, Bytecode}; + +impl Database for Either +where + L: Database, + R: Database, +{ + type Error = L::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + match self { + Self::Left(db) => db.basic(address), + Self::Right(db) => db.basic(address), + } + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + match self { + Self::Left(db) => db.code_by_hash(code_hash), + Self::Right(db) => db.code_by_hash(code_hash), + } + } + + fn storage( + &mut self, + address: Address, + index: StorageKey, + ) -> Result { + match self { + Self::Left(db) => db.storage(address, index), + Self::Right(db) => db.storage(address, index), + } + } + + fn block_hash(&mut self, number: u64) -> Result { + match self { + Self::Left(db) => db.block_hash(number), + Self::Right(db) => db.block_hash(number), + } + } +} + +impl DatabaseCommit for Either +where + L: DatabaseCommit, + R: DatabaseCommit, +{ + fn commit(&mut self, changes: HashMap) { + match self { + Self::Left(db) => db.commit(changes), + Self::Right(db) => db.commit(changes), + } + } +} + +impl DatabaseRef for Either +where + L: DatabaseRef, + R: DatabaseRef, +{ + type Error = L::Error; + + fn basic_ref(&self, address: Address) -> Result, Self::Error> { + match self { + Self::Left(db) => db.basic_ref(address), + Self::Right(db) => db.basic_ref(address), + } + } + + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + match self { + Self::Left(db) => db.code_by_hash_ref(code_hash), + Self::Right(db) => db.code_by_hash_ref(code_hash), + } + } + + fn storage_ref( + &self, + address: Address, + index: StorageKey, + ) -> Result { + match self { + Self::Left(db) => db.storage_ref(address, index), + Self::Right(db) => db.storage_ref(address, index), + } + } + + fn block_hash_ref(&self, number: u64) -> Result { + match self { + Self::Left(db) => db.block_hash_ref(number), + Self::Right(db) => db.block_hash_ref(number), + } + } +} diff --git a/crates/database/interface/src/lib.rs b/crates/database/interface/src/lib.rs index ef21676b9a..ab816aad16 100644 --- a/crates/database/interface/src/lib.rs +++ b/crates/database/interface/src/lib.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! Database interface. 
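+//!
+//! The central traits are [`Database`], [`DatabaseRef`] and [`DatabaseCommit`]. A
+//! read-only [`DatabaseRef`] can be adapted to the mutable [`Database`] interface with
+//! [`WrapDatabaseRef`]; minimal sketch using the bundled `EmptyDB`:
+//!
+//! ```rust,ignore
+//! let mut db = WrapDatabaseRef(EmptyDB::default());
+//! // `WrapDatabaseRef` forwards both `Database` and `DatabaseRef` calls to the inner value.
+//! assert!(db.basic(Address::ZERO).unwrap().is_none());
+//! ```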
#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] @@ -28,6 +28,7 @@ pub const BENCH_CALLER_BALANCE: U256 = U256::from_limbs([10_000_000_000_000_000, #[cfg(feature = "asyncdb")] pub mod async_db; +pub mod either; pub mod empty_db; pub mod try_commit; @@ -141,3 +142,31 @@ impl DatabaseCommit for WrapDatabaseRef { self.0.commit(changes) } } + +impl DatabaseRef for WrapDatabaseRef { + type Error = T::Error; + + #[inline] + fn basic_ref(&self, address: Address) -> Result, Self::Error> { + self.0.basic_ref(address) + } + + #[inline] + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + self.0.code_by_hash_ref(code_hash) + } + + #[inline] + fn storage_ref( + &self, + address: Address, + index: StorageKey, + ) -> Result { + self.0.storage_ref(address, index) + } + + #[inline] + fn block_hash_ref(&self, number: u64) -> Result { + self.0.block_hash_ref(number) + } +} diff --git a/crates/handler/CHANGELOG.md b/crates/handler/CHANGELOG.md index 8f44c93d95..19ce478016 100644 --- a/crates/handler/CHANGELOG.md +++ b/crates/handler/CHANGELOG.md @@ -6,6 +6,49 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [8.1.0](https://github.com/bluealloy/revm/compare/revm-handler-v8.0.3...revm-handler-v8.1.0) - 2025-07-23 + +### Added + +- add a way for precompiles to revert ([#2711](https://github.com/bluealloy/revm/pull/2711)) + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) +- system call should have 30M gas limit ([#2755](https://github.com/bluealloy/revm/pull/2755)) +- gas deduction with `disable_balance_check` ([#2699](https://github.com/bluealloy/revm/pull/2699)) + +### Other + +- change gas parameter to immutable reference ([#2702](https://github.com/bluealloy/revm/pull/2702)) +- remove State bound from JournalTr in Handler::Evm ([#2715](https://github.com/bluealloy/revm/pull/2715)) + +## [8.0.3](https://github.com/bluealloy/revm/compare/revm-handler-v8.0.2...revm-handler-v8.0.3) - 2025-07-14 + +### Other + +- simplify gas calculations by introducing a used() method ([#2703](https://github.com/bluealloy/revm/pull/2703)) + +## [8.0.2](https://github.com/bluealloy/revm/compare/revm-handler-v8.0.1...revm-handler-v8.0.2) - 2025-07-03 + +### Other + +- document external state transitions for EIP-4788 and EIP-2935 ([#2678](https://github.com/bluealloy/revm/pull/2678)) +- minor fixes ([#2686](https://github.com/bluealloy/revm/pull/2686)) +- fix in pre_execution.rs about nonce bump for CREATE ([#2684](https://github.com/bluealloy/revm/pull/2684)) + +## [8.0.1](https://github.com/bluealloy/revm/compare/revm-handler-v7.0.1...revm-handler-v8.0.1) - 2025-06-30 + +### Added + +- optional_eip3541 ([#2661](https://github.com/bluealloy/revm/pull/2661)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) +- fix copy-pasted inner doc comments ([#2663](https://github.com/bluealloy/revm/pull/2663)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/revm-handler-v7.0.0...revm-handler-v7.0.1) - 2025-06-20 ### Fixed diff --git a/crates/handler/Cargo.toml b/crates/handler/Cargo.toml index 3a15dc471b..eb5a2a201f 100644 --- a/crates/handler/Cargo.toml +++ b/crates/handler/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-handler" description = "Revm handler crates" -version = 
"7.0.1" +version = "8.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -27,14 +27,13 @@ primitives.workspace = true state.workspace = true bytecode.workspace = true - auto_impl.workspace = true derive-where.workspace = true # Optional serde = { version = "1.0", default-features = false, features = [ - "derive", - "rc", + "derive", + "rc", ], optional = true } [dev-dependencies] @@ -57,7 +56,7 @@ std = [ "interpreter/std", "precompile/std", "primitives/std", - "state/std" + "state/std", ] serde = [ "dep:serde", @@ -70,6 +69,8 @@ serde = [ "database/serde", "database-interface/serde", "interpreter/serde", - "derive-where/serde" + "derive-where/serde", ] + +# Deprecated, please use `serde` feature instead. serde-json = ["serde"] diff --git a/crates/handler/src/api.rs b/crates/handler/src/api.rs index 580c7231b7..020d03e4df 100644 --- a/crates/handler/src/api.rs +++ b/crates/handler/src/api.rs @@ -55,7 +55,7 @@ pub trait ExecuteEvm { /// Transact the given transaction and finalize in a single operation. /// - /// Internally calls [`ExecuteEvm::transact`] followed by [`ExecuteEvm::finalize`]. + /// Internally calls [`ExecuteEvm::transact_one`] followed by [`ExecuteEvm::finalize`]. /// /// # Outcome of Error /// @@ -111,8 +111,6 @@ pub trait ExecuteEvm { } /// Execute previous transaction and finalize it. - /// - /// Doint it without finalization fn replay( &mut self, ) -> Result, Self::Error>; @@ -142,7 +140,7 @@ pub trait ExecuteCommitEvm: ExecuteEvm { /// Transact multiple transactions and commit to the state. /// - /// Internally calls `transact_multi` and `commit` functions. + /// Internally calls `transact_many` and `commit_inner` functions. #[inline] fn transact_many_commit( &mut self, diff --git a/crates/handler/src/frame.rs b/crates/handler/src/frame.rs index d29c1a13dd..6fdb853851 100644 --- a/crates/handler/src/frame.rs +++ b/crates/handler/src/frame.rs @@ -555,12 +555,14 @@ impl EthFrame { } FrameData::Create(frame) => { let max_code_size = context.cfg().max_code_size(); + let is_eip3541_disabled = context.cfg().is_eip3541_disabled(); return_create( context.journal_mut(), self.checkpoint, &mut interpreter_result, frame.created_address, max_code_size, + is_eip3541_disabled, spec, ); @@ -673,6 +675,7 @@ pub fn return_create( interpreter_result: &mut InterpreterResult, address: Address, max_code_size: usize, + is_eip3541_disabled: bool, spec_id: SpecId, ) { // If return is not ok revert and return. @@ -684,7 +687,10 @@ pub fn return_create( // If ok, check contract creation limit and calculate gas deduction on output len. // // EIP-3541: Reject new contract code starting with the 0xEF byte - if spec_id.is_enabled_in(LONDON) && interpreter_result.output.first() == Some(&0xEF) { + if !is_eip3541_disabled + && spec_id.is_enabled_in(LONDON) + && interpreter_result.output.first() == Some(&0xEF) + { journal.checkpoint_revert(checkpoint); interpreter_result.result = InstructionResult::CreateContractStartingWithEF; return; diff --git a/crates/handler/src/handler.rs b/crates/handler/src/handler.rs index 3f00c001b2..541407ff21 100644 --- a/crates/handler/src/handler.rs +++ b/crates/handler/src/handler.rs @@ -13,7 +13,6 @@ use context_interface::{ use interpreter::interpreter_action::FrameInit; use interpreter::{Gas, InitialAndFloorGas, SharedMemory}; use primitives::U256; -use state::EvmState; /// Trait for errors that can occur during EVM execution. 
/// @@ -68,7 +67,7 @@ impl< pub trait Handler { /// The EVM type containing Context, Instruction, and Precompiles implementations. type Evm: EvmTr< - Context: ContextTr, Local: LocalContextTr>, + Context: ContextTr, Frame: FrameTr, >; /// The error type returned by this handler. @@ -409,7 +408,7 @@ pub trait Handler { evm: &mut Self::Evm, exec_result: &mut <::Frame as FrameTr>::FrameResult, ) -> Result<(), Self::Error> { - post_execution::reimburse_caller(evm.ctx(), exec_result.gas_mut(), U256::ZERO) + post_execution::reimburse_caller(evm.ctx(), exec_result.gas(), U256::ZERO) .map_err(From::from) } @@ -420,7 +419,7 @@ pub trait Handler { evm: &mut Self::Evm, exec_result: &mut <::Frame as FrameTr>::FrameResult, ) -> Result<(), Self::Error> { - post_execution::reward_beneficiary(evm.ctx(), exec_result.gas_mut()).map_err(From::from) + post_execution::reward_beneficiary(evm.ctx(), exec_result.gas()).map_err(From::from) } /// Processes the final execution output. diff --git a/crates/handler/src/lib.rs b/crates/handler/src/lib.rs index abe48f8f4f..a220439545 100644 --- a/crates/handler/src/lib.rs +++ b/crates/handler/src/lib.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! EVM execution handling. #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/handler/src/mainnet_builder.rs b/crates/handler/src/mainnet_builder.rs index 3163e26933..4e53287c93 100644 --- a/crates/handler/src/mainnet_builder.rs +++ b/crates/handler/src/mainnet_builder.rs @@ -82,7 +82,7 @@ mod test { Bytecode, }; use context::{Context, TxEnv}; - use context_interface::{transaction::Authorization, TransactionType}; + use context_interface::transaction::Authorization; use database::{BenchmarkDB, EEADDRESS, FFADDRESS}; use primitives::{hardfork::SpecId, TxKind, U256}; use primitives::{StorageKey, StorageValue}; @@ -107,14 +107,15 @@ mod test { let mut evm = ctx.build_mainnet(); let state = evm - .transact(TxEnv { - tx_type: TransactionType::Eip7702.into(), - gas_limit: 100_000, - authorization_list: vec![Either::Left(auth)], - caller: EEADDRESS, - kind: TxKind::Call(signer.address()), - ..Default::default() - }) + .transact( + TxEnv::builder() + .gas_limit(100_000) + .authorization_list(vec![Either::Left(auth)]) + .caller(EEADDRESS) + .kind(TxKind::Call(signer.address())) + .build() + .unwrap(), + ) .unwrap() .state; diff --git a/crates/handler/src/post_execution.rs b/crates/handler/src/post_execution.rs index f1a3501c54..69f5d453f3 100644 --- a/crates/handler/src/post_execution.rs +++ b/crates/handler/src/post_execution.rs @@ -6,7 +6,6 @@ use context_interface::{ }; use interpreter::{Gas, InitialAndFloorGas, SuccessOrHalt}; use primitives::{hardfork::SpecId, U256}; -use state::EvmState; /// Ensures minimum gas floor is spent according to EIP-7623. 
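///
/// The floor is `21_000 + 10 * tokens`, where each zero calldata byte counts as one
/// token and each non-zero byte as four. If execution spends less than the floor, the
/// floor amount is charged instead (e.g. 100 non-zero bytes -> 21_000 + 10 * 400 = 25_000 gas).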
pub fn eip7623_check_gas_floor(gas: &mut Gas, init_and_floor_gas: InitialAndFloorGas) { @@ -32,7 +31,7 @@ pub fn refund(spec: SpecId, gas: &mut Gas, eip7702_refund: i64) { #[inline] pub fn reimburse_caller( context: &mut CTX, - gas: &mut Gas, + gas: &Gas, additional_refund: U256, ) -> Result<(), ::Error> { let basefee = context.block().basefee() as u128; @@ -54,7 +53,7 @@ pub fn reimburse_caller( #[inline] pub fn reward_beneficiary( context: &mut CTX, - gas: &mut Gas, + gas: &Gas, ) -> Result<(), ::Error> { let beneficiary = context.block().beneficiary(); let basefee = context.block().basefee() as u128; @@ -71,7 +70,7 @@ pub fn reward_beneficiary( // reward beneficiary context.journal_mut().balance_incr( beneficiary, - U256::from(coinbase_gas_price * (gas.spent() - gas.refunded() as u64) as u128), + U256::from(coinbase_gas_price * gas.used() as u128), )?; Ok(()) @@ -80,7 +79,7 @@ pub fn reward_beneficiary( /// Calculate last gas spent and transform internal reason to external. /// /// TODO make Journal FinalOutput more generic. -pub fn output>, HALTREASON: HaltReasonTr>( +pub fn output, HALTREASON: HaltReasonTr>( context: &mut CTX, // TODO, make this more generic and nice. // FrameResult should be a generic that returns gas and interpreter result. @@ -88,7 +87,7 @@ pub fn output>, HALTREASON: ) -> ExecutionResult { // Used gas with refund calculated. let gas_refunded = result.gas().refunded() as u64; - let final_gas_used = result.gas().spent() - gas_refunded; + let gas_used = result.gas().used(); let output = result.output(); let instruction_result = result.into_interpreter_result(); @@ -98,19 +97,16 @@ pub fn output>, HALTREASON: match SuccessOrHalt::::from(instruction_result.result) { SuccessOrHalt::Success(reason) => ExecutionResult::Success { reason, - gas_used: final_gas_used, + gas_used, gas_refunded, logs, output, }, SuccessOrHalt::Revert => ExecutionResult::Revert { - gas_used: final_gas_used, + gas_used, output: output.into_data(), }, - SuccessOrHalt::Halt(reason) => ExecutionResult::Halt { - reason, - gas_used: final_gas_used, - }, + SuccessOrHalt::Halt(reason) => ExecutionResult::Halt { reason, gas_used }, // Only two internal return flags. flag @ (SuccessOrHalt::FatalExternalError | SuccessOrHalt::Internal(_)) => { panic!( diff --git a/crates/handler/src/pre_execution.rs b/crates/handler/src/pre_execution.rs index 6266c0d6a3..c1a9aa3816 100644 --- a/crates/handler/src/pre_execution.rs +++ b/crates/handler/src/pre_execution.rs @@ -138,7 +138,7 @@ pub fn validate_against_state_and_deduct_caller< is_nonce_check_disabled, )?; - // Bump the nonce for calls. Nonce for CREATE will be bumped in `handle_create`. + // Bump the nonce for calls. Nonce for CREATE will be bumped in `make_create_frame`. if tx.kind().is_call() { // Nonce is already checked caller_account.info.nonce = caller_account.info.nonce.saturating_add(1); @@ -146,28 +146,31 @@ pub fn validate_against_state_and_deduct_caller< let max_balance_spending = tx.max_balance_spending()?; - let mut new_balance = caller_account.info.balance; - // Check if account has enough balance for `gas_limit * max_fee`` and value transfer. // Transfer will be done inside `*_inner` functions. - if is_balance_check_disabled { - // Make sure the caller's balance is at least the value of the transaction. 
- new_balance = caller_account.info.balance.max(tx.value()); - } else if max_balance_spending > caller_account.info.balance { + if max_balance_spending > caller_account.info.balance && !is_balance_check_disabled { return Err(InvalidTransaction::LackOfFundForMaxFee { fee: Box::new(max_balance_spending), balance: Box::new(caller_account.info.balance), } .into()); - } else { - let effective_balance_spending = tx - .effective_balance_spending(basefee, blob_price) - .expect("effective balance is always smaller than max balance so it can't overflow"); + } - // subtracting max balance spending with value that is going to be deducted later in the call. - let gas_balance_spending = effective_balance_spending - tx.value(); + let effective_balance_spending = tx + .effective_balance_spending(basefee, blob_price) + .expect("effective balance is always smaller than max balance so it can't overflow"); - new_balance = new_balance.saturating_sub(gas_balance_spending); + // subtracting max balance spending with value that is going to be deducted later in the call. + let gas_balance_spending = effective_balance_spending - tx.value(); + + let mut new_balance = caller_account + .info + .balance + .saturating_sub(gas_balance_spending); + + if is_balance_check_disabled { + // Make sure the caller's balance is at least the value of the transaction. + new_balance = new_balance.max(tx.value()); } let old_balance = caller_account.info.balance; diff --git a/crates/handler/src/precompile_provider.rs b/crates/handler/src/precompile_provider.rs index 57083fced6..bf960a1e51 100644 --- a/crates/handler/src/precompile_provider.rs +++ b/crates/handler/src/precompile_provider.rs @@ -124,7 +124,11 @@ impl PrecompileProvider for EthPrecompiles { Ok(output) => { let underflow = result.gas.record_cost(output.gas_used); assert!(underflow, "Gas underflow is not possible"); - result.result = InstructionResult::Return; + result.result = if output.reverted { + InstructionResult::Revert + } else { + InstructionResult::Return + }; result.output = output.bytes; } Err(PrecompileError::Fatal(e)) => return Err(e), diff --git a/crates/handler/src/system_call.rs b/crates/handler/src/system_call.rs index 86adfd2806..5f27a9fb85 100644 --- a/crates/handler/src/system_call.rs +++ b/crates/handler/src/system_call.rs @@ -1,13 +1,32 @@ +//! System call logic for external state transitions required by certain EIPs (notably [EIP-2935](https://eips.ethereum.org/EIPS/eip-2935) and [EIP-4788](https://eips.ethereum.org/EIPS/eip-4788)). +//! +//! These EIPs require the client to perform special system calls to update state (such as block hashes or beacon roots) at block boundaries, outside of normal EVM transaction execution. REVM provides the system call mechanism, but the actual state transitions must be performed by the client or test harness, not by the EVM itself. +//! +//! # Example: Using `transact_system_call` for pre/post block hooks +//! +//! The client should use [`SystemCallEvm::transact_system_call`] method to perform required state updates before or after block execution, as specified by the EIP: +//! +//! ```rust,ignore +//! // Example: update beacon root (EIP-4788) at the start of a block +//! let beacon_root: Bytes = ...; // obtained from consensus layer +//! let beacon_contract: Address = "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02".parse().unwrap(); +//! evm.transact_system_call(beacon_contract, beacon_root)?; +//! +//! // Example: update block hash (EIP-2935) at the end of a block +//! 
let block_hash: Bytes = ...; // new block hash +//! let history_contract: Address = "0x0000F90827F1C53a10cb7A02335B175320002935".parse().unwrap(); +//! evm.transact_system_call(history_contract, block_hash)?; +//! ``` +//! +//! See the book section on [External State Transitions](../../book/src/external_state_transitions.md) for more details. use crate::{ frame::EthFrame, instructions::InstructionProvider, ExecuteCommitEvm, ExecuteEvm, Handler, MainnetHandler, PrecompileProvider, }; -use context::{ - result::ExecResultAndState, ContextSetters, ContextTr, Evm, JournalTr, TransactionType, TxEnv, -}; +use context::{result::ExecResultAndState, ContextSetters, ContextTr, Evm, JournalTr, TxEnv}; use database_interface::DatabaseCommit; use interpreter::{interpreter::EthInterpreter, InterpreterResult}; -use primitives::{address, eip7825, Address, Bytes, TxKind}; +use primitives::{address, Address, Bytes, TxKind}; use state::EvmState; /// The system address used for system calls. @@ -39,14 +58,13 @@ impl SystemCallTx for TxEnv { system_contract_address: Address, data: Bytes, ) -> Self { - TxEnv { - tx_type: TransactionType::Legacy as u8, - caller, - data, - kind: TxKind::Call(system_contract_address), - gas_limit: eip7825::TX_GAS_LIMIT_CAP, - ..Default::default() - } + TxEnv::builder() + .caller(caller) + .data(data) + .kind(TxKind::Call(system_contract_address)) + .gas_limit(30_000_000) + .build() + .unwrap() } } @@ -179,7 +197,7 @@ mod tests { use super::*; use context::{ result::{ExecutionResult, Output, SuccessReason}, - Context, + Context, Transaction, }; use database::InMemoryDB; use primitives::{b256, bytes, StorageKey, U256}; @@ -199,15 +217,18 @@ mod tests { let block_hash = b256!("0x1111111111111111111111111111111111111111111111111111111111111111"); - let mut my_evm = Context::mainnet() + let mut evm = Context::mainnet() .with_db(db) // block with number 1 will set storage at slot 0. 
.modify_block_chained(|b| b.number = U256::ONE) .build_mainnet(); - let output = my_evm + let output = evm .transact_system_call_finalize(HISTORY_STORAGE_ADDRESS, block_hash.0.into()) .unwrap(); + // system call gas limit is 30M + assert_eq!(evm.ctx.tx().gas_limit(), 30_000_000); + assert_eq!( output.result, ExecutionResult::Success { diff --git a/crates/handler/src/validation.rs b/crates/handler/src/validation.rs index 0f09c3cb09..6c956cbc66 100644 --- a/crates/handler/src/validation.rs +++ b/crates/handler/src/validation.rs @@ -328,10 +328,6 @@ mod tests { spec_id: Option, ) -> Result> { let ctx = Context::mainnet() - .modify_tx_chained(|tx| { - tx.kind = TxKind::Create; - tx.data = bytecode.clone(); - }) .modify_cfg_chained(|c| { if let Some(spec_id) = spec_id { c.spec = spec_id; @@ -340,11 +336,13 @@ mod tests { .with_db(CacheDB::::default()); let mut evm = ctx.build_mainnet(); - evm.transact_commit(TxEnv { - kind: TxKind::Create, - data: bytecode.clone(), - ..Default::default() - }) + evm.transact_commit( + TxEnv::builder() + .kind(TxKind::Create) + .data(bytecode.clone()) + .build() + .unwrap(), + ) } #[test] @@ -501,12 +499,14 @@ mod tests { let call_result = Context::mainnet() .with_db(CacheDB::::default()) .build_mainnet() - .transact_commit(TxEnv { - caller: tx_caller, - kind: TxKind::Call(factory_address), - data: Bytes::new(), - ..Default::default() - }) + .transact_commit( + TxEnv::builder() + .caller(tx_caller) + .kind(TxKind::Call(factory_address)) + .data(Bytes::new()) + .build() + .unwrap(), + ) .expect("call factory contract failed"); match &call_result { @@ -583,12 +583,14 @@ mod tests { let call_result = Context::mainnet() .with_db(CacheDB::::default()) .build_mainnet() - .transact_commit(TxEnv { - caller: tx_caller, - kind: TxKind::Call(factory_address), - data: Bytes::new(), - ..Default::default() - }) + .transact_commit( + TxEnv::builder() + .caller(tx_caller) + .kind(TxKind::Call(factory_address)) + .data(Bytes::new()) + .build() + .unwrap(), + ) .expect("call factory contract failed"); match &call_result { diff --git a/crates/inspector/CHANGELOG.md b/crates/inspector/CHANGELOG.md index 7578f55547..2c6729f593 100644 --- a/crates/inspector/CHANGELOG.md +++ b/crates/inspector/CHANGELOG.md @@ -7,6 +7,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [8.1.0](https://github.com/bluealloy/revm/compare/revm-inspector-v8.0.3...revm-inspector-v8.1.0) - 2025-07-23 + +### Added + +- count inspector and bench test ([#2730](https://github.com/bluealloy/revm/pull/2730)) + +### Fixed + +- fully deprecate serde-json ([#2767](https://github.com/bluealloy/revm/pull/2767)) +- features and check in ci ([#2766](https://github.com/bluealloy/revm/pull/2766)) + +### Other + +- *(inspector)* simplify create_end docs and intrp.bytecode.set_action ([#2723](https://github.com/bluealloy/revm/pull/2723)) +- *(inspector)* update obsolete current_opcode() comment ([#2722](https://github.com/bluealloy/revm/pull/2722)) + +## [8.0.3](https://github.com/bluealloy/revm/compare/revm-inspector-v8.0.2...revm-inspector-v8.0.3) - 2025-07-14 + +### Fixed + +- *(Inspector)* call_end not calle on first call fast return ([#2697](https://github.com/bluealloy/revm/pull/2697)) + +## [8.0.2](https://github.com/bluealloy/revm/compare/revm-inspector-v8.0.1...revm-inspector-v8.0.2) - 2025-07-03 + +### Fixed + +- *(inspector)* revert pointer before calling step_end ([#2687](https://github.com/bluealloy/revm/pull/2687)) + +### Other + +- minor fixes 
([#2686](https://github.com/bluealloy/revm/pull/2686)) + +## [8.0.1](https://github.com/bluealloy/revm/compare/revm-inspector-v7.0.1...revm-inspector-v8.0.1) - 2025-06-30 + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/revm-inspector-v7.0.0...revm-inspector-v7.0.1) - 2025-06-20 ### Other diff --git a/crates/inspector/Cargo.toml b/crates/inspector/Cargo.toml index ac5ea92122..1777ade610 100644 --- a/crates/inspector/Cargo.toml +++ b/crates/inspector/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-inspector" description = "Revm inspector interface" -version = "7.0.1" +version = "8.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -52,9 +52,10 @@ std = [ "interpreter/std", "primitives/std", "state/std", - "either/std" + "either/std", ] serde = [ + "dep:serde", "database/serde", "context/serde", "database-interface/serde", @@ -62,9 +63,10 @@ serde = [ "interpreter/serde", "primitives/serde", "state/serde", - "either/serde" + "either/serde", ] -# deprecated please use [`tracer`] feature instead -serde-json = ["dep:serde", "dep:serde_json", "serde"] -tracer = ["serde-json"] +tracer = ["std", "serde", "dep:serde_json"] + +# Deprecated, please use `tracer` feature instead. +serde-json = ["tracer"] diff --git a/crates/inspector/src/count_inspector.rs b/crates/inspector/src/count_inspector.rs new file mode 100644 index 0000000000..dd29efe15f --- /dev/null +++ b/crates/inspector/src/count_inspector.rs @@ -0,0 +1,337 @@ +//! CountInspector - Inspector that counts all opcodes that were called. +use crate::inspector::Inspector; +use interpreter::{interpreter_types::Jumps, InterpreterTypes}; +use primitives::HashMap; + +/// Inspector that counts all opcodes that were called during execution. +#[derive(Clone, Debug, Default)] +pub struct CountInspector { + /// Map from opcode value to count of times it was executed. + opcode_counts: HashMap, + /// Count of initialize_interp calls. + initialize_interp_count: u64, + /// Count of step calls. + step_count: u64, + /// Count of step_end calls. + step_end_count: u64, + /// Count of log calls. + log_count: u64, + /// Count of call calls. + call_count: u64, + /// Count of call_end calls. + call_end_count: u64, + /// Count of create calls. + create_count: u64, + /// Count of create_end calls. + create_end_count: u64, + /// Count of selfdestruct calls. + selfdestruct_count: u64, +} + +impl CountInspector { + /// Create a new CountInspector. + pub fn new() -> Self { + Self { + opcode_counts: HashMap::default(), + initialize_interp_count: 0, + step_count: 0, + step_end_count: 0, + log_count: 0, + call_count: 0, + call_end_count: 0, + create_count: 0, + create_end_count: 0, + selfdestruct_count: 0, + } + } + + /// Get the count for a specific opcode. + pub fn get_count(&self, opcode: u8) -> u64 { + self.opcode_counts.get(&opcode).copied().unwrap_or(0) + } + + /// Get a reference to all opcode counts. + pub fn opcode_counts(&self) -> &HashMap { + &self.opcode_counts + } + + /// Get the total number of opcodes executed. + pub fn total_opcodes(&self) -> u64 { + self.opcode_counts.values().sum() + } + + /// Get the number of unique opcodes executed. + pub fn unique_opcodes(&self) -> usize { + self.opcode_counts.len() + } + + /// Clear all counts. 
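+    ///
+    /// Resets the per-opcode map and every hook counter back to zero so the same
+    /// inspector instance can be reused for another run.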
+ pub fn clear(&mut self) { + self.opcode_counts.clear(); + self.initialize_interp_count = 0; + self.step_count = 0; + self.step_end_count = 0; + self.log_count = 0; + self.call_count = 0; + self.call_end_count = 0; + self.create_count = 0; + self.create_end_count = 0; + self.selfdestruct_count = 0; + } + + /// Get the count of initialize_interp calls. + pub fn initialize_interp_count(&self) -> u64 { + self.initialize_interp_count + } + + /// Get the count of step calls. + pub fn step_count(&self) -> u64 { + self.step_count + } + + /// Get the count of step_end calls. + pub fn step_end_count(&self) -> u64 { + self.step_end_count + } + + /// Get the count of log calls. + pub fn log_count(&self) -> u64 { + self.log_count + } + + /// Get the count of call calls. + pub fn call_count(&self) -> u64 { + self.call_count + } + + /// Get the count of call_end calls. + pub fn call_end_count(&self) -> u64 { + self.call_end_count + } + + /// Get the count of create calls. + pub fn create_count(&self) -> u64 { + self.create_count + } + + /// Get the count of create_end calls. + pub fn create_end_count(&self) -> u64 { + self.create_end_count + } + + /// Get the count of selfdestruct calls. + pub fn selfdestruct_count(&self) -> u64 { + self.selfdestruct_count + } +} + +impl Inspector for CountInspector { + fn initialize_interp( + &mut self, + _interp: &mut interpreter::Interpreter, + _context: &mut CTX, + ) { + self.initialize_interp_count += 1; + } + + fn step(&mut self, interp: &mut interpreter::Interpreter, _context: &mut CTX) { + self.step_count += 1; + let opcode = interp.bytecode.opcode(); + *self.opcode_counts.entry(opcode).or_insert(0) += 1; + } + + fn step_end(&mut self, _interp: &mut interpreter::Interpreter, _context: &mut CTX) { + self.step_end_count += 1; + } + + fn log( + &mut self, + _interp: &mut interpreter::Interpreter, + _context: &mut CTX, + _log: primitives::Log, + ) { + self.log_count += 1; + } + + fn call( + &mut self, + _context: &mut CTX, + _inputs: &mut interpreter::CallInputs, + ) -> Option { + self.call_count += 1; + None + } + + fn call_end( + &mut self, + _context: &mut CTX, + _inputs: &interpreter::CallInputs, + _outcome: &mut interpreter::CallOutcome, + ) { + self.call_end_count += 1; + } + + fn create( + &mut self, + _context: &mut CTX, + _inputs: &mut interpreter::CreateInputs, + ) -> Option { + self.create_count += 1; + None + } + + fn create_end( + &mut self, + _context: &mut CTX, + _inputs: &interpreter::CreateInputs, + _outcome: &mut interpreter::CreateOutcome, + ) { + self.create_end_count += 1; + } + + fn selfdestruct( + &mut self, + _contract: primitives::Address, + _target: primitives::Address, + _value: primitives::U256, + ) { + self.selfdestruct_count += 1; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::InspectEvm; + use context::Context; + use database::BenchmarkDB; + use handler::{MainBuilder, MainContext}; + use primitives::{Bytes, TxKind}; + use state::bytecode::{opcode, Bytecode}; + + #[test] + fn test_count_inspector() { + // Create simple bytecode that just adds two numbers and stops + let contract_data: Bytes = Bytes::from(vec![ + opcode::PUSH1, + 0x10, // 0: PUSH1 16 + opcode::PUSH1, + 0x20, // 2: PUSH1 32 + opcode::ADD, // 4: ADD + opcode::DUP1, // 5: DUP1 (duplicate the result) + opcode::PUSH1, + 0x00, // 6: PUSH1 0 + opcode::MSTORE, // 8: MSTORE (store result in memory) + opcode::STOP, // 9: STOP + ]); + let bytecode = Bytecode::new_raw(contract_data); + + let ctx = 
Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode.clone())); + let mut count_inspector = CountInspector::new(); + + let mut evm = ctx.build_mainnet_with_inspector(&mut count_inspector); + + // Execute the contract + evm.inspect_one_tx( + context::TxEnv::builder() + .kind(TxKind::Call(database::BENCH_TARGET)) + .gas_limit(30000) + .build() + .unwrap(), + ) + .unwrap(); + + // Check opcode counts + assert_eq!(count_inspector.get_count(opcode::PUSH1), 3); + assert_eq!(count_inspector.get_count(opcode::ADD), 1); + assert_eq!(count_inspector.get_count(opcode::DUP1), 1); + assert_eq!(count_inspector.get_count(opcode::MSTORE), 1); + assert_eq!(count_inspector.get_count(opcode::STOP), 1); + + // Check totals + assert_eq!(count_inspector.total_opcodes(), 7); + assert_eq!(count_inspector.unique_opcodes(), 5); + + // Check inspector function counts + assert_eq!(count_inspector.initialize_interp_count(), 1); + assert_eq!(count_inspector.step_count(), 7); // Each opcode triggers a step + assert_eq!(count_inspector.step_end_count(), 7); // Each opcode triggers a step_end + assert_eq!(count_inspector.log_count(), 0); // No LOG opcodes + assert_eq!(count_inspector.call_count(), 1); // The transaction itself is a call + assert_eq!(count_inspector.call_end_count(), 1); + assert_eq!(count_inspector.create_count(), 0); // No CREATE opcodes + assert_eq!(count_inspector.create_end_count(), 0); + assert_eq!(count_inspector.selfdestruct_count(), 0); // No SELFDESTRUCT opcodes + } + + #[test] + fn test_count_inspector_clear() { + let mut inspector = CountInspector::new(); + + // Add some counts manually for testing + *inspector.opcode_counts.entry(opcode::PUSH1).or_insert(0) += 5; + *inspector.opcode_counts.entry(opcode::ADD).or_insert(0) += 3; + inspector.initialize_interp_count = 2; + inspector.step_count = 10; + inspector.step_end_count = 10; + inspector.log_count = 1; + inspector.call_count = 3; + inspector.call_end_count = 3; + inspector.create_count = 1; + inspector.create_end_count = 1; + inspector.selfdestruct_count = 1; + + assert_eq!(inspector.total_opcodes(), 8); + assert_eq!(inspector.unique_opcodes(), 2); + assert_eq!(inspector.initialize_interp_count(), 2); + assert_eq!(inspector.step_count(), 10); + + // Clear and verify + inspector.clear(); + assert_eq!(inspector.total_opcodes(), 0); + assert_eq!(inspector.unique_opcodes(), 0); + assert!(inspector.opcode_counts().is_empty()); + assert_eq!(inspector.initialize_interp_count(), 0); + assert_eq!(inspector.step_count(), 0); + assert_eq!(inspector.step_end_count(), 0); + assert_eq!(inspector.log_count(), 0); + assert_eq!(inspector.call_count(), 0); + assert_eq!(inspector.call_end_count(), 0); + assert_eq!(inspector.create_count(), 0); + assert_eq!(inspector.create_end_count(), 0); + assert_eq!(inspector.selfdestruct_count(), 0); + } + + #[test] + fn test_count_inspector_with_logs() { + // Create bytecode that emits a log + let contract_data: Bytes = Bytes::from(vec![ + opcode::PUSH1, + 0x20, // 0: PUSH1 32 (length) + opcode::PUSH1, + 0x00, // 2: PUSH1 0 (offset) + opcode::LOG0, // 4: LOG0 - emit log with no topics + opcode::STOP, // 5: STOP + ]); + let bytecode = Bytecode::new_raw(contract_data); + + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode.clone())); + let mut count_inspector = CountInspector::new(); + + let mut evm = ctx.build_mainnet_with_inspector(&mut count_inspector); + + // Execute the contract + evm.inspect_one_tx( + context::TxEnv::builder() + .kind(TxKind::Call(database::BENCH_TARGET)) + 
.gas_limit(30000) + .build() + .unwrap(), + ) + .unwrap(); + + // Check that log was counted + assert_eq!(count_inspector.log_count(), 1); + assert_eq!(count_inspector.step_count(), 4); // 2 PUSH1 + LOG0 + STOP + } +} diff --git a/crates/inspector/src/gas.rs b/crates/inspector/src/gas.rs index abaef6d48e..db286d4ca1 100644 --- a/crates/inspector/src/gas.rs +++ b/crates/inspector/src/gas.rs @@ -92,6 +92,7 @@ mod tests { #[derive(Default, Debug)] struct StackInspector { pc: usize, + opcode: u8, gas_inspector: GasInspector, gas_remaining_steps: Vec<(usize, u64)>, } @@ -103,10 +104,13 @@ mod tests { fn step(&mut self, interp: &mut Interpreter, _context: &mut CTX) { self.pc = interp.bytecode.pc(); + self.opcode = interp.bytecode.opcode(); self.gas_inspector.step(&interp.gas); } fn step_end(&mut self, interp: &mut Interpreter, _context: &mut CTX) { + interp.bytecode.pc(); + interp.bytecode.opcode(); self.gas_inspector.step_end(&mut interp.gas); self.gas_remaining_steps .push((self.pc, self.gas_inspector.gas_remaining())); @@ -145,12 +149,14 @@ mod tests { let mut evm = ctx.build_mainnet_with_inspector(StackInspector::default()); // Run evm. - evm.inspect_one_tx(TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - gas_limit: 21100, - ..Default::default() - }) + evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(21100) + .build() + .unwrap(), + ) .unwrap(); let inspector = &evm.inspector; @@ -246,21 +252,12 @@ mod tests { let bytecode = Bytecode::new_raw(contract_data); - let ctx = Context::mainnet() + let mut evm = Context::mainnet() .with_db(BenchmarkDB::new_bytecode(bytecode.clone())) - .modify_tx_chained(|tx| { - tx.caller = BENCH_CALLER; - tx.kind = TxKind::Call(BENCH_TARGET); - }); - - let mut evm = ctx.build_mainnet_with_inspector(inspector); + .build_mainnet_with_inspector(inspector); let _ = evm - .inspect_one_tx(TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - ..Default::default() - }) + .inspect_one_tx(TxEnv::builder_for_bench().build().unwrap()) .unwrap(); assert_eq!(evm.inspector.return_buffer.len(), 3); assert_eq!( diff --git a/crates/inspector/src/handler.rs b/crates/inspector/src/handler.rs index de5f706802..ba2b8570d5 100644 --- a/crates/inspector/src/handler.rs +++ b/crates/inspector/src/handler.rs @@ -215,11 +215,18 @@ where log_num = new_log; } + // if loops is ending, break the loop so we can revert to the previous pointer and then call step_end. + if interpreter.bytecode.is_end() { + break; + } + // Call step_end. inspector.step_end(interpreter, context); } interpreter.bytecode.revert_to_previous_pointer(); + // call step_end again to handle the last instruction + inspector.step_end(interpreter, context); let next_action = interpreter.take_next_action(); diff --git a/crates/inspector/src/inspect.rs b/crates/inspector/src/inspect.rs index 6fb2ecd2fd..5ac1ada4a6 100644 --- a/crates/inspector/src/inspect.rs +++ b/crates/inspector/src/inspect.rs @@ -56,7 +56,7 @@ pub trait InspectEvm: ExecuteEvm { /// /// Functions return CommitOutput from [`ExecuteCommitEvm`] trait. pub trait InspectCommitEvm: InspectEvm + ExecuteCommitEvm { - /// Inspect the EVM with the current inspector and previous transaction by replaying,similar to [`InspectEvm::inspect_tx`] + /// Inspect the EVM with the current inspector and previous transaction by replaying, similar to [`InspectEvm::inspect_tx`] /// and commit the state diff to the database. 
fn inspect_tx_commit(&mut self, tx: Self::Tx) -> Result { let output = self.inspect_one_tx(tx)?; diff --git a/crates/inspector/src/inspector.rs b/crates/inspector/src/inspector.rs index 2fba9d091e..7a2101c377 100644 --- a/crates/inspector/src/inspector.rs +++ b/crates/inspector/src/inspector.rs @@ -17,7 +17,7 @@ use state::EvmState; pub trait Inspector { /// Called before the interpreter is initialized. /// - /// If `interp.set_action` is set the execution of the interpreter is skipped. + /// If `interp.bytecode.set_action` is set the execution of the interpreter is skipped. #[inline] fn initialize_interp(&mut self, interp: &mut Interpreter, context: &mut CTX) { let _ = interp; @@ -31,7 +31,7 @@ pub trait Inspector { /// /// # Example /// - /// To get the current opcode, use `interp.current_opcode()`. + /// To get the current opcode, use `interp.bytecode.opcode()`. #[inline] fn step(&mut self, interp: &mut Interpreter, context: &mut CTX) { let _ = interp; @@ -91,8 +91,7 @@ pub trait Inspector { /// Called when a contract has been created. /// - /// InstructionResulting anything other than the values passed to this function (`(ret, remaining_gas, - /// address, out)`) will alter the result of the create. + /// Modifying the outcome will alter the result of the create operation. #[inline] fn create_end( &mut self, diff --git a/crates/inspector/src/inspector_tests.rs b/crates/inspector/src/inspector_tests.rs new file mode 100644 index 0000000000..29cfa54c94 --- /dev/null +++ b/crates/inspector/src/inspector_tests.rs @@ -0,0 +1,734 @@ +#[cfg(test)] +mod tests { + use crate::{InspectEvm, Inspector}; + use context::{Context, TxEnv}; + use database::{BenchmarkDB, BENCH_CALLER, BENCH_TARGET}; + use handler::{MainBuilder, MainContext}; + use interpreter::{ + interpreter_types::{Jumps, MemoryTr, StackTr}, + CallInputs, CallOutcome, CreateInputs, CreateOutcome, Interpreter, InterpreterTypes, + }; + use primitives::{address, Address, Bytes, Log, TxKind, U256}; + use state::{bytecode::opcode, AccountInfo, Bytecode}; + + #[derive(Debug, Clone)] + struct InterpreterState { + pc: usize, + stack_len: usize, + memory_size: usize, + } + + #[derive(Debug, Clone)] + struct StepRecord { + before: InterpreterState, + after: Option, + opcode_name: String, + } + + #[derive(Debug, Clone)] + enum InspectorEvent { + Step(StepRecord), + Call { + inputs: CallInputs, + outcome: Option, + }, + Create { + inputs: CreateInputs, + outcome: Option, + }, + Log(Log), + Selfdestruct { + address: Address, + beneficiary: Address, + value: U256, + }, + } + + #[derive(Debug, Default)] + struct TestInspector { + events: Vec, + step_count: usize, + call_depth: usize, + } + + impl TestInspector { + fn new() -> Self { + Self { + events: Vec::new(), + step_count: 0, + call_depth: 0, + } + } + + fn capture_interpreter_state( + interp: &Interpreter, + ) -> InterpreterState + where + INTR::Bytecode: Jumps, + INTR::Stack: StackTr, + INTR::Memory: MemoryTr, + { + InterpreterState { + pc: interp.bytecode.pc(), + stack_len: interp.stack.len(), + memory_size: interp.memory.size(), + } + } + + fn get_events(&self) -> Vec { + self.events.clone() + } + + fn get_step_count(&self) -> usize { + self.step_count + } + } + + impl Inspector for TestInspector + where + INTR: InterpreterTypes, + INTR::Bytecode: Jumps, + INTR::Stack: StackTr, + INTR::Memory: MemoryTr, + { + fn step(&mut self, interp: &mut Interpreter, _context: &mut CTX) { + self.step_count += 1; + + let state = Self::capture_interpreter_state(interp); + let opcode = 
interp.bytecode.opcode(); + let opcode_name = if let Some(op) = state::bytecode::opcode::OpCode::new(opcode) { + format!("{op}") + } else { + format!("Unknown(0x{opcode:02x})") + }; + + self.events.push(InspectorEvent::Step(StepRecord { + before: state, + after: None, + opcode_name, + })); + } + + fn step_end(&mut self, interp: &mut Interpreter, _context: &mut CTX) { + let state = Self::capture_interpreter_state(interp); + + if let Some(InspectorEvent::Step(record)) = self.events.last_mut() { + record.after = Some(state); + } + } + + fn log(&mut self, _interp: &mut Interpreter, _ctx: &mut CTX, log: Log) { + self.events.push(InspectorEvent::Log(log)); + } + + fn call(&mut self, _ctx: &mut CTX, inputs: &mut CallInputs) -> Option { + self.call_depth += 1; + self.events.push(InspectorEvent::Call { + inputs: inputs.clone(), + outcome: None, + }); + None + } + + fn call_end(&mut self, _ctx: &mut CTX, _inputs: &CallInputs, outcome: &mut CallOutcome) { + self.call_depth -= 1; + if let Some(InspectorEvent::Call { + outcome: ref mut out, + .. + }) = self + .events + .iter_mut() + .rev() + .find(|e| matches!(e, InspectorEvent::Call { outcome: None, .. })) + { + *out = Some(outcome.clone()); + } + } + + fn create(&mut self, _ctx: &mut CTX, inputs: &mut CreateInputs) -> Option { + self.events.push(InspectorEvent::Create { + inputs: inputs.clone(), + outcome: None, + }); + None + } + + fn create_end( + &mut self, + _ctx: &mut CTX, + _inputs: &CreateInputs, + outcome: &mut CreateOutcome, + ) { + if let Some(InspectorEvent::Create { + outcome: ref mut out, + .. + }) = self + .events + .iter_mut() + .rev() + .find(|e| matches!(e, InspectorEvent::Create { outcome: None, .. })) + { + *out = Some(outcome.clone()); + } + } + + fn selfdestruct(&mut self, contract: Address, beneficiary: Address, value: U256) { + self.events.push(InspectorEvent::Selfdestruct { + address: contract, + beneficiary, + value, + }); + } + } + + #[test] + fn test_push_opcodes_and_stack_operations() { + // PUSH1 0x42, PUSH2 0x1234, ADD, PUSH1 0x00, MSTORE, STOP + let code = Bytes::from(vec![ + opcode::PUSH1, + 0x42, + opcode::PUSH2, + 0x12, + 0x34, + opcode::ADD, + opcode::PUSH1, + 0x00, + opcode::MSTORE, + opcode::STOP, + ]); + + let bytecode = Bytecode::new_raw(code); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + let step_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Step(record) = e { + Some(record) + } else { + None + } + }) + .collect(); + + // Verify PUSH1 0x42 + let push1_event = &step_events[0]; + assert_eq!(push1_event.opcode_name, "PUSH1"); + assert_eq!(push1_event.before.stack_len, 0); + assert_eq!(push1_event.after.as_ref().unwrap().stack_len, 1); + + // Verify PUSH2 0x1234 + let push2_event = &step_events[1]; + assert_eq!(push2_event.opcode_name, "PUSH2"); + assert_eq!(push2_event.before.stack_len, 1); + assert_eq!(push2_event.after.as_ref().unwrap().stack_len, 2); + + // Verify ADD + let add_event = &step_events[2]; + assert_eq!(add_event.opcode_name, "ADD"); + assert_eq!(add_event.before.stack_len, 2); + assert_eq!(add_event.after.as_ref().unwrap().stack_len, 1); + + // Verify all opcodes were tracked + 
assert!(inspector.get_step_count() >= 5); // PUSH1, PUSH2, ADD, PUSH1, MSTORE, STOP + } + + #[test] + fn test_jump_and_jumpi_control_flow() { + // PUSH1 0x08, JUMP, INVALID, JUMPDEST, PUSH1 0x01, PUSH1 0x0F, JUMPI, INVALID, JUMPDEST, STOP + let code = Bytes::from(vec![ + opcode::PUSH1, + 0x08, + opcode::JUMP, + opcode::INVALID, + opcode::INVALID, + opcode::INVALID, + opcode::INVALID, + opcode::INVALID, + opcode::JUMPDEST, // offset 0x08 + opcode::PUSH1, + 0x01, + opcode::PUSH1, + 0x0F, + opcode::JUMPI, + opcode::INVALID, + opcode::JUMPDEST, // offset 0x0F + opcode::STOP, + ]); + + let bytecode = Bytecode::new_raw(code); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + let step_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Step(record) = e { + Some(record) + } else { + None + } + }) + .collect(); + + // Find JUMP instruction + let jump_event = step_events + .iter() + .find(|e| e.opcode_name == "JUMP") + .unwrap(); + assert_eq!(jump_event.before.pc, 2); // After PUSH1 0x08 + assert_eq!(jump_event.after.as_ref().unwrap().pc, 8); // Jumped to JUMPDEST + + // Find JUMPI instruction + let jumpi_event = step_events + .iter() + .find(|e| e.opcode_name == "JUMPI") + .unwrap(); + assert!(jumpi_event.before.stack_len >= 2); // Has condition and destination + // JUMPI should have jumped since condition is 1 (true) + assert_eq!(jumpi_event.after.as_ref().unwrap().pc, 0x0F); + } + + #[test] + fn test_call_operations() { + // For CALL tests, we need a more complex setup with multiple contracts + // Deploy a simple contract that returns a value + let callee_code = Bytes::from(vec![ + opcode::PUSH1, + 0x42, // Push return value + opcode::PUSH1, + 0x00, // Push memory offset + opcode::MSTORE, + opcode::PUSH1, + 0x20, // Push return size + opcode::PUSH1, + 0x00, // Push return offset + opcode::RETURN, + ]); + + // Caller contract that calls the callee + let caller_code = Bytes::from(vec![ + // Setup CALL parameters + opcode::PUSH1, + 0x20, // retSize + opcode::PUSH1, + 0x00, // retOffset + opcode::PUSH1, + 0x00, // argsSize + opcode::PUSH1, + 0x00, // argsOffset + opcode::PUSH1, + 0x00, // value + opcode::PUSH20, + // address: 20 bytes to match callee_address exactly + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x01, + opcode::PUSH2, + 0xFF, + 0xFF, // gas + opcode::CALL, + opcode::STOP, + ]); + + // Create a custom database with two contracts + let mut db = database::InMemoryDB::default(); + + // Add caller contract at BENCH_TARGET + db.insert_account_info( + BENCH_TARGET, + AccountInfo { + balance: U256::from(1_000_000_000_000_000_000u64), + nonce: 0, + code_hash: primitives::keccak256(&caller_code), + code: Some(Bytecode::new_raw(caller_code)), + }, + ); + + // Add callee contract at a specific address + let callee_address = Address::new([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + ]); + db.insert_account_info( + callee_address, + AccountInfo { + balance: U256::ZERO, + nonce: 0, + code_hash: primitives::keccak256(&callee_code), + 
code: Some(Bytecode::new_raw(callee_code)), + }, + ); + + let ctx = Context::mainnet().with_db(db); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + + // Find CALL events + let call_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Call { inputs, outcome } = e { + Some((inputs, outcome)) + } else { + None + } + }) + .collect(); + + assert!(!call_events.is_empty(), "Should have recorded CALL events"); + let (call_inputs, call_outcome) = &call_events[0]; + // The test setup might be using BENCH_CALLER as the default target + // Just verify that a call was made and completed successfully + assert_eq!(call_inputs.target_address, BENCH_TARGET); + assert!(call_outcome.is_some(), "Call should have completed"); + } + + #[test] + fn test_create_opcodes() { + // CREATE test: deploy a contract that creates another contract + let init_code = vec![ + opcode::PUSH1, + 0x42, // Push constructor value + opcode::PUSH1, + 0x00, // Push memory offset + opcode::MSTORE, + opcode::PUSH1, + 0x20, // Push return size + opcode::PUSH1, + 0x00, // Push return offset + opcode::RETURN, + ]; + + let create_code = vec![ + // First, store init code in memory using CODECOPY + opcode::PUSH1, + init_code.len() as u8, // size + opcode::PUSH1, + 0x20, // code offset (after CREATE params) + opcode::PUSH1, + 0x00, // memory offset + opcode::CODECOPY, + // CREATE parameters + opcode::PUSH1, + init_code.len() as u8, // size + opcode::PUSH1, + 0x00, // offset + opcode::PUSH1, + 0x00, // value + opcode::CREATE, + opcode::STOP, + ]; + + let mut full_code = create_code; + full_code.extend_from_slice(&init_code); + + let bytecode = Bytecode::new_raw(Bytes::from(full_code)); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + + // Find CREATE events + let create_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Create { inputs, outcome } = e { + Some((inputs, outcome)) + } else { + None + } + }) + .collect(); + + assert!( + !create_events.is_empty(), + "Should have recorded CREATE events" + ); + let (_create_inputs, create_outcome) = &create_events[0]; + assert!(create_outcome.is_some(), "CREATE should have completed"); + } + + #[test] + fn test_log_operations() { + // Simple LOG0 test - no topics + let code = vec![ + // Store some data in memory for the log + opcode::PUSH1, + 0x42, + opcode::PUSH1, + 0x00, + opcode::MSTORE, + // LOG0 parameters + opcode::PUSH1, + 0x20, // size + opcode::PUSH1, + 0x00, // offset + opcode::LOG0, + opcode::STOP, + ]; + + let bytecode = Bytecode::new_raw(Bytes::from(code)); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = 
&evm.inspector; + let events = inspector.get_events(); + + // Find LOG events + let log_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Log(log) = e { + Some(log) + } else { + None + } + }) + .collect(); + + // Remove debug code - test should work now + + assert_eq!(log_events.len(), 1, "Should have recorded one LOG event"); + let log = &log_events[0]; + assert_eq!(log.topics().len(), 0, "LOG0 should have 0 topics"); + } + + #[test] + fn test_selfdestruct() { + // SELFDESTRUCT test + let beneficiary = address!("3000000000000000000000000000000000000000"); + let mut code = vec![opcode::PUSH20]; + code.extend_from_slice(beneficiary.as_ref()); + code.push(opcode::SELFDESTRUCT); + + let bytecode = Bytecode::new_raw(Bytes::from(code)); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + + // Find SELFDESTRUCT events + let selfdestruct_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Selfdestruct { + address, + beneficiary, + value, + } = e + { + Some((address, beneficiary, value)) + } else { + None + } + }) + .collect(); + + assert_eq!( + selfdestruct_events.len(), + 1, + "Should have recorded SELFDESTRUCT event" + ); + let (_address, event_beneficiary, _value) = selfdestruct_events[0]; + assert_eq!(*event_beneficiary, beneficiary); + } + + #[test] + fn test_comprehensive_inspector_integration() { + // Complex contract with multiple operations: + // 1. PUSH and arithmetic + // 2. Memory operations + // 3. Conditional jump + // 4. 
LOG0 + + let code = vec![ + // Stack operations + opcode::PUSH1, + 0x10, + opcode::PUSH1, + 0x20, + opcode::ADD, + opcode::DUP1, + opcode::PUSH1, + 0x00, + opcode::MSTORE, + // Conditional jump + opcode::PUSH1, + 0x01, + opcode::PUSH1, + 0x00, + opcode::MLOAD, + opcode::GT, + opcode::PUSH1, + 0x17, // Jump destination (adjusted) + opcode::JUMPI, + // This should be skipped + opcode::PUSH1, + 0x00, + opcode::PUSH1, + 0x00, + opcode::REVERT, + // Jump destination + opcode::JUMPDEST, // offset 0x14 + // LOG0 + opcode::PUSH1, + 0x20, + opcode::PUSH1, + 0x00, + opcode::LOG0, + opcode::STOP, + ]; + + let bytecode = Bytecode::new_raw(Bytes::from(code)); + let ctx = Context::mainnet().with_db(BenchmarkDB::new_bytecode(bytecode)); + let mut evm = ctx.build_mainnet_with_inspector(TestInspector::new()); + + // Run transaction + let _ = evm.inspect_one_tx( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)) + .gas_limit(100_000) + .build() + .unwrap(), + ); + + let inspector = &evm.inspector; + let events = inspector.get_events(); + + // Verify we captured various event types + let step_count = events + .iter() + .filter(|e| matches!(e, InspectorEvent::Step(_))) + .count(); + let log_count = events + .iter() + .filter(|e| matches!(e, InspectorEvent::Log(_))) + .count(); + + assert!(step_count > 10, "Should have multiple step events"); + assert_eq!(log_count, 1, "Should have one log event"); + + // Verify stack operations were tracked + let step_events: Vec<_> = events + .iter() + .filter_map(|e| { + if let InspectorEvent::Step(record) = e { + Some(record) + } else { + None + } + }) + .collect(); + + // Find ADD operation + let add_event = step_events.iter().find(|e| e.opcode_name == "ADD").unwrap(); + assert_eq!(add_event.before.stack_len, 2); + assert_eq!(add_event.after.as_ref().unwrap().stack_len, 1); + + // Verify memory was written + let mstore_event = step_events + .iter() + .find(|e| e.opcode_name == "MSTORE") + .unwrap(); + assert!(mstore_event.after.as_ref().unwrap().memory_size > 0); + + // Verify conditional jump worked correctly + let jumpi_event = step_events + .iter() + .find(|e| e.opcode_name == "JUMPI") + .unwrap(); + assert_eq!( + jumpi_event.after.as_ref().unwrap().pc, + 0x17, + "Should have jumped to JUMPDEST" + ); + } +} diff --git a/crates/inspector/src/lib.rs b/crates/inspector/src/lib.rs index 9673e24124..3c7f331b2b 100644 --- a/crates/inspector/src/lib.rs +++ b/crates/inspector/src/lib.rs @@ -5,7 +5,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(all(feature = "std", feature = "serde-json"))] +mod count_inspector; +#[cfg(feature = "tracer")] mod eip3155; mod either; mod gas; @@ -17,13 +18,17 @@ mod mainnet_inspect; mod noop; mod traits; +#[cfg(test)] +mod inspector_tests; + /// Inspector implementations. 
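// Editor's note on the test_comprehensive_inspector_integration bytecode added above
// (crates/inspector/src/inspector_tests.rs), with byte offsets worked out for readability:
//   0x00 PUSH1 0x10    0x02 PUSH1 0x20    0x04 ADD          0x05 DUP1
//   0x06 PUSH1 0x00    0x08 MSTORE        0x09 PUSH1 0x01   0x0B PUSH1 0x00
//   0x0D MLOAD         0x0E GT            0x0F PUSH1 0x17   0x11 JUMPI
//   0x12-0x16 skipped PUSH1/PUSH1/REVERT  0x17 JUMPDEST
//   0x18 PUSH1 0x20    0x1A PUSH1 0x00    0x1C LOG0         0x1D STOP
// The JUMPDEST therefore sits at offset 0x17, matching both the destination pushed before
// JUMPI and the pc asserted at the end of the test.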
pub mod inspectors { - #[cfg(all(feature = "std", feature = "serde-json"))] + #[cfg(feature = "tracer")] pub use super::eip3155::TracerEip3155; pub use super::gas::GasInspector; } +pub use count_inspector::CountInspector; pub use handler::{inspect_instructions, InspectorHandler}; pub use inspect::{InspectCommitEvm, InspectEvm}; pub use inspector::*; diff --git a/crates/inspector/src/traits.rs b/crates/inspector/src/traits.rs index c78f3f6134..9fc098f377 100644 --- a/crates/inspector/src/traits.rs +++ b/crates/inspector/src/traits.rs @@ -62,8 +62,12 @@ pub trait InspectorEvmTr: frame_end(ctx, inspector, &frame_init.frame_input, &mut output); return Ok(ItemOrResult::Result(output)); } - if let ItemOrResult::Result(frame) = self.frame_init(frame_init)? { - return Ok(ItemOrResult::Result(frame)); + + let frame_input = frame_init.frame_input.clone(); + if let ItemOrResult::Result(mut output) = self.frame_init(frame_init)? { + let (ctx, inspector) = self.ctx_inspector(); + frame_end(ctx, inspector, &frame_input, &mut output); + return Ok(ItemOrResult::Result(output)); } // if it is new frame, initialize the interpreter. diff --git a/crates/interpreter/CHANGELOG.md b/crates/interpreter/CHANGELOG.md index 1ae14b2e1b..473ea6dea4 100644 --- a/crates/interpreter/CHANGELOG.md +++ b/crates/interpreter/CHANGELOG.md @@ -6,6 +6,47 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [24.0.0](https://github.com/bluealloy/revm/compare/revm-interpreter-v23.0.2...revm-interpreter-v24.0.0) - 2025-07-23 + +### Added + +- *(interpreter)* update CLZ cost ([#2739](https://github.com/bluealloy/revm/pull/2739)) + +### Fixed + +- features and check in ci ([#2766](https://github.com/bluealloy/revm/pull/2766)) + +### Other + +- un-Box frames ([#2761](https://github.com/bluealloy/revm/pull/2761)) +- interpreter improvements ([#2760](https://github.com/bluealloy/revm/pull/2760)) +- evaluate instruction table initializer at compile time ([#2762](https://github.com/bluealloy/revm/pull/2762)) +- discard generic host implementation ([#2738](https://github.com/bluealloy/revm/pull/2738)) +- add release safety section for SharedMemory fn ([#2718](https://github.com/bluealloy/revm/pull/2718)) +- *(interpreter)* update docs for slice_mut and slice_range ([#2714](https://github.com/bluealloy/revm/pull/2714)) + +## [23.0.2](https://github.com/bluealloy/revm/compare/revm-interpreter-v23.0.1...revm-interpreter-v23.0.2) - 2025-07-14 + +### Other + +- simplify gas calculations by introducing a used() method ([#2703](https://github.com/bluealloy/revm/pull/2703)) + +## [23.0.1](https://github.com/bluealloy/revm/compare/revm-interpreter-v23.0.0...revm-interpreter-v23.0.1) - 2025-07-03 + +### Other + +- updated the following local packages: revm-bytecode, revm-context-interface + +## [22.1.0](https://github.com/bluealloy/revm/compare/revm-interpreter-v22.0.1...revm-interpreter-v22.1.0) - 2025-06-30 + +### Added + +- blake2 avx2 ([#2670](https://github.com/bluealloy/revm/pull/2670)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) + ## [22.0.1](https://github.com/bluealloy/revm/compare/revm-interpreter-v22.0.0...revm-interpreter-v22.0.1) - 2025-06-20 ### Other @@ -166,7 +207,7 @@ Stable version - fix wrong comment & remove useless struct ([#2105](https://github.com/bluealloy/revm/pull/2105)) - move all dependencies to workspace ([#2092](https://github.com/bluealloy/revm/pull/2092)) -## 
[16.0.0](https://github.com/bluealloy/revm/compare/revm-interpreter-v15.2.0...revm-interpreter-v16.0.0-alpha.1) - 2025-02-16 +## [16.0.0-alpha.1](https://github.com/bluealloy/revm/compare/revm-interpreter-v15.2.0...revm-interpreter-v16.0.0-alpha.1) - 2025-02-16 ### Added diff --git a/crates/interpreter/Cargo.toml b/crates/interpreter/Cargo.toml index afce8f25cf..a03818785f 100644 --- a/crates/interpreter/Cargo.toml +++ b/crates/interpreter/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-interpreter" description = "Revm Interpreter that executes bytecode." -version = "22.0.1" +version = "24.0.0" authors.workspace = true edition.workspace = true keywords.workspace = true diff --git a/crates/interpreter/src/gas.rs b/crates/interpreter/src/gas.rs index d069db0cf9..e270224069 100644 --- a/crates/interpreter/src/gas.rs +++ b/crates/interpreter/src/gas.rs @@ -73,6 +73,12 @@ impl Gas { self.limit - self.remaining } + /// Returns the final amount of gas used by subtracting the refund from spent gas. + #[inline] + pub const fn used(&self) -> u64 { + self.spent().saturating_sub(self.refunded() as u64) + } + /// Returns the total amount of gas spent, minus the refunded gas. #[inline] pub const fn spent_sub_refunded(&self) -> u64 { @@ -189,6 +195,7 @@ pub struct MemoryGas { impl MemoryGas { /// Creates a new `MemoryGas` instance with zero memory allocation. + #[inline] pub const fn new() -> Self { Self { words_num: 0, @@ -196,9 +203,9 @@ impl MemoryGas { } } - #[inline] /// Records a new memory length and calculates additional cost if memory is expanded. /// Returns the additional gas cost required, or None if no expansion is needed. + #[inline] pub fn record_new_len(&mut self, new_num: usize) -> Option { if new_num <= self.words_num { return None; diff --git a/crates/interpreter/src/instruction_context.rs b/crates/interpreter/src/instruction_context.rs index d7affc69c4..d8aa146829 100644 --- a/crates/interpreter/src/instruction_context.rs +++ b/crates/interpreter/src/instruction_context.rs @@ -6,10 +6,10 @@ use super::Instruction; /// This struct provides access to both the host interface for external state operations /// and the interpreter state for stack, memory, and gas operations. pub struct InstructionContext<'a, H: ?Sized, ITy: InterpreterTypes> { - /// Reference to the host interface for accessing external blockchain state. - pub host: &'a mut H, /// Reference to the interpreter containing execution state (stack, memory, gas, etc). pub interpreter: &'a mut Interpreter, + /// Reference to the host interface for accessing external blockchain state. + pub host: &'a mut H, } impl std::fmt::Debug for InstructionContext<'_, H, ITy> { diff --git a/crates/interpreter/src/instructions.rs b/crates/interpreter/src/instructions.rs index 70c63c267a..63ff98f6b6 100644 --- a/crates/interpreter/src/instructions.rs +++ b/crates/interpreter/src/instructions.rs @@ -35,8 +35,14 @@ pub type Instruction = fn(InstructionContext<'_, H, W>); /// Instruction table is list of instruction function pointers mapped to 256 EVM opcodes. pub type InstructionTable = [Instruction; 256]; -/// Returns the instruction table for the given spec. +/// Returns the default instruction table for the given interpreter types and host. 
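// Editor's sketch for the Gas::used helper added above (assumed example values, not taken
// from the patch): spent() is limit minus remaining, and used() additionally subtracts refunds.
fn used_sketch(limit: u64, remaining: u64, refunded: u64) -> u64 {
    (limit - remaining).saturating_sub(refunded)
}
// used_sketch(100_000, 40_000, 5_000) == 55_000; this is the quantity the op-revm handler
// later multiplies by the base fee and passes to the operator fee charge.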
+#[inline] pub const fn instruction_table( +) -> [Instruction; 256] { + const { instruction_table_impl::() } +} + +const fn instruction_table_impl( ) -> [Instruction; 256] { use bytecode::opcode::*; let mut table = [control::unknown as Instruction; 256]; @@ -224,7 +230,7 @@ mod tests { let is_instr_unknown = std::ptr::fn_addr_eq(*instr, unknown_istr); assert_eq!( is_instr_unknown, is_opcode_unknown, - "Opcode 0x{i:X?} is not handled" + "Opcode 0x{i:X?} is not handled", ); } } diff --git a/crates/interpreter/src/instructions/bitwise.rs b/crates/interpreter/src/instructions/bitwise.rs index 60da9fd092..18696e7871 100644 --- a/crates/interpreter/src/instructions/bitwise.rs +++ b/crates/interpreter/src/instructions/bitwise.rs @@ -25,7 +25,7 @@ pub fn gt(context: InstructionContext<'_, H, /// Implements the CLZ instruction - count leading zeros. pub fn clz(context: InstructionContext<'_, H, WIRE>) { check!(context.interpreter, OSAKA); - gas!(context.interpreter, gas::VERYLOW); + gas!(context.interpreter, gas::LOW); popn_top!([], op1, context.interpreter); let leading_zeros = op1.leading_zeros(); diff --git a/crates/interpreter/src/instructions/control.rs b/crates/interpreter/src/instructions/control.rs index d28c12a283..58502560ca 100644 --- a/crates/interpreter/src/instructions/control.rs +++ b/crates/interpreter/src/instructions/control.rs @@ -29,10 +29,10 @@ pub fn jumpi(context: InstructionContext<'_, } } -#[inline(always)] /// Internal helper function for jump operations. /// /// Validates jump target and performs the actual jump. +#[inline(always)] fn jump_inner(interpreter: &mut Interpreter, target: U256) { let target = as_usize_or_fail!(interpreter, target, InstructionResult::InvalidJump); if !interpreter.bytecode.is_valid_legacy_jump(target) { diff --git a/crates/interpreter/src/instructions/macros.rs b/crates/interpreter/src/instructions/macros.rs index 39a2e5c083..b361c0369a 100644 --- a/crates/interpreter/src/instructions/macros.rs +++ b/crates/interpreter/src/instructions/macros.rs @@ -99,17 +99,15 @@ macro_rules! resize_memory { $crate::resize_memory!($interpreter, $offset, $len, ()) }; ($interpreter:expr, $offset:expr, $len:expr, $ret:expr) => { - let words_num = $crate::interpreter::num_words($offset.saturating_add($len)); - match $interpreter.gas.record_memory_expansion(words_num) { - $crate::gas::MemoryExtensionResult::Extended => { - $interpreter.memory.resize(words_num * 32); - } - $crate::gas::MemoryExtensionResult::OutOfGas => { - $interpreter.halt($crate::InstructionResult::MemoryOOG); - return $ret; - } - $crate::gas::MemoryExtensionResult::Same => (), // no action - }; + if !$crate::interpreter::resize_memory( + &mut $interpreter.gas, + &mut $interpreter.memory, + $offset, + $len, + ) { + $interpreter.halt($crate::InstructionResult::MemoryOOG); + return $ret; + } }; } @@ -124,14 +122,31 @@ macro_rules! popn { }; } +#[doc(hidden)] +#[macro_export] +macro_rules! _count { + (@count) => { 0 }; + (@count $head:tt $($tail:tt)*) => { 1 + _count!(@count $($tail)*) }; + ($($arg:tt)*) => { _count!(@count $($arg)*) }; +} + /// Pops n values from the stack and returns the top value. Fails the instruction if n values can't be popped. #[macro_export] macro_rules! popn_top { ([ $($x:ident),* ], $top:ident, $interpreter:expr $(,$ret:expr)? ) => { + /* let Some(([$( $x ),*], $top)) = $interpreter.stack.popn_top() else { $interpreter.halt($crate::InstructionResult::StackUnderflow); return $($ret)?; }; + */ + + // Workaround for https://github.com/rust-lang/rust/issues/144329. 
+ if $interpreter.stack.len() < (1 + $crate::_count!($($x)*)) { + $interpreter.halt($crate::InstructionResult::StackUnderflow); + return $($ret)?; + } + let ([$( $x ),*], $top) = unsafe { $interpreter.stack.popn_top().unwrap_unchecked() }; }; } diff --git a/crates/interpreter/src/instructions/stack.rs b/crates/interpreter/src/instructions/stack.rs index 1c84ea4b83..cdfdceccd0 100644 --- a/crates/interpreter/src/instructions/stack.rs +++ b/crates/interpreter/src/instructions/stack.rs @@ -1,6 +1,5 @@ use crate::{ gas, - instructions::utility::cast_slice_to_u256, interpreter_types::{Immediates, InterpreterTypes, Jumps, RuntimeFlag, StackTr}, InstructionResult, }; @@ -33,11 +32,12 @@ pub fn push( context: InstructionContext<'_, H, WIRE>, ) { gas!(context.interpreter, gas::VERYLOW); - push!(context.interpreter, U256::ZERO); - popn_top!([], top, context.interpreter); - let imm = context.interpreter.bytecode.read_slice(N); - cast_slice_to_u256(imm, top); + let slice = context.interpreter.bytecode.read_slice(N); + if !context.interpreter.stack.push_slice(slice) { + context.interpreter.halt(InstructionResult::StackOverflow); + return; + } // Can ignore return. as relative N jump is safe operation context.interpreter.bytecode.relative_jump(N as isize); diff --git a/crates/interpreter/src/instructions/system.rs b/crates/interpreter/src/instructions/system.rs index 8ced44d883..6d3ba86570 100644 --- a/crates/interpreter/src/instructions/system.rs +++ b/crates/interpreter/src/instructions/system.rs @@ -96,7 +96,6 @@ pub fn codecopy(context: InstructionContext<' /// Loads 32 bytes of input data from the specified offset. pub fn calldataload(context: InstructionContext<'_, H, WIRE>) { gas!(context.interpreter, gas::VERYLOW); - //pop_top!(interpreter, offset_ptr); popn_top!([], offset_ptr, context.interpreter); let mut word = B256::ZERO; let offset = as_usize_saturated!(offset_ptr); diff --git a/crates/interpreter/src/instructions/utility.rs b/crates/interpreter/src/instructions/utility.rs index 993e3e2d73..8a51ce9575 100644 --- a/crates/interpreter/src/instructions/utility.rs +++ b/crates/interpreter/src/instructions/utility.rs @@ -1,70 +1,5 @@ use primitives::{Address, B256, U256}; -/// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros -/// if necessary. -/// -/// # Panics -/// -/// Panics if slice is longer than 32 bytes. -#[inline] -pub fn cast_slice_to_u256(slice: &[u8], dest: &mut U256) { - if slice.is_empty() { - return; - } - assert!(slice.len() <= 32, "slice too long"); - - let n_words = slice.len().div_ceil(32); - - // SAFETY: Length checked above. - unsafe { - //let dst = self.data.as_mut_ptr().add(self.data.len()).cast::(); - //self.data.set_len(new_len); - let dst = dest.as_limbs_mut().as_mut_ptr(); - - let mut i = 0; - - // Write full words - let words = slice.chunks_exact(32); - let partial_last_word = words.remainder(); - for word in words { - // Note: We unroll `U256::from_be_bytes` here to write directly into the buffer, - // instead of creating a 32 byte array on the stack and then copying it over. 
- for l in word.rchunks_exact(8) { - dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap())); - i += 1; - } - } - - if partial_last_word.is_empty() { - return; - } - - // Write limbs of partial last word - let limbs = partial_last_word.rchunks_exact(8); - let partial_last_limb = limbs.remainder(); - for l in limbs { - dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap())); - i += 1; - } - - // Write partial last limb by padding with zeros - if !partial_last_limb.is_empty() { - let mut tmp = [0u8; 8]; - tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb); - dst.add(i).write(u64::from_be_bytes(tmp)); - i += 1; - } - - debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much"); - - // Zero out upper bytes of last word - let m = i % 4; // 32 / 8 - if m != 0 { - dst.add(i).write_bytes(0, 4 - m); - } - } -} - /// Trait for converting types into U256 values. pub trait IntoU256 { /// Converts the implementing type into a U256 value. diff --git a/crates/interpreter/src/interpreter.rs b/crates/interpreter/src/interpreter.rs index dee1b766a5..db0ae972f2 100644 --- a/crates/interpreter/src/interpreter.rs +++ b/crates/interpreter/src/interpreter.rs @@ -14,7 +14,7 @@ pub use ext_bytecode::ExtBytecode; pub use input::InputsImpl; pub use return_data::ReturnDataImpl; pub use runtime_flags::RuntimeFlags; -pub use shared_memory::{num_words, SharedMemory}; +pub use shared_memory::{num_words, resize_memory, SharedMemory}; pub use stack::{Stack, STACK_LIMIT}; // imports @@ -183,6 +183,13 @@ impl InterpreterTypes for EthInterpreter { } impl Interpreter { + /// Performs EVM memory resize. + #[inline] + #[must_use] + pub fn resize_memory(&mut self, offset: usize, len: usize) -> bool { + resize_memory(&mut self.gas, &mut self.memory, offset, len) + } + /// Takes the next action from the control and returns it. #[inline] pub fn take_next_action(&mut self) -> InterpreterAction { @@ -193,6 +200,8 @@ impl Interpreter { /// Halt the interpreter with the given result. /// /// This will set the action to [`InterpreterAction::Return`] and set the gas to the current gas. + #[cold] + #[inline(never)] pub fn halt(&mut self, result: InstructionResult) { self.bytecode .set_action(InterpreterAction::new_halt(result, self.gas)); diff --git a/crates/interpreter/src/interpreter/ext_bytecode.rs b/crates/interpreter/src/interpreter/ext_bytecode.rs index 82e437a54f..ede6bfcb02 100644 --- a/crates/interpreter/src/interpreter/ext_bytecode.rs +++ b/crates/interpreter/src/interpreter/ext_bytecode.rs @@ -31,6 +31,7 @@ impl Deref for ExtBytecode { } impl Default for ExtBytecode { + #[inline] fn default() -> Self { Self::new(Bytecode::default()) } @@ -38,6 +39,7 @@ impl Default for ExtBytecode { impl ExtBytecode { /// Create new extended bytecode and set the instruction pointer to the start of the bytecode. 
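// Editor's sketch for the resize_memory helper introduced above: expansion is driven by the
// 32-byte word count, so gas is only recorded when the rounded word count actually grows.
// Self-contained illustration using the same rounding as num_words further below:
const fn words_sketch(len: usize) -> usize {
    len.saturating_add(31) / 32
}
fn needs_expansion_sketch(current_words: usize, offset: usize, len: usize) -> bool {
    words_sketch(offset.saturating_add(len)) > current_words
}
// words_sketch(0) == 0, words_sketch(1) == 1, words_sketch(32) == 1, words_sketch(33) == 2;
// after touching offsets 0..64 (two words), a later access at offset 10 with len 20 is free.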
+ #[inline] pub fn new(base: Bytecode) -> Self { let instruction_pointer = base.bytecode_ptr(); Self { diff --git a/crates/interpreter/src/interpreter/shared_memory.rs b/crates/interpreter/src/interpreter/shared_memory.rs index adecc36288..8e1b503954 100644 --- a/crates/interpreter/src/interpreter/shared_memory.rs +++ b/crates/interpreter/src/interpreter/shared_memory.rs @@ -8,6 +8,29 @@ use core::{ use primitives::{hex, B256, U256}; use std::{rc::Rc, vec::Vec}; +trait RefcellExt { + fn dbg_borrow(&self) -> Ref<'_, T>; + fn dbg_borrow_mut(&self) -> RefMut<'_, T>; +} + +impl RefcellExt for RefCell { + #[inline] + fn dbg_borrow(&self) -> Ref<'_, T> { + match self.try_borrow() { + Ok(b) => b, + Err(e) => debug_unreachable!("{e}"), + } + } + + #[inline] + fn dbg_borrow_mut(&self) -> RefMut<'_, T> { + match self.try_borrow_mut() { + Ok(b) => b, + Err(e) => debug_unreachable!("{e}"), + } + } +} + /// A sequential memory shared between calls, which uses /// a `Vec` for internal representation. /// A [SharedMemory] instance should always be obtained using @@ -80,13 +103,18 @@ impl MemoryTr for SharedMemory { /// Returns a byte slice of the memory region at the given offset. /// + /// # Panics + /// + /// Panics on out of bounds access in debug builds only. + /// /// # Safety /// - /// In debug this will panic on out of bounds. In release it will silently fail. + /// In release builds, calling this method with an out-of-bounds range triggers undefined + /// behavior. Callers must ensure that the range is within the bounds of the buffer. #[inline] #[cfg_attr(debug_assertions, track_caller)] fn global_slice(&self, range: Range) -> Ref<'_, [u8]> { - let buffer = self.buffer().borrow(); // Borrow the inner Vec + let buffer = self.buffer_ref(); Ref::map(buffer, |b| match b.get(range) { Some(slice) => slice, None => debug_unreachable!("slice OOB: range; len: {}", self.len()), @@ -162,6 +190,16 @@ impl SharedMemory { unsafe { self.buffer.as_ref().unwrap_unchecked() } } + #[inline] + fn buffer_ref(&self) -> Ref<'_, Vec> { + self.buffer().dbg_borrow() + } + + #[inline] + fn buffer_ref_mut(&self) -> RefMut<'_, Vec> { + self.buffer().dbg_borrow_mut() + } + /// Returns `true` if the `new_size` for the current context memory will /// make the shared buffer length exceed the `memory_limit`. #[cfg(feature = "memory_limit")] @@ -180,7 +218,7 @@ impl SharedMemory { if self.child_checkpoint.is_some() { panic!("new_child_context was already called without freeing child context"); } - let new_checkpoint = self.buffer().borrow().len(); + let new_checkpoint = self.full_len(); self.child_checkpoint = Some(new_checkpoint); SharedMemory { buffer: Some(self.buffer().clone()), @@ -199,14 +237,18 @@ impl SharedMemory { return; }; unsafe { - self.buffer().borrow_mut().set_len(child_checkpoint); + self.buffer_ref_mut().set_len(child_checkpoint); } } /// Returns the length of the current memory range. #[inline] pub fn len(&self) -> usize { - self.buffer().borrow().len() - self.my_checkpoint + self.full_len() - self.my_checkpoint + } + + fn full_len(&self) -> usize { + self.buffer_ref().len() } /// Returns `true` if the current memory range is empty. @@ -219,7 +261,7 @@ impl SharedMemory { #[inline] pub fn resize(&mut self, new_size: usize) { self.buffer() - .borrow_mut() + .dbg_borrow_mut() .resize(self.my_checkpoint + new_size, 0); } @@ -238,11 +280,17 @@ impl SharedMemory { /// /// # Panics /// - /// Panics on out of bounds. + /// Panics on out of bounds access in debug builds only. 
+ /// + /// # Safety + /// + /// In release builds, calling this method with an out-of-bounds range triggers undefined + /// behavior. Callers must ensure that the range is within the bounds of the memory (i.e., + /// `range.end <= self.len()`). #[inline] #[cfg_attr(debug_assertions, track_caller)] pub fn slice_range(&self, range: Range) -> Ref<'_, [u8]> { - let buffer = self.buffer().borrow(); // Borrow the inner Vec + let buffer = self.buffer_ref(); Ref::map(buffer, |b| { match b.get(range.start + self.my_checkpoint..range.end + self.my_checkpoint) { Some(slice) => slice, @@ -255,11 +303,16 @@ impl SharedMemory { /// /// # Panics /// - /// Panics on out of bounds. + /// Panics on out of bounds access in debug builds only. + /// + /// # Safety + /// + /// In release builds, calling this method with an out-of-bounds range triggers undefined + /// behavior. Callers must ensure that the range is within the bounds of the buffer. #[inline] #[cfg_attr(debug_assertions, track_caller)] pub fn global_slice_range(&self, range: Range) -> Ref<'_, [u8]> { - let buffer = self.buffer().borrow(); // Borrow the inner Vec + let buffer = self.buffer_ref(); Ref::map(buffer, |b| match b.get(range) { Some(slice) => slice, None => debug_unreachable!("slice OOB: range; len: {}", self.len()), @@ -270,11 +323,17 @@ impl SharedMemory { /// /// # Panics /// - /// Panics on out of bounds. + /// Panics on out of bounds access in debug builds only. + /// + /// # Safety + /// + /// In release builds, calling this method with out-of-bounds parameters triggers undefined + /// behavior. Callers must ensure that `offset + size` does not exceed the length of the + /// memory. #[inline] #[cfg_attr(debug_assertions, track_caller)] pub fn slice_mut(&mut self, offset: usize, size: usize) -> RefMut<'_, [u8]> { - let buffer = self.buffer().borrow_mut(); // Borrow the inner Vec mutably + let buffer = self.buffer_ref_mut(); RefMut::map(buffer, |b| { match b.get_mut(self.my_checkpoint + offset..self.my_checkpoint + offset + size) { Some(slice) => slice, @@ -382,7 +441,7 @@ impl SharedMemory { len: usize, data_range: Range, ) { - let mut buffer = self.buffer().borrow_mut(); // Borrow the inner Vec mutably + let mut buffer = self.buffer_ref_mut(); let (src, dst) = buffer.split_at_mut(self.my_checkpoint); let src = if data_range.is_empty() { &mut [] @@ -404,9 +463,18 @@ impl SharedMemory { } /// Returns a reference to the memory of the current context, the active memory. + /// + /// # Panics + /// + /// Panics if the checkpoint is invalid in debug builds only. + /// + /// # Safety + /// + /// In release builds, calling this method with an invalid checkpoint triggers undefined + /// behavior. The checkpoint must be within the bounds of the buffer. #[inline] pub fn context_memory(&self) -> Ref<'_, [u8]> { - let buffer = self.buffer().borrow(); + let buffer = self.buffer_ref(); Ref::map(buffer, |b| match b.get(self.my_checkpoint..) { Some(slice) => slice, None => debug_unreachable!("Context memory should be always valid"), @@ -414,9 +482,18 @@ impl SharedMemory { } /// Returns a mutable reference to the memory of the current context. + /// + /// # Panics + /// + /// Panics if the checkpoint is invalid in debug builds only. + /// + /// # Safety + /// + /// In release builds, calling this method with an invalid checkpoint triggers undefined + /// behavior. The checkpoint must be within the bounds of the buffer. 
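// Editor's sketch of the debug-checked accessor contract documented above (hedged: revm uses
// its own debug_unreachable! macro; this only shows the general shape of the pattern):
fn checked_get_sketch(buf: &[u8], i: usize) -> u8 {
    match buf.get(i) {
        Some(byte) => *byte,
        None => {
            if cfg!(debug_assertions) {
                // Debug builds panic loudly on an out-of-bounds access.
                panic!("index {i} out of bounds (len {})", buf.len());
            } else {
                // Release builds assume the caller upheld the bound; violating it is UB.
                unsafe { core::hint::unreachable_unchecked() }
            }
        }
    }
}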
#[inline] pub fn context_memory_mut(&mut self) -> RefMut<'_, [u8]> { - let buffer = self.buffer().borrow_mut(); // Borrow the inner Vec mutably + let buffer = self.buffer_ref_mut(); RefMut::map(buffer, |b| match b.get_mut(self.my_checkpoint..) { Some(slice) => slice, None => debug_unreachable!("Context memory should be always valid"), @@ -465,6 +542,42 @@ pub const fn num_words(len: usize) -> usize { len.saturating_add(31) / 32 } +/// Performs EVM memory resize. +#[inline] +#[must_use] +pub fn resize_memory( + gas: &mut crate::Gas, + memory: &mut Memory, + offset: usize, + len: usize, +) -> bool { + let new_num_words = num_words(offset.saturating_add(len)); + if new_num_words > gas.memory().words_num { + resize_memory_cold(gas, memory, new_num_words) + } else { + true + } +} + +#[cold] +#[inline(never)] +fn resize_memory_cold( + gas: &mut crate::Gas, + memory: &mut Memory, + new_num_words: usize, +) -> bool { + let cost = unsafe { + gas.memory_mut() + .record_new_len(new_num_words) + .unwrap_unchecked() + }; + if !gas.record_cost(cost) { + return false; + } + memory.resize(new_num_words * 32); + true +} + #[cfg(test)] mod tests { use super::*; @@ -486,45 +599,45 @@ mod tests { fn new_free_child_context() { let mut sm1 = SharedMemory::new(); - assert_eq!(sm1.buffer().borrow().len(), 0); + assert_eq!(sm1.buffer_ref().len(), 0); assert_eq!(sm1.my_checkpoint, 0); - unsafe { sm1.buffer().borrow_mut().set_len(32) }; + unsafe { sm1.buffer_ref_mut().set_len(32) }; assert_eq!(sm1.len(), 32); let mut sm2 = sm1.new_child_context(); - assert_eq!(sm2.buffer().borrow().len(), 32); + assert_eq!(sm2.buffer_ref().len(), 32); assert_eq!(sm2.my_checkpoint, 32); assert_eq!(sm2.len(), 0); - unsafe { sm2.buffer().borrow_mut().set_len(96) }; + unsafe { sm2.buffer_ref_mut().set_len(96) }; assert_eq!(sm2.len(), 64); let mut sm3 = sm2.new_child_context(); - assert_eq!(sm3.buffer().borrow().len(), 96); + assert_eq!(sm3.buffer_ref().len(), 96); assert_eq!(sm3.my_checkpoint, 96); assert_eq!(sm3.len(), 0); - unsafe { sm3.buffer().borrow_mut().set_len(128) }; + unsafe { sm3.buffer_ref_mut().set_len(128) }; let sm4 = sm3.new_child_context(); - assert_eq!(sm4.buffer().borrow().len(), 128); + assert_eq!(sm4.buffer_ref().len(), 128); assert_eq!(sm4.my_checkpoint, 128); assert_eq!(sm4.len(), 0); // Free contexts drop(sm4); sm3.free_child_context(); - assert_eq!(sm3.buffer().borrow().len(), 128); + assert_eq!(sm3.buffer_ref().len(), 128); assert_eq!(sm3.my_checkpoint, 96); assert_eq!(sm3.len(), 32); sm2.free_child_context(); - assert_eq!(sm2.buffer().borrow().len(), 96); + assert_eq!(sm2.buffer_ref().len(), 96); assert_eq!(sm2.my_checkpoint, 32); assert_eq!(sm2.len(), 64); sm1.free_child_context(); - assert_eq!(sm1.buffer().borrow().len(), 32); + assert_eq!(sm1.buffer_ref().len(), 32); assert_eq!(sm1.my_checkpoint, 0); assert_eq!(sm1.len(), 32); } @@ -533,22 +646,19 @@ mod tests { fn resize() { let mut sm1 = SharedMemory::new(); sm1.resize(32); - assert_eq!(sm1.buffer().borrow().len(), 32); + assert_eq!(sm1.buffer_ref().len(), 32); assert_eq!(sm1.len(), 32); - assert_eq!(sm1.buffer().borrow().get(0..32), Some(&[0_u8; 32] as &[u8])); + assert_eq!(sm1.buffer_ref().get(0..32), Some(&[0_u8; 32] as &[u8])); let mut sm2 = sm1.new_child_context(); sm2.resize(96); - assert_eq!(sm2.buffer().borrow().len(), 128); + assert_eq!(sm2.buffer_ref().len(), 128); assert_eq!(sm2.len(), 96); - assert_eq!( - sm2.buffer().borrow().get(32..128), - Some(&[0_u8; 96] as &[u8]) - ); + assert_eq!(sm2.buffer_ref().get(32..128), Some(&[0_u8; 96] as 
&[u8])); sm1.free_child_context(); - assert_eq!(sm1.buffer().borrow().len(), 32); + assert_eq!(sm1.buffer_ref().len(), 32); assert_eq!(sm1.len(), 32); - assert_eq!(sm1.buffer().borrow().get(0..32), Some(&[0_u8; 32] as &[u8])); + assert_eq!(sm1.buffer_ref().get(0..32), Some(&[0_u8; 32] as &[u8])); } } diff --git a/crates/interpreter/src/interpreter/stack.rs b/crates/interpreter/src/interpreter/stack.rs index b7b182458f..c835a0202a 100644 --- a/crates/interpreter/src/interpreter/stack.rs +++ b/crates/interpreter/src/interpreter/stack.rs @@ -48,6 +48,7 @@ impl Clone for Stack { } impl StackTr for Stack { + #[inline] fn len(&self) -> usize { self.len() } @@ -75,17 +76,25 @@ impl StackTr for Stack { Some(unsafe { self.popn_top::() }) } + #[inline] fn exchange(&mut self, n: usize, m: usize) -> bool { self.exchange(n, m) } + #[inline] fn dup(&mut self, n: usize) -> bool { self.dup(n) } + #[inline] fn push(&mut self, value: U256) -> bool { self.push(value) } + + #[inline] + fn push_slice(&mut self, slice: &[u8]) -> bool { + self.push_slice_(slice) + } } impl Stack { @@ -150,6 +159,7 @@ impl Stack { #[inline] #[cfg_attr(debug_assertions, track_caller)] pub unsafe fn pop_unsafe(&mut self) -> U256 { + assume!(!self.data.is_empty()); self.data.pop().unwrap_unchecked() } @@ -161,8 +171,8 @@ impl Stack { #[inline] #[cfg_attr(debug_assertions, track_caller)] pub unsafe fn top_unsafe(&mut self) -> &mut U256 { - let len = self.data.len(); - self.data.get_unchecked_mut(len - 1) + assume!(!self.data.is_empty()); + self.data.last_mut().unwrap_unchecked() } /// Pops `N` values from the stack. @@ -173,14 +183,8 @@ impl Stack { #[inline] #[cfg_attr(debug_assertions, track_caller)] pub unsafe fn popn(&mut self) -> [U256; N] { - if N == 0 { - return [U256::ZERO; N]; - } - let mut result = [U256::ZERO; N]; - for v in &mut result { - *v = self.data.pop().unwrap_unchecked(); - } - result + assume!(self.data.len() >= N); + core::array::from_fn(|_| unsafe { self.pop_unsafe() }) } /// Pops `N` values from the stack and returns the top of the stack. @@ -292,14 +296,25 @@ impl Stack { /// if necessary. #[inline] pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> { + if self.push_slice_(slice) { + Ok(()) + } else { + Err(InstructionResult::StackOverflow) + } + } + + /// Pushes an arbitrary length slice of bytes onto the stack, padding the last word with zeros + /// if necessary. + #[inline] + fn push_slice_(&mut self, slice: &[u8]) -> bool { if slice.is_empty() { - return Ok(()); + return true; } let n_words = slice.len().div_ceil(32); let new_len = self.data.len() + n_words; if new_len > STACK_LIMIT { - return Err(InstructionResult::StackOverflow); + return false; } // SAFETY: Length checked above. @@ -322,7 +337,7 @@ impl Stack { } if partial_last_word.is_empty() { - return Ok(()); + return true; } // Write limbs of partial last word @@ -350,7 +365,7 @@ impl Stack { } } - Ok(()) + true } /// Set a value at given index for the stack, where the top of the diff --git a/crates/interpreter/src/interpreter_types.rs b/crates/interpreter/src/interpreter_types.rs index 4ee3e053f0..810d1eec62 100644 --- a/crates/interpreter/src/interpreter_types.rs +++ b/crates/interpreter/src/interpreter_types.rs @@ -170,6 +170,14 @@ pub trait StackTr { #[must_use] fn push(&mut self, value: U256) -> bool; + /// Pushes slice to the stack. + /// + /// Returns `true` if push was successful, `false` if stack overflow. + /// + /// # Note + /// Error is internally set in interpreter. 
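// Editor's note on the push_slice contract declared just below (based on the push_slice_
// implementation shown earlier in this diff): the slice is split into 32-byte words, the
// trailing partial word is zero-extended on the left (big-endian), and false is returned
// without pushing when the stack would exceed STACK_LIMIT. An empty slice pushes nothing
// and still returns true. The padding of a short slice behaves like this sketch:
fn be_word_sketch(bytes: &[u8]) -> [u8; 32] {
    // Left-pad (big-endian zero-extend) a slice of at most 32 bytes into one EVM word.
    assert!(bytes.len() <= 32);
    let mut word = [0u8; 32];
    word[32 - bytes.len()..].copy_from_slice(bytes);
    word
}
// be_word_sketch(&[0x42]) is the word with value 0x42; a 33-byte slice becomes two words.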
+ fn push_slice(&mut self, slice: &[u8]) -> bool; + /// Pushes B256 value to the stack. /// /// Internally converts B256 to U256 and then calls [`StackTr::push`]. @@ -189,7 +197,7 @@ pub trait StackTr { /// Returns top value from the stack. #[must_use] fn top(&mut self) -> Option<&mut U256> { - self.popn_top::<0>().map(|(_, top)| top) + self.popn_top().map(|([], top)| top) } /// Pops one value from the stack. diff --git a/crates/interpreter/src/lib.rs b/crates/interpreter/src/lib.rs index f9275cd2a1..e860126833 100644 --- a/crates/interpreter/src/lib.rs +++ b/crates/interpreter/src/lib.rs @@ -12,8 +12,6 @@ mod macros; /// Gas calculation utilities and constants. pub mod gas; -/// Host interface for external blockchain state access. -pub mod host; /// Context passed to instruction implementations. pub mod instruction_context; /// Instruction execution results and success/error types. @@ -32,8 +30,8 @@ pub use context_interface::{ context::{SStoreResult, SelfDestructResult, StateLoad}, CreateScheme, }; +pub use context_interface::{host, Host}; pub use gas::{Gas, InitialAndFloorGas}; -pub use host::Host; pub use instruction_context::InstructionContext; pub use instruction_result::*; pub use instructions::{instruction_table, Instruction, InstructionTable}; diff --git a/crates/op-revm/CHANGELOG.md b/crates/op-revm/CHANGELOG.md index cde06557fc..bc9d37965e 100644 --- a/crates/op-revm/CHANGELOG.md +++ b/crates/op-revm/CHANGELOG.md @@ -7,6 +7,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [8.1.0](https://github.com/bluealloy/revm/compare/op-revm-v8.0.3...op-revm-v8.1.0) - 2025-07-23 + +### Added + +- *(osaka)* update EIP-7825 constant ([#2753](https://github.com/bluealloy/revm/pull/2753)) + +### Fixed + +- gas deduction with `disable_balance_check` ([#2699](https://github.com/bluealloy/revm/pull/2699)) + +### Other + +- *(op-revm)* test for optional balance check ([#2746](https://github.com/bluealloy/revm/pull/2746)) +- change gas parameter to immutable reference ([#2702](https://github.com/bluealloy/revm/pull/2702)) + +## [8.0.3](https://github.com/bluealloy/revm/compare/op-revm-v8.0.2...op-revm-v8.0.3) - 2025-07-14 + +### Other + +- simplify gas calculations by introducing a used() method ([#2703](https://github.com/bluealloy/revm/pull/2703)) + +## [8.0.2](https://github.com/bluealloy/revm/compare/op-revm-v8.0.1...op-revm-v8.0.2) - 2025-07-03 + +### Other + +- updated the following local packages: revm + +## [8.0.1](https://github.com/bluealloy/revm/compare/op-revm-v7.0.1...op-revm-v8.0.1) - 2025-06-30 + +### Added + +- optional_eip3541 ([#2661](https://github.com/bluealloy/revm/pull/2661)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- *(op/handler)* verify caller account is touched by zero value transfer ([#2669](https://github.com/bluealloy/revm/pull/2669)) +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) + ## [7.0.1](https://github.com/bluealloy/revm/compare/op-revm-v7.0.0...op-revm-v7.0.1) - 2025-06-20 ### Fixed diff --git a/crates/op-revm/Cargo.toml b/crates/op-revm/Cargo.toml index a0aa9949fc..5b8f39ae75 100644 --- a/crates/op-revm/Cargo.toml +++ b/crates/op-revm/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "op-revm" description = "Optimism variant of Revm" -version = "7.0.1" +version = "8.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -54,12 +54,14 @@ dev = [ "memory_limit", "optional_balance_check", 
"optional_block_gas_limit", + "optional_eip3541", "optional_eip3607", "optional_no_base_fee", ] memory_limit = ["revm/memory_limit"] optional_balance_check = ["revm/optional_balance_check"] optional_block_gas_limit = ["revm/optional_block_gas_limit"] +optional_eip3541 = ["revm/optional_eip3541"] optional_eip3607 = ["revm/optional_eip3607"] optional_no_base_fee = ["revm/optional_no_base_fee"] diff --git a/crates/op-revm/src/api/default_ctx.rs b/crates/op-revm/src/api/default_ctx.rs index 5506deed8b..1b21507379 100644 --- a/crates/op-revm/src/api/default_ctx.rs +++ b/crates/op-revm/src/api/default_ctx.rs @@ -19,7 +19,7 @@ pub trait DefaultOp { impl DefaultOp for OpContext { fn op() -> Self { Context::mainnet() - .with_tx(OpTransaction::default()) + .with_tx(OpTransaction::builder().build_fill()) .with_cfg(CfgEnv::new_with_spec(OpSpecId::BEDROCK)) .with_chain(L1BlockInfo::default()) } @@ -40,8 +40,8 @@ mod test { // convert to optimism context let mut evm = ctx.build_op_with_inspector(NoOpInspector {}); // execute - let _ = evm.transact(OpTransaction::default()); + let _ = evm.transact(OpTransaction::builder().build_fill()); // inspect - let _ = evm.inspect_one_tx(OpTransaction::default()); + let _ = evm.inspect_one_tx(OpTransaction::builder().build_fill()); } } diff --git a/crates/op-revm/src/fast_lz.rs b/crates/op-revm/src/fast_lz.rs index bb8ccc17a3..a1a262a079 100644 --- a/crates/op-revm/src/fast_lz.rs +++ b/crates/op-revm/src/fast_lz.rs @@ -159,6 +159,8 @@ mod tests { // This bytecode and ABI is for a contract, which wraps the LibZip library for easier fuzz testing. // The source of this contract is here: https://github.com/danyalprout/fastlz/blob/main/src/FastLz.sol#L6-L10 + use revm::context::TxEnv; + use crate::OpTransaction; sol! { interface FastLz { @@ -174,13 +176,16 @@ mod tests { .with_db(BenchmarkDB::new_bytecode(contract_bytecode.clone())) .build_op(); - let mut tx = OpTransaction::default(); - - tx.base.caller = EEADDRESS; - tx.base.kind = TxKind::Call(FFADDRESS); - tx.base.data = FastLz::fastLzCall::new((input,)).abi_encode().into(); - tx.base.gas_limit = 3_000_000; - tx.enveloped_tx = Some(Bytes::default()); + let tx = OpTransaction::builder() + .base( + TxEnv::builder() + .caller(EEADDRESS) + .kind(TxKind::Call(FFADDRESS)) + .data(FastLz::fastLzCall::new((input,)).abi_encode().into()) + .gas_limit(3_000_000), + ) + .enveloped_tx(Some(Bytes::default())) + .build_fill(); let result = evm.transact_one(tx).unwrap(); diff --git a/crates/op-revm/src/handler.rs b/crates/op-revm/src/handler.rs index 688c84d2da..e696aba055 100644 --- a/crates/op-revm/src/handler.rs +++ b/crates/op-revm/src/handler.rs @@ -174,11 +174,7 @@ where // Check if account has enough balance for `gas_limit * max_fee`` and value transfer. // Transfer will be done inside `*_inner` functions. - if is_balance_check_disabled { - // Make sure the caller's balance is at least the value of the transaction. - // this is not consensus critical, and it is used in testing. - new_balance = caller_account.info.balance.max(tx.value()); - } else if !is_deposit && max_balance_spending > new_balance { + if !is_deposit && max_balance_spending > new_balance && !is_balance_check_disabled { // skip max balance check for deposit transactions. 
// this check for deposit was skipped previously in `validate_tx_against_state` function return Err(InvalidTransaction::LackOfFundForMaxFee { @@ -186,23 +182,28 @@ where balance: Box::new(new_balance), } .into()); - } else { - let effective_balance_spending = - tx.effective_balance_spending(basefee, blob_price).expect( - "effective balance is always smaller than max balance so it can't overflow", - ); + } - // subtracting max balance spending with value that is going to be deducted later in the call. - let gas_balance_spending = effective_balance_spending - tx.value(); + let effective_balance_spending = tx + .effective_balance_spending(basefee, blob_price) + .expect("effective balance is always smaller than max balance so it can't overflow"); - // If the transaction is not a deposit transaction, subtract the L1 data fee from the - // caller's balance directly after minting the requested amount of ETH. - // Additionally deduct the operator fee from the caller's account. - // - // In case of deposit additional cost will be zero. - let op_gas_balance_spending = gas_balance_spending.saturating_add(additional_cost); + // subtracting max balance spending with value that is going to be deducted later in the call. + let gas_balance_spending = effective_balance_spending - tx.value(); + + // If the transaction is not a deposit transaction, subtract the L1 data fee from the + // caller's balance directly after minting the requested amount of ETH. + // Additionally deduct the operator fee from the caller's account. + // + // In case of deposit additional cost will be zero. + let op_gas_balance_spending = gas_balance_spending.saturating_add(additional_cost); + + new_balance = new_balance.saturating_sub(op_gas_balance_spending); - new_balance = new_balance.saturating_sub(op_gas_balance_spending); + if is_balance_check_disabled { + // Make sure the caller's balance is at least the value of the transaction. + // this is not consensus critical, and it is used in testing. + new_balance = new_balance.max(tx.value()); } // Touch account so we know it is changed. @@ -297,7 +298,7 @@ where .operator_fee_refund(frame_result.gas(), spec); } - reimburse_caller(evm.ctx(), frame_result.gas_mut(), additional_refund).map_err(From::from) + reimburse_caller(evm.ctx(), frame_result.gas(), additional_refund).map_err(From::from) } fn refund( @@ -353,27 +354,21 @@ where }; let l1_cost = l1_block_info.calculate_tx_l1_cost(enveloped_tx, spec); - let mut operator_fee_cost = U256::ZERO; - if spec.is_enabled_in(OpSpecId::ISTHMUS) { - operator_fee_cost = l1_block_info.operator_fee_charge( - enveloped_tx, - U256::from(frame_result.gas().spent() - frame_result.gas().refunded() as u64), - ); + let operator_fee_cost = if spec.is_enabled_in(OpSpecId::ISTHMUS) { + l1_block_info.operator_fee_charge(enveloped_tx, U256::from(frame_result.gas().used())) + } else { + U256::ZERO + }; + let base_fee_amount = U256::from(basefee.saturating_mul(frame_result.gas().used() as u128)); + + // Send fees to their respective recipients + for (recipient, amount) in [ + (L1_FEE_RECIPIENT, l1_cost), + (BASE_FEE_RECIPIENT, base_fee_amount), + (OPERATOR_FEE_RECIPIENT, operator_fee_cost), + ] { + ctx.journal_mut().balance_incr(recipient, amount)?; } - // Send the L1 cost of the transaction to the L1 Fee Vault. - ctx.journal_mut().balance_incr(L1_FEE_RECIPIENT, l1_cost)?; - - // Send the base fee of the transaction to the Base Fee Vault. 
- ctx.journal_mut().balance_incr( - BASE_FEE_RECIPIENT, - U256::from(basefee.saturating_mul( - (frame_result.gas().spent() - frame_result.gas().refunded() as u64) as u128, - )), - )?; - - // Send the operator fee of the transaction to the coinbase. - ctx.journal_mut() - .balance_incr(OPERATOR_FEE_RECIPIENT, operator_fee_cost)?; Ok(()) } @@ -501,11 +496,11 @@ mod tests { BASE_FEE_SCALAR_OFFSET, ECOTONE_L1_BLOB_BASE_FEE_SLOT, ECOTONE_L1_FEE_SCALARS_SLOT, L1_BASE_FEE_SLOT, L1_BLOCK_CONTRACT, OPERATOR_FEE_SCALARS_SLOT, }, - DefaultOp, OpBuilder, + DefaultOp, OpBuilder, OpTransaction, }; use alloy_primitives::uint; use revm::{ - context::{BlockEnv, Context, TransactionType}, + context::{BlockEnv, Context, TxEnv}, context_interface::result::InvalidTransaction, database::InMemoryDB, database_interface::EmptyDB, @@ -547,10 +542,11 @@ mod tests { #[test] fn test_revert_gas() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.gas_limit = 100; - tx.enveloped_tx = None; - }) + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::BEDROCK); let gas = call_last_frame_return(ctx, InstructionResult::Revert, Gas::new(90)); @@ -562,11 +558,11 @@ mod tests { #[test] fn test_consume_gas() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.gas_limit = 100; - tx.deposit.source_hash = B256::ZERO; - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - }) + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); let gas = call_last_frame_return(ctx, InstructionResult::Stop, Gas::new(90)); @@ -578,11 +574,12 @@ mod tests { #[test] fn test_consume_gas_with_refund() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.gas_limit = 100; - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.deposit.source_hash = B256::ZERO; - }) + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .source_hash(B256::from([1u8; 32])) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); let mut ret_gas = Gas::new(90); @@ -602,11 +599,12 @@ mod tests { #[test] fn test_consume_gas_deposit_tx() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.base.gas_limit = 100; - tx.deposit.source_hash = B256::ZERO; - }) + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .source_hash(B256::from([1u8; 32])) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::BEDROCK); let gas = call_last_frame_return(ctx, InstructionResult::Stop, Gas::new(90)); assert_eq!(gas.remaining(), 0); @@ -617,12 +615,13 @@ mod tests { #[test] fn test_consume_gas_sys_deposit_tx() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.base.gas_limit = 100; - tx.deposit.source_hash = B256::ZERO; - tx.deposit.is_system_transaction = true; - }) + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .source_hash(B256::from([1u8; 32])) + .is_system_transaction() + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::BEDROCK); let gas = call_last_frame_return(ctx, InstructionResult::Stop, Gas::new(90)); assert_eq!(gas.remaining(), 100); @@ -652,8 +651,7 @@ mod tests { }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); ctx.modify_tx(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.deposit.source_hash = 
B256::ZERO; + tx.deposit.source_hash = B256::from([1u8; 32]); tx.deposit.mint = Some(10); }); @@ -677,7 +675,7 @@ mod tests { db.insert_account_info( caller, AccountInfo { - balance: U256::from(1000), + balance: U256::from(1058), // Increased to cover L1 fees (1048) + base fees ..Default::default() }, ); @@ -690,13 +688,14 @@ mod tests { ..Default::default() }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH) - .modify_tx_chained(|tx| { - tx.base.gas_limit = 100; - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.deposit.mint = Some(10); - tx.enveloped_tx = Some(bytes!("FACADE")); - tx.deposit.source_hash = B256::ZERO; - }); + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .enveloped_tx(Some(bytes!("FACADE"))) + .source_hash(B256::ZERO) + .build() + .unwrap(), + ); let mut evm = ctx.build_op(); @@ -708,7 +707,7 @@ mod tests { // Check the account balance is updated. let account = evm.ctx().journal_mut().load_account(caller).unwrap(); - assert_eq!(account.info.balance, U256::from(1010)); + assert_eq!(account.info.balance, U256::from(10)); // 1058 - 1048 = 10 } #[test] @@ -810,11 +809,14 @@ mod tests { ..Default::default() }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH) - .modify_tx_chained(|tx| { - tx.base.gas_limit = 100; - tx.deposit.source_hash = B256::ZERO; - tx.enveloped_tx = Some(bytes!("FACADE")); - }); + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(100)) + .source_hash(B256::ZERO) + .enveloped_tx(Some(bytes!("FACADE"))) + .build() + .unwrap(), + ); let mut evm = ctx.build_op(); let handler = @@ -849,10 +851,12 @@ mod tests { ..Default::default() }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::ISTHMUS) - .modify_tx_chained(|tx| { - tx.base.gas_limit = 10; - tx.enveloped_tx = Some(bytes!("FACADE")); - }); + .with_tx( + OpTransaction::builder() + .base(TxEnv::builder().gas_limit(10)) + .enveloped_tx(Some(bytes!("FACADE"))) + .build_fill(), + ); let mut evm = ctx.build_op(); let handler = @@ -916,7 +920,7 @@ mod tests { // mark the tx as a system transaction. let ctx = Context::op() .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; + tx.deposit.source_hash = B256::from([1u8; 32]); tx.deposit.is_system_transaction = true; }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); @@ -943,8 +947,7 @@ mod tests { // Set source hash. let ctx = Context::op() .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.deposit.source_hash = B256::ZERO; + tx.deposit.source_hash = B256::from([1u8; 32]); }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); @@ -960,8 +963,7 @@ mod tests { // Set source hash. 
let ctx = Context::op() .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.deposit.source_hash = B256::ZERO; + tx.deposit.source_hash = B256::from([1u8; 32]); }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); @@ -977,7 +979,8 @@ mod tests { fn test_halted_deposit_tx_post_regolith() { let ctx = Context::op() .modify_tx_chained(|tx| { - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; + // Set up as deposit transaction by having a deposit with source_hash + tx.deposit.source_hash = B256::from([1u8; 32]); }) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::REGOLITH); @@ -1003,6 +1006,36 @@ mod tests { ) } + #[test] + fn test_tx_zero_value_touch_caller() { + let ctx = Context::op(); + + let mut evm = ctx.build_op(); + + assert!(!evm + .0 + .ctx + .journal_mut() + .load_account(Address::ZERO) + .unwrap() + .is_touched()); + + let handler = + OpHandler::<_, EVMError<_, OpTransactionError>, EthFrame>::new(); + + handler + .validate_against_state_and_deduct_caller(&mut evm) + .unwrap(); + + assert!(evm + .0 + .ctx + .journal_mut() + .load_account(Address::ZERO) + .unwrap() + .is_touched()); + } + #[rstest] #[case::deposit(true)] #[case::dyn_fee(false)] @@ -1012,16 +1045,26 @@ mod tests { const OP_FEE_MOCK_PARAM: u128 = 0xFFFF; let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.tx_type = if is_deposit { - DEPOSIT_TRANSACTION_TYPE - } else { - TransactionType::Eip1559 as u8 - }; - tx.base.gas_price = GAS_PRICE; - tx.base.gas_priority_fee = None; - tx.base.caller = SENDER; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .gas_price(GAS_PRICE) + .gas_priority_fee(None) + .caller(SENDER), + ) + .enveloped_tx(if is_deposit { + None + } else { + Some(bytes!("FACADE")) + }) + .source_hash(if is_deposit { + B256::from([1u8; 32]) + } else { + B256::ZERO + }) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::ISTHMUS); let mut evm = ctx.build_op(); diff --git a/crates/op-revm/src/transaction/abstraction.rs b/crates/op-revm/src/transaction/abstraction.rs index 705d6491a7..65b50e3ddd 100644 --- a/crates/op-revm/src/transaction/abstraction.rs +++ b/crates/op-revm/src/transaction/abstraction.rs @@ -2,7 +2,10 @@ use super::deposit::{DepositTransactionParts, DEPOSIT_TRANSACTION_TYPE}; use auto_impl::auto_impl; use revm::{ - context::TxEnv, + context::{ + tx::{TxEnvBuildError, TxEnvBuilder}, + TxEnv, + }, context_interface::transaction::Transaction, handler::SystemCallTx, primitives::{Address, Bytes, TxKind, B256, U256}, @@ -46,6 +49,12 @@ pub struct OpTransaction { pub deposit: DepositTransactionParts, } +impl AsRef for OpTransaction { + fn as_ref(&self) -> &T { + &self.base + } +} + impl OpTransaction { /// Create a new Optimism transaction. pub fn new(base: T) -> Self { @@ -57,6 +66,13 @@ impl OpTransaction { } } +impl OpTransaction { + /// Create a new Optimism transaction. 
+ pub fn builder() -> OpTransactionBuilder { + OpTransactionBuilder::new() + } +} + impl Default for OpTransaction { fn default() -> Self { Self { @@ -92,7 +108,12 @@ impl Transaction for OpTransaction { T: 'a; fn tx_type(&self) -> u8 { - self.base.tx_type() + // If this is a deposit transaction (has source_hash set), return deposit type + if self.deposit.source_hash != B256::ZERO { + DEPOSIT_TRANSACTION_TYPE + } else { + self.base.tx_type() + } } fn caller(&self) -> Address { @@ -148,6 +169,10 @@ impl Transaction for OpTransaction { } fn effective_gas_price(&self, base_fee: u128) -> u128 { + // Deposit transactions use gas_price directly + if self.tx_type() == DEPOSIT_TRANSACTION_TYPE { + return self.gas_price(); + } self.base.effective_gas_price(base_fee) } @@ -181,37 +206,181 @@ impl OpTxTr for OpTransaction { } } +/// Builder for constructing [`OpTransaction`] instances +#[derive(Default, Debug)] +pub struct OpTransactionBuilder { + base: TxEnvBuilder, + enveloped_tx: Option, + deposit: DepositTransactionParts, +} + +impl OpTransactionBuilder { + /// Create a new builder with default values + pub fn new() -> Self { + Self { + base: TxEnvBuilder::new(), + enveloped_tx: None, + deposit: DepositTransactionParts::default(), + } + } + + /// Set the base transaction builder based for TxEnvBuilder. + pub fn base(mut self, base: TxEnvBuilder) -> Self { + self.base = base; + self + } + + /// Set the enveloped transaction bytes. + pub fn enveloped_tx(mut self, enveloped_tx: Option) -> Self { + self.enveloped_tx = enveloped_tx; + self + } + + /// Set the source hash of the deposit transaction. + pub fn source_hash(mut self, source_hash: B256) -> Self { + self.deposit.source_hash = source_hash; + self + } + + /// Set the mint of the deposit transaction. + pub fn mint(mut self, mint: u128) -> Self { + self.deposit.mint = Some(mint); + self + } + + /// Set the deposit transaction to be a system transaction. + pub fn is_system_transaction(mut self) -> Self { + self.deposit.is_system_transaction = true; + self + } + + /// Set the deposit transaction to not be a system transaction. + pub fn not_system_transaction(mut self) -> Self { + self.deposit.is_system_transaction = false; + self + } + + /// Set the deposit transaction to be a deposit transaction. + pub fn is_deposit_tx(mut self) -> Self { + self.base = self.base.tx_type(Some(DEPOSIT_TRANSACTION_TYPE)); + self + } + + /// Build the [`OpTransaction`] with default values for missing fields. + /// + /// This is useful for testing and debugging where it is not necessary to + /// have full [`OpTransaction`] instance. + /// + /// If the source hash is not [`B256::ZERO`], set the transaction type to deposit and remove the enveloped transaction. + pub fn build_fill(mut self) -> OpTransaction { + let tx_type = self.base.get_tx_type(); + if tx_type.is_some() { + if tx_type == Some(DEPOSIT_TRANSACTION_TYPE) { + // source hash is required for deposit transactions + if self.deposit.source_hash == B256::ZERO { + self.deposit.source_hash = B256::from([1u8; 32]); + } + } else { + // enveloped is required for non-deposit transactions + self.enveloped_tx = Some(vec![0x00].into()); + } + } else if self.deposit.source_hash != B256::ZERO { + // if type is not set and source hash is set, set the transaction type to deposit + self.base = self.base.tx_type(Some(DEPOSIT_TRANSACTION_TYPE)); + } else if self.enveloped_tx.is_none() { + // if type is not set and source hash is not set, set the enveloped transaction to something. 
+ self.enveloped_tx = Some(vec![0x00].into()); + } + + let base = self.base.build_fill(); + + OpTransaction { + base, + enveloped_tx: self.enveloped_tx, + deposit: self.deposit, + } + } + + /// Build the [`OpTransaction`] instance, return error if the transaction is not valid. + /// + pub fn build(mut self) -> Result, OpBuildError> { + let tx_type = self.base.get_tx_type(); + if tx_type.is_some() { + if Some(DEPOSIT_TRANSACTION_TYPE) == tx_type { + // if tx type is deposit, check if source hash is set + if self.deposit.source_hash == B256::ZERO { + return Err(OpBuildError::MissingSourceHashForDeposit); + } + } else if self.enveloped_tx.is_none() { + // enveloped is required for non-deposit transactions + return Err(OpBuildError::MissingEnvelopedTxBytes); + } + } else if self.deposit.source_hash != B256::ZERO { + // if type is not set and source hash is set, set the transaction type to deposit + self.base = self.base.tx_type(Some(DEPOSIT_TRANSACTION_TYPE)); + } else if self.enveloped_tx.is_none() { + // tx is not deposit and enveloped is required + return Err(OpBuildError::MissingEnvelopedTxBytes); + } + + let base = self.base.build()?; + + Ok(OpTransaction { + base, + enveloped_tx: self.enveloped_tx, + deposit: self.deposit, + }) + } +} + +/// Error type for building [`TxEnv`] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum OpBuildError { + /// Base transaction build error + Base(TxEnvBuildError), + /// Missing enveloped transaction bytes + MissingEnvelopedTxBytes, + /// Missing source hash for deposit transaction + MissingSourceHashForDeposit, +} + +impl From for OpBuildError { + fn from(error: TxEnvBuildError) -> Self { + OpBuildError::Base(error) + } +} + #[cfg(test)] mod tests { - use crate::transaction::deposit::DEPOSIT_TRANSACTION_TYPE; - use super::*; - use revm::primitives::{Address, B256}; + use revm::{ + context_interface::Transaction, + primitives::{Address, B256}, + }; #[test] fn test_deposit_transaction_fields() { - let op_tx = OpTransaction { - base: TxEnv { - tx_type: DEPOSIT_TRANSACTION_TYPE, - gas_limit: 10, - gas_price: 100, - gas_priority_fee: Some(5), - ..Default::default() - }, - enveloped_tx: None, - deposit: DepositTransactionParts { - is_system_transaction: false, - mint: Some(0u128), - source_hash: B256::default(), - }, - }; - // Verify transaction type - assert_eq!(op_tx.tx_type(), DEPOSIT_TRANSACTION_TYPE); + let base_tx = TxEnv::builder() + .gas_limit(10) + .gas_price(100) + .gas_priority_fee(Some(5)); + + let op_tx = OpTransaction::builder() + .base(base_tx) + .enveloped_tx(None) + .not_system_transaction() + .mint(0u128) + .source_hash(B256::from([1u8; 32])) + .build() + .unwrap(); + // Verify transaction type (deposit transactions should have tx_type based on OpSpecId) + // The tx_type is derived from the transaction structure, not set manually // Verify common fields access assert_eq!(op_tx.gas_limit(), 10); assert_eq!(op_tx.kind(), revm::primitives::TxKind::Call(Address::ZERO)); - // Verify gas related calculations - assert_eq!(op_tx.effective_gas_price(90), 95); + // Verify gas related calculations - deposit transactions use gas_price for effective gas price + assert_eq!(op_tx.effective_gas_price(90), 100); assert_eq!(op_tx.max_fee_per_gas(), 100); } } diff --git a/crates/op-revm/tests/integration.rs b/crates/op-revm/tests/integration.rs index 16c6aac192..a41840ff36 100644 --- a/crates/op-revm/tests/integration.rs +++ 
b/crates/op-revm/tests/integration.rs @@ -3,8 +3,7 @@ mod common; use common::compare_or_save_testdata; use op_revm::{ - precompiles::bn128_pair::GRANITE_MAX_INPUT_SIZE, - transaction::deposit::DEPOSIT_TRANSACTION_TYPE, DefaultOp, L1BlockInfo, OpBuilder, + precompiles::bn128_pair::GRANITE_MAX_INPUT_SIZE, DefaultOp, L1BlockInfo, OpBuilder, OpHaltReason, OpSpecId, OpTransaction, }; use revm::{ @@ -29,11 +28,13 @@ use std::vec::Vec; #[test] fn test_deposit_tx() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.enveloped_tx = None; - tx.deposit.mint = Some(100); - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - }) + .with_tx( + OpTransaction::builder() + .enveloped_tx(None) + .mint(100) + .source_hash(revm::primitives::B256::from([1u8; 32])) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::HOLOCENE); let mut evm = ctx.build_op(); @@ -54,13 +55,18 @@ fn test_deposit_tx() { #[test] fn test_halted_deposit_tx() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.enveloped_tx = None; - tx.deposit.mint = Some(100); - tx.base.tx_type = DEPOSIT_TRANSACTION_TYPE; - tx.base.caller = BENCH_CALLER; - tx.base.kind = TxKind::Call(BENCH_TARGET); - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)), + ) + .enveloped_tx(None) + .mint(100) + .source_hash(revm::primitives::B256::from([1u8; 32])) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::HOLOCENE) .with_db(BenchmarkDB::new_bytecode(Bytecode::new_legacy( [opcode::POP].into(), @@ -106,10 +112,15 @@ fn p256verify_test_tx( ); Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(u64_to_address(secp256r1::P256VERIFY_ADDRESS)); - tx.base.gas_limit = initial_gas + secp256r1::P256VERIFY_BASE_GAS_FEE; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(u64_to_address(secp256r1::P256VERIFY_ADDRESS))) + .gas_limit(initial_gas + secp256r1::P256VERIFY_BASE_GAS_FEE), + ) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID) } @@ -128,7 +139,33 @@ fn test_tx_call_p256verify() { #[test] fn test_halted_tx_call_p256verify() { - let ctx = p256verify_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + const SPEC_ID: OpSpecId = OpSpecId::FJORD; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &[], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let original_gas_limit = initial_gas + secp256r1::P256VERIFY_BASE_GAS_FEE; + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(u64_to_address(secp256r1::P256VERIFY_ADDRESS))) + .gas_limit(original_gas_limit - 1), + ) + .build_fill(), + ) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -164,11 +201,16 @@ fn bn128_pair_test_tx( ); Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bn128::pair::ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bn128::pair::ADDRESS)) + .data(input) + .gas_limit(initial_gas), + ) + .build_fill(), + ) .modify_cfg_chained(|cfg| cfg.spec = spec) } @@ -213,10 +255,15 @@ fn test_halted_tx_call_bn128_pair_granite() { #[test] fn test_halted_tx_call_bls12_381_g1_add_out_of_gas() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::G1_ADD_ADDRESS); - tx.base.gas_limit = 21_000 + bls12_381_const::G1_ADD_BASE_GAS_FEE - 1; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G1_ADD_ADDRESS)) + .gas_limit(21_000 + bls12_381_const::G1_ADD_BASE_GAS_FEE - 1), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -245,10 +292,15 @@ fn test_halted_tx_call_bls12_381_g1_add_out_of_gas() { #[test] fn test_halted_tx_call_bls12_381_g1_add_input_wrong_size() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::G1_ADD_ADDRESS); - tx.base.gas_limit = 21_000 + bls12_381_const::G1_ADD_BASE_GAS_FEE; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G1_ADD_ADDRESS)) + .gas_limit(21_000 + bls12_381_const::G1_ADD_BASE_GAS_FEE), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -298,11 +350,16 @@ fn g1_msm_test_tx( ); Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::G1_MSM_ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas + gs1_msm_gas; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G1_MSM_ADDRESS)) + .data(input) + .gas_limit(initial_gas + gs1_msm_gas), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -312,7 +369,43 @@ fn g1_msm_test_tx( #[test] fn test_halted_tx_call_bls12_381_g1_msm_input_wrong_size() { - let ctx = g1_msm_test_tx().modify_tx_chained(|tx| tx.base.data = tx.base.data.slice(1..)); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::G1_MSM_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let gs1_msm_gas = bls12_381_utils::msm_required_gas( + 1, + &bls12_381_const::DISCOUNT_TABLE_G1_MSM, + bls12_381_const::G1_MSM_BASE_GAS_FEE, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G1_MSM_ADDRESS)) + .data(input.slice(1..)) + .gas_limit(initial_gas + gs1_msm_gas), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -334,7 +427,43 @@ fn test_halted_tx_call_bls12_381_g1_msm_input_wrong_size() { #[test] fn test_halted_tx_call_bls12_381_g1_msm_out_of_gas() { - let ctx = g1_msm_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::G1_MSM_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. } = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let gs1_msm_gas = bls12_381_utils::msm_required_gas( + 1, + &bls12_381_const::DISCOUNT_TABLE_G1_MSM, + bls12_381_const::G1_MSM_BASE_GAS_FEE, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G1_MSM_ADDRESS)) + .data(input) + .gas_limit(initial_gas + gs1_msm_gas - 1), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -379,10 +508,15 @@ fn test_halted_tx_call_bls12_381_g1_msm_wrong_input_layout() { #[test] fn test_halted_tx_call_bls12_381_g2_add_out_of_gas() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::G2_ADD_ADDRESS); - tx.base.gas_limit = 21_000 + bls12_381_const::G2_ADD_BASE_GAS_FEE - 1; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G2_ADD_ADDRESS)) + .gas_limit(21_000 + bls12_381_const::G2_ADD_BASE_GAS_FEE - 1), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -411,10 +545,15 @@ fn test_halted_tx_call_bls12_381_g2_add_out_of_gas() { #[test] fn test_halted_tx_call_bls12_381_g2_add_input_wrong_size() { let ctx = Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::G2_ADD_ADDRESS); - tx.base.gas_limit = 21_000 + bls12_381_const::G2_ADD_BASE_GAS_FEE; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G2_ADD_ADDRESS)) + .gas_limit(21_000 + bls12_381_const::G2_ADD_BASE_GAS_FEE), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -465,11 +604,16 @@ fn g2_msm_test_tx( ); Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = 
TxKind::Call(bls12_381_const::G2_MSM_ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas + gs2_msm_gas; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G2_MSM_ADDRESS)) + .data(input) + .gas_limit(initial_gas + gs2_msm_gas), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -479,7 +623,43 @@ fn g2_msm_test_tx( #[test] fn test_halted_tx_call_bls12_381_g2_msm_input_wrong_size() { - let ctx = g2_msm_test_tx().modify_tx_chained(|tx| tx.base.data = tx.base.data.slice(1..)); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::G2_MSM_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. } = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let gs2_msm_gas = bls12_381_utils::msm_required_gas( + 1, + &bls12_381_const::DISCOUNT_TABLE_G2_MSM, + bls12_381_const::G2_MSM_BASE_GAS_FEE, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G2_MSM_ADDRESS)) + .data(input.slice(1..)) + .gas_limit(initial_gas + gs2_msm_gas), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -501,7 +681,43 @@ fn test_halted_tx_call_bls12_381_g2_msm_input_wrong_size() { #[test] fn test_halted_tx_call_bls12_381_g2_msm_out_of_gas() { - let ctx = g2_msm_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::G2_MSM_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let gs2_msm_gas = bls12_381_utils::msm_required_gas( + 1, + &bls12_381_const::DISCOUNT_TABLE_G2_MSM, + bls12_381_const::G2_MSM_BASE_GAS_FEE, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::G2_MSM_ADDRESS)) + .data(input) + .gas_limit(initial_gas + gs2_msm_gas - 1), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -566,11 +782,16 @@ fn bl12_381_pairing_test_tx( bls12_381_const::PAIRING_MULTIPLIER_BASE + bls12_381_const::PAIRING_OFFSET_BASE; Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::PAIRING_ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas + pairing_gas; - }) + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::PAIRING_ADDRESS)) + .data(input) + .gas_limit(initial_gas + pairing_gas), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) @@ -580,8 +801,40 @@ fn bl12_381_pairing_test_tx( #[test] fn test_halted_tx_call_bls12_381_pairing_input_wrong_size() { - let ctx = - bl12_381_pairing_test_tx().modify_tx_chained(|tx| tx.base.data = tx.base.data.slice(1..)); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::PAIRING_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. } = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let pairing_gas: u64 = + bls12_381_const::PAIRING_MULTIPLIER_BASE + bls12_381_const::PAIRING_OFFSET_BASE; + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::PAIRING_ADDRESS)) + .data(input.slice(1..)) + .gas_limit(initial_gas + pairing_gas), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::ISTHMUS); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -603,7 +856,40 @@ fn test_halted_tx_call_bls12_381_pairing_input_wrong_size() { #[test] fn test_halted_tx_call_bls12_381_pairing_out_of_gas() { - let ctx = bl12_381_pairing_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::PAIRING_INPUT_LENGTH]); + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + let pairing_gas: u64 = + bls12_381_const::PAIRING_MULTIPLIER_BASE + bls12_381_const::PAIRING_OFFSET_BASE; + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::PAIRING_ADDRESS)) + .data(input) + .gas_limit(initial_gas + pairing_gas - 1), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = OpSpecId::ISTHMUS); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -645,9 +931,8 @@ fn test_tx_call_bls12_381_pairing_wrong_input_layout() { ); } -fn fp_to_g1_test_tx( -) -> Context, CfgEnv, EmptyDB, Journal, L1BlockInfo> -{ +#[test] +fn test_halted_tx_call_bls12_381_map_fp_to_g1_out_of_gas() { const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; @@ -664,22 +949,22 @@ fn fp_to_g1_test_tx( 0, ); - Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::MAP_FP_TO_G1_ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas + bls12_381_const::MAP_FP_TO_G1_BASE_GAS_FEE; - }) + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::MAP_FP_TO_G1_ADDRESS)) + .data(input) + .gas_limit(initial_gas + bls12_381_const::MAP_FP_TO_G1_BASE_GAS_FEE - 1), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) }) - .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID) -} - -#[test] -fn test_halted_tx_call_bls12_381_map_fp_to_g1_out_of_gas() { - let ctx = fp_to_g1_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -701,7 +986,38 @@ fn test_halted_tx_call_bls12_381_map_fp_to_g1_out_of_gas() { #[test] fn test_halted_tx_call_bls12_381_map_fp_to_g1_input_wrong_size() { - let ctx = fp_to_g1_test_tx().modify_tx_chained(|tx| tx.base.data = tx.base.data.slice(1..)); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::PADDED_FP_LENGTH]); + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::MAP_FP_TO_G1_ADDRESS)) + .data(input.slice(1..)) + .gas_limit(initial_gas + bls12_381_const::MAP_FP_TO_G1_BASE_GAS_FEE), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -721,9 +1037,8 @@ fn test_halted_tx_call_bls12_381_map_fp_to_g1_input_wrong_size() { ); } -fn fp2_to_g2_test_tx( -) -> Context, CfgEnv, EmptyDB, Journal, L1BlockInfo> -{ +#[test] +fn test_halted_tx_call_bls12_381_map_fp2_to_g2_out_of_gas() { const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; @@ -740,22 +1055,22 @@ fn fp2_to_g2_test_tx( 0, ); - Context::op() - .modify_tx_chained(|tx| { - tx.base.kind = TxKind::Call(bls12_381_const::MAP_FP2_TO_G2_ADDRESS); - tx.base.data = input; - tx.base.gas_limit = initial_gas + bls12_381_const::MAP_FP2_TO_G2_BASE_GAS_FEE; - }) + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::MAP_FP2_TO_G2_ADDRESS)) + .data(input) + .gas_limit(initial_gas + bls12_381_const::MAP_FP2_TO_G2_BASE_GAS_FEE - 1), + ) + .build_fill(), + ) .modify_chain_chained(|l1_block| { l1_block.operator_fee_constant = Some(U256::ZERO); l1_block.operator_fee_scalar = Some(U256::ZERO) }) - .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID) -} - -#[test] -fn test_halted_tx_call_bls12_381_map_fp2_to_g2_out_of_gas() { - let ctx = fp2_to_g2_test_tx().modify_tx_chained(|tx| tx.base.gas_limit -= 1); + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -777,7 +1092,38 @@ fn test_halted_tx_call_bls12_381_map_fp2_to_g2_out_of_gas() { #[test] fn test_halted_tx_call_bls12_381_map_fp2_to_g2_input_wrong_size() { - let ctx = fp2_to_g2_test_tx().modify_tx_chained(|tx| tx.base.data = tx.base.data.slice(1..)); + const SPEC_ID: OpSpecId = OpSpecId::ISTHMUS; + let is_eip7702_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + let is_eip7623_enabled = SPEC_ID >= OpSpecId::ISTHMUS; + + let input = Bytes::from([1; bls12_381_const::PADDED_FP2_LENGTH]); + let InitialAndFloorGas { initial_gas, .. 
} = calculate_initial_tx_gas( + SPEC_ID.into(), + &input[..], + false, + is_eip7702_enabled, + is_eip7623_enabled, + 0, + 0, + 0, + ); + + let ctx = Context::op() + .with_tx( + OpTransaction::builder() + .base( + TxEnv::builder() + .kind(TxKind::Call(bls12_381_const::MAP_FP2_TO_G2_ADDRESS)) + .data(input.slice(1..)) + .gas_limit(initial_gas + bls12_381_const::MAP_FP2_TO_G2_BASE_GAS_FEE), + ) + .build_fill(), + ) + .modify_chain_chained(|l1_block| { + l1_block.operator_fee_constant = Some(U256::ZERO); + l1_block.operator_fee_scalar = Some(U256::ZERO) + }) + .modify_cfg_chained(|cfg| cfg.spec = SPEC_ID); let mut evm = ctx.build_op(); let output = evm.replay().unwrap(); @@ -797,6 +1143,56 @@ fn test_halted_tx_call_bls12_381_map_fp2_to_g2_input_wrong_size() { ); } +#[test] +#[cfg(feature = "optional_balance_check")] +fn test_disable_balance_check() { + const RETURN_CALLER_BALANCE_BYTECODE: &[u8] = &[ + opcode::CALLER, + opcode::BALANCE, + opcode::PUSH1, + 0x00, + opcode::MSTORE, + opcode::PUSH1, + 0x20, + opcode::PUSH1, + 0x00, + opcode::RETURN, + ]; + + let mut evm = Context::op() + .modify_cfg_chained(|cfg| cfg.disable_balance_check = true) + .with_db(BenchmarkDB::new_bytecode(Bytecode::new_legacy( + RETURN_CALLER_BALANCE_BYTECODE.into(), + ))) + .build_op(); + + // Construct tx so that effective cost is more than caller balance. + let gas_price = 1; + let gas_limit = 100_000; + // Make sure value doesn't consume all balance since we want to validate that all effective + // cost is deducted. + let tx_value = BENCH_CALLER_BALANCE - U256::from(1); + + let result = evm + .transact_one( + OpTransaction::builder() + .base( + TxEnv::builder_for_bench() + .gas_price(gas_price) + .gas_limit(gas_limit) + .value(tx_value), + ) + .build_fill(), + ) + .unwrap(); + + assert!(result.is_success()); + + let returned_balance = U256::from_be_slice(result.output().unwrap().as_ref()); + let expected_balance = U256::ZERO; + assert_eq!(returned_balance, expected_balance); +} + #[derive(Default, Debug)] struct LogInspector { logs: Vec, @@ -831,14 +1227,13 @@ fn test_log_inspector() { let mut evm = ctx.build_op_with_inspector(LogInspector::default()); - let tx = OpTransaction { - base: TxEnv { - caller: BENCH_CALLER, - kind: TxKind::Call(BENCH_TARGET), - ..Default::default() - }, - ..Default::default() - }; + let tx = OpTransaction::builder() + .base( + TxEnv::builder() + .caller(BENCH_CALLER) + .kind(TxKind::Call(BENCH_TARGET)), + ) + .build_fill(); // Run evm. 
let output = evm.inspect_tx(tx).unwrap(); diff --git a/crates/op-revm/tests/testdata/test_halted_deposit_tx.json b/crates/op-revm/tests/testdata/test_halted_deposit_tx.json index cca92e6161..8aac2ceb88 100644 --- a/crates/op-revm/tests/testdata/test_halted_deposit_tx.json +++ b/crates/op-revm/tests/testdata/test_halted_deposit_tx.json @@ -2,7 +2,7 @@ "result": { "Halt": { "reason": "FailedDeposit", - "gas_used": 30000000 + "gas_used": 16777216 } }, "state": { diff --git a/crates/precompile/CHANGELOG.md b/crates/precompile/CHANGELOG.md index 9249d8bdeb..c36189fd5c 100644 --- a/crates/precompile/CHANGELOG.md +++ b/crates/precompile/CHANGELOG.md @@ -6,6 +6,42 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [25.0.0](https://github.com/bluealloy/revm/compare/revm-precompile-v24.0.1...revm-precompile-v25.0.0) - 2025-07-23 + +### Added + +- *(precompiles)* Use bytes API for underlying precompile library APIs ([#2705](https://github.com/bluealloy/revm/pull/2705)) +- *(precompile)* update osaka modexp gas divisor ([#2740](https://github.com/bluealloy/revm/pull/2740)) +- *(precompile)* update p256 verify osaka gas cost ([#2741](https://github.com/bluealloy/revm/pull/2741)) +- add a way for precompiles to revert ([#2711](https://github.com/bluealloy/revm/pull/2711)) + +### Fixed + +- features and check in ci ([#2766](https://github.com/bluealloy/revm/pull/2766)) + +### Other + +- use `EncodedPoint` to decode uncompressed public key ([#2736](https://github.com/bluealloy/revm/pull/2736)) +- *(precompile)* refactor blake2 input parsing ([#2734](https://github.com/bluealloy/revm/pull/2734)) +- Add blake2 benchmarks ([#2735](https://github.com/bluealloy/revm/pull/2735)) +- add asm-sha2 feature for sha2 precompile ([#2712](https://github.com/bluealloy/revm/pull/2712)) + +## [24.0.1](https://github.com/bluealloy/revm/compare/revm-precompile-v24.0.0...revm-precompile-v24.0.1) - 2025-07-14 + +### Other + +- use c-kzg precompute value 8 ([#2698](https://github.com/bluealloy/revm/pull/2698)) + +## [24.0.0](https://github.com/bluealloy/revm/compare/revm-precompile-v23.0.0...revm-precompile-v24.0.0) - 2025-06-30 + +### Added + +- blake2 avx2 ([#2670](https://github.com/bluealloy/revm/pull/2670)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) + ## [23.0.0](https://github.com/bluealloy/revm/compare/revm-precompile-v22.0.0...revm-precompile-v23.0.0) - 2025-06-19 ### Added diff --git a/crates/precompile/Cargo.toml b/crates/precompile/Cargo.toml index 6f2c64207d..450736a400 100644 --- a/crates/precompile/Cargo.toml +++ b/crates/precompile/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-precompile" description = "Revm Precompiles - Ethereum compatible precompiled contracts" -version = "23.0.0" +version = "25.0.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -72,11 +72,12 @@ p256 = { workspace = true, features = ["ecdsa"] } # utils cfg-if.workspace = true +arrayref = "0.3.6" [dev-dependencies] criterion.workspace = true rand = { workspace = true, features = ["std"] } -ark-std = { workspace = true} +ark-std = { workspace = true } rstest.workspace = true [features] @@ -102,6 +103,7 @@ std = [ ] hashbrown = ["primitives/hashbrown"] asm-keccak = ["primitives/asm-keccak"] +asm-sha2 = ["sha2/asm"] # These libraries may not work on all no_std platforms as they depend on C. 
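// The blake2 benchmark inputs and the refactored parsing in
// crates/precompile/src/blake2.rs below both follow the fixed 213-byte EIP-152
// encoding of a compression-function call: rounds (4 bytes, big-endian),
// state vector h (64 bytes, 8 little-endian u64), message block m (128 bytes,
// 16 little-endian u64), offset counters t (16 bytes, 2 little-endian u64),
// and the final-block flag f (1 byte). A minimal sketch of assembling such an
// input by hand (the helper name is hypothetical and not part of this patch):
fn build_blake2_f_input(rounds: u32, h: [u64; 8], m: [u64; 16], t: [u64; 2], f: bool) -> Vec<u8> {
    let mut input = Vec::with_capacity(213);
    input.extend_from_slice(&rounds.to_be_bytes()); // 4-byte big-endian round count
    for word in h {
        input.extend_from_slice(&word.to_le_bytes()); // 8 x u64 state vector, little-endian
    }
    for word in m {
        input.extend_from_slice(&word.to_le_bytes()); // 16 x u64 message block, little-endian
    }
    for word in t {
        input.extend_from_slice(&word.to_le_bytes()); // 2 x u64 offset counters, little-endian
    }
    input.push(f as u8); // final-block indicator, 0x00 or 0x01
    debug_assert_eq!(input.len(), 213);
    input
}
// Passing the result to blake2::run with a sufficient gas limit mirrors what the
// hex-encoded benchmark vectors in the new bench/blake2.rs file spell out by hand.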
diff --git a/crates/precompile/bench/blake2.rs b/crates/precompile/bench/blake2.rs new file mode 100644 index 0000000000..053552fad4 --- /dev/null +++ b/crates/precompile/bench/blake2.rs @@ -0,0 +1,116 @@ +use criterion::{black_box, BenchmarkGroup}; +use primitives::hex; +use revm_precompile::blake2; + +pub fn add_benches(group: &mut BenchmarkGroup<'_, criterion::measurement::WallTime>) { + // Test vectors from the blake2 test + let inputs = [ + hex!("0000000248c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 2 rounds + hex!("0000000448c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b616162636465666768696a6b6c6d6e6f700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 4 rounds + hex!("0000004048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 64 rounds + hex!("0000000a48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 10 rounds (Blake2s standard) + hex!("0000000c48c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 12 rounds (Blake2b standard) + hex!("0000020048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 512 rounds + 
hex!("0000040048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 1024 rounds + hex!("000186a048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 100000 rounds (100K) + hex!("00030d4048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001"), // 200000 rounds (200K) + ]; + + // Benchmark with 2 rounds + group.bench_function("blake2/2_rounds", |b| { + let input = &inputs[0]; // 2 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 4 rounds + group.bench_function("blake2/4_rounds", |b| { + let input = &inputs[1]; // 4 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 64 rounds + group.bench_function("blake2/64_rounds", |b| { + let input = &inputs[2]; // 64 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 10 rounds (Blake2s standard) + group.bench_function("blake2/10_rounds", |b| { + let input = &inputs[3]; // 10 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 12 rounds (Blake2b standard) + group.bench_function("blake2/12_rounds", |b| { + let input = &inputs[4]; // 12 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 512 rounds + group.bench_function("blake2/512_rounds", |b| { + let input = &inputs[5]; // 512 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 1024 rounds + group.bench_function("blake2/1024_rounds", |b| { + let input = &inputs[6]; // 1024 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 100K rounds + group.bench_function("blake2/100K_rounds", |b| { + let input = &inputs[7]; // 100000 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark with 200K rounds + group.bench_function("blake2/200K_rounds", |b| { + let input = &inputs[8]; // 200000 rounds + b.iter(|| { + black_box(blake2::run(black_box(input), u64::MAX).unwrap()); + }); + }); + + // Benchmark just the compression function with different round counts + group.bench_function("blake2/compress_12_rounds", |b| { + let h = [ + 0x6a09e667f3bcc908u64, + 0xbb67ae8584caa73bu64, + 0x3c6ef372fe94f82bu64, + 
0xa54ff53a5f1d36f1u64, + 0x510e527fade682d1u64, + 0x9b05688c2b3e6c1fu64, + 0x1f83d9abfb41bd6bu64, + 0x5be0cd19137e2179u64, + ]; + let m = [0u64; 16]; + let t = [0u64, 0u64]; + b.iter(|| { + let mut h_copy = h; + blake2::algo::compress( + black_box(12), + &mut h_copy, + black_box(m), + black_box(t), + black_box(false), + ); + }); + }); +} diff --git a/crates/precompile/bench/main.rs b/crates/precompile/bench/main.rs index cd3b479136..153dc40ad1 100644 --- a/crates/precompile/bench/main.rs +++ b/crates/precompile/bench/main.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] //! Benchmarks for the crypto precompiles +pub mod blake2; pub mod ecrecover; pub mod eip1962; pub mod eip2537; @@ -31,6 +32,9 @@ pub fn benchmark_crypto_precompiles(c: &mut Criterion) { // Run KZG point evaluation benchmarks eip4844::add_benches(&mut group); + + // Run Blake2 benchmarks + blake2::add_benches(&mut group); } criterion_group! { diff --git a/crates/precompile/src/blake2.rs b/crates/precompile/src/blake2.rs index 6c3e2fb615..6c446152a9 100644 --- a/crates/precompile/src/blake2.rs +++ b/crates/precompile/src/blake2.rs @@ -1,4 +1,5 @@ //! Blake2 precompile. More details in [`run`] + use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; const F_ROUND: u64 = 1; @@ -15,34 +16,43 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { return Err(PrecompileError::Blake2WrongLength); } - // Rounds 4 bytes + // Parse number of rounds (4 bytes) let rounds = u32::from_be_bytes(input[..4].try_into().unwrap()) as usize; let gas_used = rounds as u64 * F_ROUND; if gas_used > gas_limit { return Err(PrecompileError::OutOfGas); } + // Parse final block flag let f = match input[212] { - 1 => true, 0 => false, + 1 => true, _ => return Err(PrecompileError::Blake2WrongFinalIndicatorFlag), }; + // Parse state vector h (8 × u64) let mut h = [0u64; 8]; + input[4..68] + .chunks_exact(8) + .enumerate() + .for_each(|(i, chunk)| { + h[i] = u64::from_le_bytes(chunk.try_into().unwrap()); + }); + + // Parse message block m (16 × u64) let mut m = [0u64; 16]; + input[68..196] + .chunks_exact(8) + .enumerate() + .for_each(|(i, chunk)| { + m[i] = u64::from_le_bytes(chunk.try_into().unwrap()); + }); - for (i, pos) in (4..68).step_by(8).enumerate() { - h[i] = u64::from_le_bytes(input[pos..pos + 8].try_into().unwrap()); - } - for (i, pos) in (68..196).step_by(8).enumerate() { - m[i] = u64::from_le_bytes(input[pos..pos + 8].try_into().unwrap()); - } - let t = [ - u64::from_le_bytes(input[196..196 + 8].try_into().unwrap()), - u64::from_le_bytes(input[204..204 + 8].try_into().unwrap()), - ]; + // Parse offset counters + let t_0 = u64::from_le_bytes(input[196..204].try_into().unwrap()); + let t_1 = u64::from_le_bytes(input[204..212].try_into().unwrap()); - algo::compress(rounds, &mut h, m, t, f); + algo::compress(rounds, &mut h, m, [t_0, t_1], f); let mut out = [0u8; 64]; for (i, h) in (0..64).step_by(8).zip(h.iter()) { @@ -80,18 +90,29 @@ pub mod algo { 0x5be0cd19137e2179, ]; - #[inline] + #[inline(always)] #[allow(clippy::many_single_char_names)] /// G function: - pub fn g(v: &mut [u64], a: usize, b: usize, c: usize, d: usize, x: u64, y: u64) { - v[a] = v[a].wrapping_add(v[b]).wrapping_add(x); - v[d] = (v[d] ^ v[a]).rotate_right(32); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(24); - v[a] = v[a].wrapping_add(v[b]).wrapping_add(y); - v[d] = (v[d] ^ v[a]).rotate_right(16); - v[c] = v[c].wrapping_add(v[d]); - v[b] = (v[b] ^ v[c]).rotate_right(63); + fn g(v: &mut [u64; 16], a: usize, b: 
usize, c: usize, d: usize, x: u64, y: u64) { + let mut va = v[a]; + let mut vb = v[b]; + let mut vc = v[c]; + let mut vd = v[d]; + + va = va.wrapping_add(vb).wrapping_add(x); + vd = (vd ^ va).rotate_right(32); + vc = vc.wrapping_add(vd); + vb = (vb ^ vc).rotate_right(24); + + va = va.wrapping_add(vb).wrapping_add(y); + vd = (vd ^ va).rotate_right(16); + vc = vc.wrapping_add(vd); + vb = (vb ^ vc).rotate_right(63); + + v[a] = va; + v[b] = vb; + v[c] = vc; + v[d] = vd; } /// Compression function F takes as an argument the state vector "h", @@ -102,6 +123,27 @@ pub mod algo { /// BLAKE2b and 10 for BLAKE2s. Rounds are numbered from 0 to r - 1. #[allow(clippy::many_single_char_names)] pub fn compress(rounds: usize, h: &mut [u64; 8], m: [u64; 16], t: [u64; 2], f: bool) { + #[cfg(all(target_feature = "avx2", feature = "std"))] + { + // only if it is compiled with avx2 flag and it is std, we can use avx2. + if std::is_x86_feature_detected!("avx2") { + // avx2 is 1.8x more performant than portable implementation. + unsafe { + super::avx2::compress_block( + rounds, + &m, + h, + ((t[1] as u128) << 64) | (t[0] as u128), + if f { !0 } else { 0 }, + 0, + ); + } + return; + } + } + + // if avx2 is not available, use the fallback portable implementation + let mut v = [0u64; 16]; v[..h.len()].copy_from_slice(h); // First half from state. v[h.len()..].copy_from_slice(&IV); // Second half from IV. @@ -113,21 +155,492 @@ pub mod algo { v[14] = !v[14] // Invert all bits if the last-block-flag is set. } for i in 0..rounds { - // Message word selection permutation for this round. - let s = &SIGMA[i % 10]; - g(&mut v, 0, 4, 8, 12, m[s[0]], m[s[1]]); - g(&mut v, 1, 5, 9, 13, m[s[2]], m[s[3]]); - g(&mut v, 2, 6, 10, 14, m[s[4]], m[s[5]]); - g(&mut v, 3, 7, 11, 15, m[s[6]], m[s[7]]); - - g(&mut v, 0, 5, 10, 15, m[s[8]], m[s[9]]); - g(&mut v, 1, 6, 11, 12, m[s[10]], m[s[11]]); - g(&mut v, 2, 7, 8, 13, m[s[12]], m[s[13]]); - g(&mut v, 3, 4, 9, 14, m[s[14]], m[s[15]]); + round(&mut v, &m, i); } for i in 0..8 { h[i] ^= v[i] ^ v[i + 8]; } } + + #[inline(always)] + fn round(v: &mut [u64; 16], m: &[u64; 16], r: usize) { + // Message word selection permutation for this round. + let s = &SIGMA[r % 10]; + // g1 + g(v, 0, 4, 8, 12, m[s[0]], m[s[1]]); + g(v, 1, 5, 9, 13, m[s[2]], m[s[3]]); + g(v, 2, 6, 10, 14, m[s[4]], m[s[5]]); + g(v, 3, 7, 11, 15, m[s[6]], m[s[7]]); + + // g2 + g(v, 0, 5, 10, 15, m[s[8]], m[s[9]]); + g(v, 1, 6, 11, 12, m[s[10]], m[s[11]]); + g(v, 2, 7, 8, 13, m[s[12]], m[s[13]]); + g(v, 3, 4, 9, 14, m[s[14]], m[s[15]]); + } +} + +// Adapted from https://github.com/rust-lang-nursery/stdsimd/pull/479. +macro_rules! _MM_SHUFFLE { + ($z:expr, $y:expr, $x:expr, $w:expr) => { + ($z << 6) | ($y << 4) | ($x << 2) | $w + }; +} + +/// Code adapted from https://github.com/oconnor663/blake2_simd/blob/82b3e2aee4d2384aabbeb146058301ff0dbd453f/blake2b/src/avx2.rs +#[cfg(all(target_feature = "avx2", feature = "std"))] +mod avx2 { + #[cfg(target_arch = "x86")] + use core::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use core::arch::x86_64::*; + + use super::algo::IV; + use arrayref::{array_refs, mut_array_refs}; + + type Word = u64; + type Count = u128; + /// The number input bytes passed to each call to the compression function. Small benchmarks need + /// to use an even multiple of `BLOCKBYTES`, or else their apparent throughput will be low. + const BLOCKBYTES: usize = 16 * size_of::(); + + const DEGREE: usize = 4; + + /// Compress a block of data using the BLAKE2 algorithm. 
+ #[inline(always)] + pub(crate) unsafe fn compress_block( + mut rounds: usize, + block: &[Word; 16], + words: &mut [Word; 8], + count: Count, + last_block: Word, + last_node: Word, + ) { + let (words_low, words_high) = mut_array_refs!(words, DEGREE, DEGREE); + let (iv_low, iv_high) = array_refs!(&IV, DEGREE, DEGREE); + let mut a = loadu(words_low); + let mut b = loadu(words_high); + let mut c = loadu(iv_low); + let flags = set4(count_low(count), count_high(count), last_block, last_node); + let mut d = xor(loadu(iv_high), flags); + + let block: &[u8; BLOCKBYTES] = std::mem::transmute(block); + let msg_chunks = array_refs!(block, 16, 16, 16, 16, 16, 16, 16, 16); + let m0 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.0)); + let m1 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.1)); + let m2 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.2)); + let m3 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.3)); + let m4 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.4)); + let m5 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.5)); + let m6 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.6)); + let m7 = _mm256_broadcastsi128_si256(loadu_128(msg_chunks.7)); + + let iv0 = a; + let iv1 = b; + let mut t0; + let mut t1; + let mut b0; + + loop { + if rounds == 0 { + break; + } + rounds -= 1; + + // round 1 + t0 = _mm256_unpacklo_epi64(m0, m1); + t1 = _mm256_unpacklo_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m0, m1); + t1 = _mm256_unpackhi_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m7, m4); + t1 = _mm256_unpacklo_epi64(m5, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m7, m4); + t1 = _mm256_unpackhi_epi64(m5, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 2 + t0 = _mm256_unpacklo_epi64(m7, m2); + t1 = _mm256_unpackhi_epi64(m4, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m5, m4); + t1 = _mm256_alignr_epi8(m3, m7, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpackhi_epi64(m2, m0); + t1 = _mm256_blend_epi32(m5, m0, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m6, m1, 8); + t1 = _mm256_blend_epi32(m3, m1, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 3 + t0 = _mm256_alignr_epi8(m6, m5, 8); + t1 = _mm256_unpackhi_epi64(m2, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m4, m0); + t1 = _mm256_blend_epi32(m6, m1, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m5, m4, 8); + t1 = _mm256_unpackhi_epi64(m1, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m2, m7); 
+ t1 = _mm256_blend_epi32(m0, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 4 + t0 = _mm256_unpackhi_epi64(m3, m1); + t1 = _mm256_unpackhi_epi64(m6, m5); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m4, m0); + t1 = _mm256_unpacklo_epi64(m6, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m1, m7, 8); + t1 = _mm256_shuffle_epi32(m2, _MM_SHUFFLE!(1, 0, 3, 2)); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m4, m3); + t1 = _mm256_unpacklo_epi64(m5, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 5 + t0 = _mm256_unpackhi_epi64(m4, m2); + t1 = _mm256_unpacklo_epi64(m1, m5); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_blend_epi32(m3, m0, 0x33); + t1 = _mm256_blend_epi32(m7, m2, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m7, m1, 8); + t1 = _mm256_alignr_epi8(m3, m5, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m6, m0); + t1 = _mm256_unpacklo_epi64(m6, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 6 + t0 = _mm256_unpacklo_epi64(m1, m3); + t1 = _mm256_unpacklo_epi64(m0, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m6, m5); + t1 = _mm256_unpackhi_epi64(m5, m1); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_alignr_epi8(m2, m0, 8); + t1 = _mm256_unpackhi_epi64(m3, m7); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m4, m6); + t1 = _mm256_alignr_epi8(m7, m2, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 7 + t0 = _mm256_blend_epi32(m0, m6, 0x33); + t1 = _mm256_unpacklo_epi64(m7, m2); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m2, m7); + t1 = _mm256_alignr_epi8(m5, m6, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m4, m0); + t1 = _mm256_blend_epi32(m4, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m5, m3); + t1 = _mm256_shuffle_epi32(m1, _MM_SHUFFLE!(1, 0, 3, 2)); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + // 
round 8 + t0 = _mm256_unpackhi_epi64(m6, m3); + t1 = _mm256_blend_epi32(m1, m6, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m7, m5, 8); + t1 = _mm256_unpackhi_epi64(m0, m4); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_blend_epi32(m2, m1, 0x33); + t1 = _mm256_alignr_epi8(m4, m7, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m5, m0); + t1 = _mm256_unpacklo_epi64(m2, m3); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 9 + t0 = _mm256_unpacklo_epi64(m3, m7); + t1 = _mm256_alignr_epi8(m0, m5, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpackhi_epi64(m7, m4); + t1 = _mm256_alignr_epi8(m4, m1, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpacklo_epi64(m5, m6); + t1 = _mm256_unpackhi_epi64(m6, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_alignr_epi8(m1, m2, 8); + t1 = _mm256_alignr_epi8(m2, m3, 8); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + if rounds == 0 { + break; + } + rounds -= 1; + + // round 10 + t0 = _mm256_unpacklo_epi64(m5, m4); + t1 = _mm256_unpackhi_epi64(m3, m0); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_unpacklo_epi64(m1, m2); + t1 = _mm256_blend_epi32(m2, m3, 0x33); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + diagonalize(&mut a, &mut b, &mut c, &mut d); + t0 = _mm256_unpackhi_epi64(m6, m7); + t1 = _mm256_unpackhi_epi64(m4, m1); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g1(&mut a, &mut b, &mut c, &mut d, &mut b0); + t0 = _mm256_blend_epi32(m5, m0, 0x33); + t1 = _mm256_unpacklo_epi64(m7, m6); + b0 = _mm256_blend_epi32(t0, t1, 0xF0); + g2(&mut a, &mut b, &mut c, &mut d, &mut b0); + undiagonalize(&mut a, &mut b, &mut c, &mut d); + + // last two rounds are removed + } + a = xor(a, c); + b = xor(b, d); + a = xor(a, iv0); + b = xor(b, iv1); + + storeu(a, words_low); + storeu(b, words_high); + } + + #[inline(always)] + pub(crate) fn count_low(count: Count) -> Word { + count as Word + } + + #[inline(always)] + pub(crate) fn count_high(count: Count) -> Word { + (count >> 8 * size_of::()) as Word + } + + #[inline(always)] + unsafe fn loadu(src: *const [Word; DEGREE]) -> __m256i { + // This is an unaligned load, so the pointer cast is allowed. + _mm256_loadu_si256(src as *const __m256i) + } + + #[inline(always)] + unsafe fn storeu(src: __m256i, dest: *mut [Word; DEGREE]) { + // This is an unaligned store, so the pointer cast is allowed. 
+ _mm256_storeu_si256(dest as *mut __m256i, src) + } + + #[inline(always)] + unsafe fn loadu_128(mem_addr: &[u8; 16]) -> __m128i { + _mm_loadu_si128(mem_addr.as_ptr() as *const __m128i) + } + + #[inline(always)] + unsafe fn add(a: __m256i, b: __m256i) -> __m256i { + _mm256_add_epi64(a, b) + } + + #[inline(always)] + unsafe fn xor(a: __m256i, b: __m256i) -> __m256i { + _mm256_xor_si256(a, b) + } + + #[inline(always)] + unsafe fn set4(a: u64, b: u64, c: u64, d: u64) -> __m256i { + _mm256_setr_epi64x(a as i64, b as i64, c as i64, d as i64) + } + + // These rotations are the "simple version". For the "complicated version", see + // https://github.com/sneves/blake2-avx2/blob/b3723921f668df09ece52dcd225a36d4a4eea1d9/blake2b-common.h#L43-L46. + // For a discussion of the tradeoffs, see + // https://github.com/sneves/blake2-avx2/pull/5. In short: + // - Due to an LLVM bug (https://bugs.llvm.org/show_bug.cgi?id=44379), this + // version performs better on recent x86 chips. + // - LLVM is able to optimize this version to AVX-512 rotation instructions + // when those are enabled. + #[inline(always)] + unsafe fn rot32(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 32), _mm256_slli_epi64(x, 64 - 32)) + } + + #[inline(always)] + unsafe fn rot24(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 24), _mm256_slli_epi64(x, 64 - 24)) + } + + #[inline(always)] + unsafe fn rot16(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 16), _mm256_slli_epi64(x, 64 - 16)) + } + + #[inline(always)] + unsafe fn rot63(x: __m256i) -> __m256i { + _mm256_or_si256(_mm256_srli_epi64(x, 63), _mm256_slli_epi64(x, 64 - 63)) + } + + #[inline(always)] + unsafe fn g1( + a: &mut __m256i, + b: &mut __m256i, + c: &mut __m256i, + d: &mut __m256i, + m: &mut __m256i, + ) { + *a = add(*a, *m); + *a = add(*a, *b); + *d = xor(*d, *a); + *d = rot32(*d); + *c = add(*c, *d); + *b = xor(*b, *c); + *b = rot24(*b); + } + + #[inline(always)] + unsafe fn g2( + a: &mut __m256i, + b: &mut __m256i, + c: &mut __m256i, + d: &mut __m256i, + m: &mut __m256i, + ) { + *a = add(*a, *m); + *a = add(*a, *b); + *d = xor(*d, *a); + *d = rot16(*d); + *c = add(*c, *d); + *b = xor(*b, *c); + *b = rot63(*b); + } + + // Note the optimization here of leaving b as the unrotated row, rather than a. + // All the message loads below are adjusted to compensate for this. 
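// A minimal, standalone scalar sketch of what one g1 + g2 pair above computes per 64-bit lane of
// the BLAKE2b state (the AVX2 code runs this on four lanes at once); the rotation amounts 32, 24,
// 16 and 63 correspond to rot32/rot24/rot16/rot63. diagonalize/undiagonalize below rotate the a, c
// and d rows so the same column-mixing code also mixes the diagonals, with b left in place.
fn g_scalar(v: &mut [u64; 4], m0: u64, m1: u64) {
    let [mut a, mut b, mut c, mut d] = *v;
    a = a.wrapping_add(b).wrapping_add(m0);
    d = (d ^ a).rotate_right(32);
    c = c.wrapping_add(d);
    b = (b ^ c).rotate_right(24);
    a = a.wrapping_add(b).wrapping_add(m1);
    d = (d ^ a).rotate_right(16);
    c = c.wrapping_add(d);
    b = (b ^ c).rotate_right(63);
    *v = [a, b, c, d];
}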
See + // discussion at https://github.com/sneves/blake2-avx2/pull/4 + #[inline(always)] + unsafe fn diagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { + *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(2, 1, 0, 3)); + *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); + *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(0, 3, 2, 1)); + } + + #[inline(always)] + unsafe fn undiagonalize(a: &mut __m256i, _b: &mut __m256i, c: &mut __m256i, d: &mut __m256i) { + *a = _mm256_permute4x64_epi64(*a, _MM_SHUFFLE!(0, 3, 2, 1)); + *d = _mm256_permute4x64_epi64(*d, _MM_SHUFFLE!(1, 0, 3, 2)); + *c = _mm256_permute4x64_epi64(*c, _MM_SHUFFLE!(2, 1, 0, 3)); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use primitives::hex; + use std::time::Instant; + + #[test] + fn perfblake2() { + let input = [hex!("0000040048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b616162636465666768696a6b6c6d6e6f700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001") + ,hex!("0000020048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001") + ,hex!("0000004048c9bdf267e6096a3ba7ca8485ae67bb2bf894fe72f36e3cf1361d5f3af54fa5d182e6ad7f520e511f6c3e2b8c68059b6bbd41fbabd9831f79217e1319cde05b61626300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001")]; + + let time = Instant::now(); + for i in 0..3000 { + let _ = run(&input[i % 3], u64::MAX).unwrap(); + } + println!("duration: {:?}", time.elapsed()); + } } diff --git a/crates/precompile/src/bls12_381.rs b/crates/precompile/src/bls12_381.rs index df0201c61a..879dcd60bc 100644 --- a/crates/precompile/src/bls12_381.rs +++ b/crates/precompile/src/bls12_381.rs @@ -12,6 +12,17 @@ cfg_if::cfg_if! 
{ } } +// Re-export type aliases for use in submodules +use crate::bls12_381_const::FP_LENGTH; +type G1Point = ([u8; FP_LENGTH], [u8; FP_LENGTH]); +type G2Point = ( + [u8; FP_LENGTH], + [u8; FP_LENGTH], + [u8; FP_LENGTH], + [u8; FP_LENGTH], +); +type PairingPair = (G1Point, G2Point); + pub mod g1_add; pub mod g1_msm; pub mod g2_add; diff --git a/crates/precompile/src/bls12_381/arkworks.rs b/crates/precompile/src/bls12_381/arkworks.rs index 4b42a25b61..b8eb0a437a 100644 --- a/crates/precompile/src/bls12_381/arkworks.rs +++ b/crates/precompile/src/bls12_381/arkworks.rs @@ -1,7 +1,6 @@ +use super::{G1Point, G2Point, PairingPair}; use crate::{ - bls12_381_const::{ - FP_LENGTH, FP_PAD_BY, PADDED_FP_LENGTH, PADDED_G1_LENGTH, PADDED_G2_LENGTH, SCALAR_LENGTH, - }, + bls12_381_const::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH}, PrecompileError, }; use ark_bls12_381::{Bls12_381, Fq, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; @@ -25,7 +24,7 @@ use std::{string::ToString, vec::Vec}; /// /// Panics if the input is not exactly 48 bytes long. #[inline] -pub(super) fn read_fp(input_be: &[u8]) -> Result { +fn read_fp(input_be: &[u8]) -> Result { assert_eq!(input_be.len(), FP_LENGTH, "input must be {FP_LENGTH} bytes"); let mut input_le = [0u8; FP_LENGTH]; @@ -38,21 +37,17 @@ pub(super) fn read_fp(input_be: &[u8]) -> Result { .map_err(|_| PrecompileError::Other("non-canonical fp value".to_string())) } -/// Encodes an `Fp` field element into a padded, big-endian byte array. +/// Encodes an `Fp` field element into a big-endian byte array. /// /// # Panics /// /// Panics if serialization fails, which should not occur for a valid field element. -pub(super) fn encode_fp(fp: &Fq) -> [u8; PADDED_FP_LENGTH] { +fn encode_fp(fp: &Fq) -> [u8; FP_LENGTH] { let mut bytes = [0u8; FP_LENGTH]; fp.serialize_uncompressed(&mut bytes[..]) .expect("Failed to serialize field element"); bytes.reverse(); - - let mut padded_bytes = [0; PADDED_FP_LENGTH]; - padded_bytes[FP_PAD_BY..PADDED_FP_LENGTH].copy_from_slice(&bytes); - - padded_bytes + bytes } /// Reads a Fp2 (quadratic extension field element) from the input slices. @@ -63,10 +58,7 @@ pub(super) fn encode_fp(fp: &Fq) -> [u8; PADDED_FP_LENGTH] { /// /// Panics if either input is not exactly 48 bytes long. #[inline] -pub(super) fn read_fp2( - input_1: &[u8; FP_LENGTH], - input_2: &[u8; FP_LENGTH], -) -> Result { +fn read_fp2(input_1: &[u8; FP_LENGTH], input_2: &[u8; FP_LENGTH]) -> Result { let fp_1 = read_fp(input_1)?; let fp_2 = read_fp(input_2)?; @@ -131,10 +123,7 @@ fn new_g2_point_no_subgroup_check(x: Fq2, y: Fq2) -> Result Result { +fn read_g1(x: &[u8; FP_LENGTH], y: &[u8; FP_LENGTH]) -> Result { let point = read_g1_no_subgroup_check(x, y)?; if !point.is_in_correct_subgroup_assuming_on_curve() { return Err(PrecompileError::Other( @@ -151,7 +140,7 @@ pub(super) fn read_g1( /// - The EIP specifies that no subgroup check should be performed /// - One can be certain that the point is in the correct subgroup. #[inline] -pub(super) fn read_g1_no_subgroup_check( +fn read_g1_no_subgroup_check( x: &[u8; FP_LENGTH], y: &[u8; FP_LENGTH], ) -> Result { @@ -160,13 +149,13 @@ pub(super) fn read_g1_no_subgroup_check( new_g1_point_no_subgroup_check(px, py) } -/// Encodes a G1 point into a byte array with padded elements. +/// Encodes a G1 point into a byte array. /// /// Converts a G1 point to affine coordinates and serializes the x and y coordinates -/// as big-endian byte arrays with padding to match the expected format. +/// as big-endian byte arrays. 
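// A standalone sketch of the unpadded layout produced by the byte-oriented functions in this
// backend: a G1 point is 96 bytes, x (48-byte big-endian Fp) followed by y (48-byte big-endian
// Fp), with the point at infinity encoded as all zeros. 48 and 96 correspond to FP_LENGTH and
// G1_LENGTH in this crate; the literal sizes here are for illustration only.
fn split_unpadded_g1(encoded: &[u8; 96]) -> ([u8; 48], [u8; 48]) {
    let mut x = [0u8; 48];
    let mut y = [0u8; 48];
    x.copy_from_slice(&encoded[..48]);
    y.copy_from_slice(&encoded[48..]);
    (x, y)
}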
#[inline] -pub(super) fn encode_g1_point(input: &G1Affine) -> [u8; PADDED_G1_LENGTH] { - let mut output = [0u8; PADDED_G1_LENGTH]; +fn encode_g1_point(input: &G1Affine) -> [u8; G1_LENGTH] { + let mut output = [0u8; G1_LENGTH]; let Some((x, y)) = input.xy() else { return output; // Point at infinity, return all zeros @@ -176,8 +165,8 @@ pub(super) fn encode_g1_point(input: &G1Affine) -> [u8; PADDED_G1_LENGTH] { let y_encoded = encode_fp(&y); // Copy the encoded values to the output - output[..PADDED_FP_LENGTH].copy_from_slice(&x_encoded); - output[PADDED_FP_LENGTH..].copy_from_slice(&y_encoded); + output[..FP_LENGTH].copy_from_slice(&x_encoded); + output[FP_LENGTH..].copy_from_slice(&y_encoded); output } @@ -188,7 +177,7 @@ pub(super) fn encode_g1_point(input: &G1Affine) -> [u8; PADDED_G1_LENGTH] { /// representing the x and y coordinates in Big Endian format. /// Also performs a subgroup check to ensure the point is in the correct subgroup. #[inline] -pub(super) fn read_g2( +fn read_g2( a_x_0: &[u8; FP_LENGTH], a_x_1: &[u8; FP_LENGTH], a_y_0: &[u8; FP_LENGTH], @@ -210,7 +199,7 @@ pub(super) fn read_g2( /// - The EIP specifies that no subgroup check should be performed /// - One can be certain that the point is in the correct subgroup. #[inline] -pub(super) fn read_g2_no_subgroup_check( +fn read_g2_no_subgroup_check( a_x_0: &[u8; FP_LENGTH], a_x_1: &[u8; FP_LENGTH], a_y_0: &[u8; FP_LENGTH], @@ -221,13 +210,13 @@ pub(super) fn read_g2_no_subgroup_check( new_g2_point_no_subgroup_check(x, y) } -/// Encodes a G2 point into a byte array with padded elements. +/// Encodes a G2 point into a byte array. /// /// Converts a G2 point to affine coordinates and serializes the coordinates -/// as big-endian byte arrays with padding to match the expected format. +/// as big-endian byte arrays. #[inline] -pub(super) fn encode_g2_point(input: &G2Affine) -> [u8; PADDED_G2_LENGTH] { - let mut output = [0u8; PADDED_G2_LENGTH]; +fn encode_g2_point(input: &G2Affine) -> [u8; G2_LENGTH] { + let mut output = [0u8; G2_LENGTH]; let Some((x, y)) = input.xy() else { return output; // Point at infinity, return all zeros @@ -238,10 +227,10 @@ pub(super) fn encode_g2_point(input: &G2Affine) -> [u8; PADDED_G2_LENGTH] { let y_c0_encoded = encode_fp(&y.c0); let y_c1_encoded = encode_fp(&y.c1); - output[..PADDED_FP_LENGTH].copy_from_slice(&x_c0_encoded); - output[PADDED_FP_LENGTH..2 * PADDED_FP_LENGTH].copy_from_slice(&x_c1_encoded); - output[2 * PADDED_FP_LENGTH..3 * PADDED_FP_LENGTH].copy_from_slice(&y_c0_encoded); - output[3 * PADDED_FP_LENGTH..4 * PADDED_FP_LENGTH].copy_from_slice(&y_c1_encoded); + output[..FP_LENGTH].copy_from_slice(&x_c0_encoded); + output[FP_LENGTH..2 * FP_LENGTH].copy_from_slice(&x_c1_encoded); + output[2 * FP_LENGTH..3 * FP_LENGTH].copy_from_slice(&y_c0_encoded); + output[3 * FP_LENGTH..4 * FP_LENGTH].copy_from_slice(&y_c1_encoded); output } @@ -252,7 +241,7 @@ pub(super) fn encode_g2_point(input: &G2Affine) -> [u8; PADDED_G2_LENGTH] { /// Note: We do not check that the scalar is a canonical Fr element, because the EIP specifies: /// * The corresponding integer is not required to be less than or equal than main subgroup order. #[inline] -pub(super) fn read_scalar(input: &[u8]) -> Result { +fn read_scalar(input: &[u8]) -> Result { if input.len() != SCALAR_LENGTH { return Err(PrecompileError::Other(format!( "Input should be {SCALAR_LENGTH} bytes, was {}", @@ -265,7 +254,7 @@ pub(super) fn read_scalar(input: &[u8]) -> Result { /// Performs point addition on two G1 points. 
#[inline] -pub(super) fn p1_add_affine(p1: &G1Affine, p2: &G1Affine) -> G1Affine { +fn p1_add_affine(p1: &G1Affine, p2: &G1Affine) -> G1Affine { let p1_proj: G1Projective = (*p1).into(); let p3 = p1_proj + p2; p3.into_affine() @@ -273,7 +262,7 @@ pub(super) fn p1_add_affine(p1: &G1Affine, p2: &G1Affine) -> G1Affine { /// Performs point addition on two G2 points. #[inline] -pub(super) fn p2_add_affine(p1: &G2Affine, p2: &G2Affine) -> G2Affine { +fn p2_add_affine(p1: &G2Affine, p2: &G2Affine) -> G2Affine { let p1_proj: G2Projective = (*p1).into(); let p3 = p1_proj + p2; p3.into_affine() @@ -285,7 +274,7 @@ pub(super) fn p2_add_affine(p1: &G2Affine, p2: &G2Affine) -> G2Affine { /// /// Note: This method assumes that `g1_points` does not contain any points at infinity. #[inline] -pub(super) fn p1_msm(g1_points: Vec, scalars: Vec) -> G1Affine { +fn p1_msm(g1_points: Vec, scalars: Vec) -> G1Affine { assert_eq!( g1_points.len(), scalars.len(), @@ -313,7 +302,7 @@ pub(super) fn p1_msm(g1_points: Vec, scalars: Vec) -> G1Affine { /// /// Note: This method assumes that `g2_points` does not contain any points at infinity. #[inline] -pub(super) fn p2_msm(g2_points: Vec, scalars: Vec) -> G2Affine { +fn p2_msm(g2_points: Vec, scalars: Vec) -> G2Affine { assert_eq!( g2_points.len(), scalars.len(), @@ -339,7 +328,7 @@ pub(super) fn p2_msm(g2_points: Vec, scalars: Vec) -> G2Affine { /// /// Takes a field element (Fq) and returns the corresponding G1 point in affine form #[inline] -pub(super) fn map_fp_to_g1(fp: &Fq) -> G1Affine { +fn map_fp_to_g1(fp: &Fq) -> G1Affine { WBMap::map_to_curve(*fp) .expect("map_to_curve is infallible") .clear_cofactor() @@ -349,7 +338,7 @@ pub(super) fn map_fp_to_g1(fp: &Fq) -> G1Affine { /// /// Takes a field element (Fq2) and returns the corresponding G2 point in affine form #[inline] -pub(super) fn map_fp2_to_g2(fp2: &Fq2) -> G2Affine { +fn map_fp2_to_g2(fp2: &Fq2) -> G2Affine { WBMap::map_to_curve(*fp2) .expect("map_to_curve is infallible") .clear_cofactor() @@ -358,7 +347,7 @@ pub(super) fn map_fp2_to_g2(fp2: &Fq2) -> G2Affine { /// pairing_check performs a pairing check on a list of G1 and G2 point pairs and /// returns true if the result is equal to the identity element. #[inline] -pub(super) fn pairing_check(pairs: &[(G1Affine, G2Affine)]) -> bool { +fn pairing_check(pairs: &[(G1Affine, G2Affine)]) -> bool { if pairs.is_empty() { return true; } @@ -368,3 +357,185 @@ pub(super) fn pairing_check(pairs: &[(G1Affine, G2Affine)]) -> bool { let pairing_result = Bls12_381::multi_pairing(&g1_points, &g2_points); pairing_result.0.is_one() } + +/// pairing_check_bytes performs a pairing check on a list of G1 and G2 point pairs taking byte inputs. 
+#[inline] +pub(super) fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { + if pairs.is_empty() { + return Ok(true); + } + + let mut parsed_pairs = Vec::with_capacity(pairs.len()); + for ((g1_x, g1_y), (g2_x_0, g2_x_1, g2_y_0, g2_y_1)) in pairs { + // Check if G1 point is zero (point at infinity) + let g1_is_zero = g1_x.iter().all(|&b| b == 0) && g1_y.iter().all(|&b| b == 0); + + // Check if G2 point is zero (point at infinity) + let g2_is_zero = g2_x_0.iter().all(|&b| b == 0) + && g2_x_1.iter().all(|&b| b == 0) + && g2_y_0.iter().all(|&b| b == 0) + && g2_y_1.iter().all(|&b| b == 0); + + // Skip this pair if either point is at infinity as it's a no-op + if g1_is_zero || g2_is_zero { + // Still need to validate the non-zero point if one exists + if !g1_is_zero { + let _ = read_g1(g1_x, g1_y)?; + } + if !g2_is_zero { + let _ = read_g2(g2_x_0, g2_x_1, g2_y_0, g2_y_1)?; + } + continue; + } + + let g1_point = read_g1(g1_x, g1_y)?; + let g2_point = read_g2(g2_x_0, g2_x_1, g2_y_0, g2_y_1)?; + parsed_pairs.push((g1_point, g2_point)); + } + + // If all pairs were filtered out, return true (identity element) + if parsed_pairs.is_empty() { + return Ok(true); + } + + Ok(pairing_check(&parsed_pairs)) +} + +// Byte-oriented versions of the functions for external API compatibility + +/// Performs point addition on two G1 points taking byte coordinates. +#[inline] +pub(super) fn p1_add_affine_bytes( + a: G1Point, + b: G1Point, +) -> Result<[u8; G1_LENGTH], PrecompileError> { + let (a_x, a_y) = a; + let (b_x, b_y) = b; + // Parse first point + let p1 = read_g1_no_subgroup_check(&a_x, &a_y)?; + + // Parse second point + let p2 = read_g1_no_subgroup_check(&b_x, &b_y)?; + + // Perform addition + let result = p1_add_affine(&p1, &p2); + + // Encode result + Ok(encode_g1_point(&result)) +} + +/// Performs point addition on two G2 points taking byte coordinates. +#[inline] +pub(super) fn p2_add_affine_bytes( + a: G2Point, + b: G2Point, +) -> Result<[u8; G2_LENGTH], PrecompileError> { + let (a_x_0, a_x_1, a_y_0, a_y_1) = a; + let (b_x_0, b_x_1, b_y_0, b_y_1) = b; + // Parse first point + let p1 = read_g2_no_subgroup_check(&a_x_0, &a_x_1, &a_y_0, &a_y_1)?; + + // Parse second point + let p2 = read_g2_no_subgroup_check(&b_x_0, &b_x_1, &b_y_0, &b_y_1)?; + + // Perform addition + let result = p2_add_affine(&p1, &p2); + + // Encode result + Ok(encode_g2_point(&result)) +} + +/// Maps a field element to a G1 point from bytes +#[inline] +pub(super) fn map_fp_to_g1_bytes( + fp_bytes: &[u8; FP_LENGTH], +) -> Result<[u8; G1_LENGTH], PrecompileError> { + let fp = read_fp(fp_bytes)?; + let result = map_fp_to_g1(&fp); + Ok(encode_g1_point(&result)) +} + +/// Maps field elements to a G2 point from bytes +#[inline] +pub(super) fn map_fp2_to_g2_bytes( + fp2_x: &[u8; FP_LENGTH], + fp2_y: &[u8; FP_LENGTH], +) -> Result<[u8; G2_LENGTH], PrecompileError> { + let fp2 = read_fp2(fp2_x, fp2_y)?; + let result = map_fp2_to_g2(&fp2); + Ok(encode_g2_point(&result)) +} + +/// Performs multi-scalar multiplication (MSM) for G1 points taking byte inputs. 
+#[inline] +pub(super) fn p1_msm_bytes( + point_scalar_pairs: impl Iterator>, +) -> Result<[u8; G1_LENGTH], PrecompileError> { + let mut g1_points = Vec::new(); + let mut scalars = Vec::new(); + + // Parse all points and scalars + for pair_result in point_scalar_pairs { + let ((x, y), scalar_bytes) = pair_result?; + + // NB: MSM requires subgroup check + let point = read_g1(&x, &y)?; + + // Skip zero scalars after validating the point + if scalar_bytes.iter().all(|&b| b == 0) { + continue; + } + + let scalar = read_scalar(&scalar_bytes)?; + g1_points.push(point); + scalars.push(scalar); + } + + // Return point at infinity if no pairs were provided or all scalars were zero + if g1_points.is_empty() { + return Ok([0u8; G1_LENGTH]); + } + + // Perform MSM + let result = p1_msm(g1_points, scalars); + + // Encode result + Ok(encode_g1_point(&result)) +} + +/// Performs multi-scalar multiplication (MSM) for G2 points taking byte inputs. +#[inline] +pub(super) fn p2_msm_bytes( + point_scalar_pairs: impl Iterator>, +) -> Result<[u8; G2_LENGTH], PrecompileError> { + let mut g2_points = Vec::new(); + let mut scalars = Vec::new(); + + // Parse all points and scalars + for pair_result in point_scalar_pairs { + let ((x_0, x_1, y_0, y_1), scalar_bytes) = pair_result?; + + // NB: MSM requires subgroup check + let point = read_g2(&x_0, &x_1, &y_0, &y_1)?; + + // Skip zero scalars after validating the point + if scalar_bytes.iter().all(|&b| b == 0) { + continue; + } + + let scalar = read_scalar(&scalar_bytes)?; + g2_points.push(point); + scalars.push(scalar); + } + + // Return point at infinity if no pairs were provided or all scalars were zero + if g2_points.is_empty() { + return Ok([0u8; G2_LENGTH]); + } + + // Perform MSM + let result = p2_msm(g2_points, scalars); + + // Encode result + Ok(encode_g2_point(&result)) +} diff --git a/crates/precompile/src/bls12_381/blst.rs b/crates/precompile/src/bls12_381/blst.rs index 3f448ced72..822eed35ba 100644 --- a/crates/precompile/src/bls12_381/blst.rs +++ b/crates/precompile/src/bls12_381/blst.rs @@ -1,10 +1,8 @@ // This module contains a safe wrapper around the blst library. +use super::{G1Point, G2Point, PairingPair}; use crate::{ - bls12_381_const::{ - FP_LENGTH, FP_PAD_BY, PADDED_FP_LENGTH, PADDED_G1_LENGTH, PADDED_G2_LENGTH, SCALAR_LENGTH, - SCALAR_LENGTH_BITS, - }, + bls12_381_const::{FP_LENGTH, G1_LENGTH, G2_LENGTH, SCALAR_LENGTH, SCALAR_LENGTH_BITS}, PrecompileError, }; use blst::{ @@ -78,7 +76,7 @@ fn p2_add_or_double(p: &blst_p2, p_affine: &blst_p2_affine) -> blst_p2 { /// Note: `a` and `b` can be the same, ie this method is safe to call if one wants /// to essentially double a point #[inline] -pub(super) fn p1_add_affine(a: &blst_p1_affine, b: &blst_p1_affine) -> blst_p1_affine { +fn p1_add_affine(a: &blst_p1_affine, b: &blst_p1_affine) -> blst_p1_affine { // Convert first point to Jacobian coordinates let a_jacobian = p1_from_affine(a); @@ -91,7 +89,7 @@ pub(super) fn p1_add_affine(a: &blst_p1_affine, b: &blst_p1_affine) -> blst_p1_a /// Add two G2 points in affine form, returning the result in affine form #[inline] -pub(super) fn p2_add_affine(a: &blst_p2_affine, b: &blst_p2_affine) -> blst_p2_affine { +fn p2_add_affine(a: &blst_p2_affine, b: &blst_p2_affine) -> blst_p2_affine { // Convert first point to Jacobian coordinates let a_jacobian = p2_from_affine(a); @@ -161,7 +159,7 @@ fn p2_scalar_mul(p: &blst_p2_affine, scalar: &blst_scalar) -> blst_p2_affine { /// /// Note: This method assumes that `g1_points` does not contain any points at infinity. 
#[inline] -pub(super) fn p1_msm(g1_points: Vec, scalars: Vec) -> blst_p1_affine { +fn p1_msm(g1_points: Vec, scalars: Vec) -> blst_p1_affine { assert_eq!( g1_points.len(), scalars.len(), @@ -199,7 +197,7 @@ pub(super) fn p1_msm(g1_points: Vec, scalars: Vec) /// Note: Scalars are expected to be in Big Endian format. /// This method assumes that `g2_points` does not contain any points at infinity. #[inline] -pub(super) fn p2_msm(g2_points: Vec, scalars: Vec) -> blst_p2_affine { +fn p2_msm(g2_points: Vec, scalars: Vec) -> blst_p2_affine { assert_eq!( g2_points.len(), scalars.len(), @@ -235,7 +233,7 @@ pub(super) fn p2_msm(g2_points: Vec, scalars: Vec) /// /// Takes a field element (blst_fp) and returns the corresponding G1 point in affine form #[inline] -pub(super) fn map_fp_to_g1(fp: &blst_fp) -> blst_p1_affine { +fn map_fp_to_g1(fp: &blst_fp) -> blst_p1_affine { // Create a new G1 point in Jacobian coordinates let mut p = blst_p1::default(); @@ -252,7 +250,7 @@ pub(super) fn map_fp_to_g1(fp: &blst_fp) -> blst_p1_affine { /// /// Takes a field element (blst_fp2) and returns the corresponding G2 point in affine form #[inline] -pub(super) fn map_fp2_to_g2(fp2: &blst_fp2) -> blst_p2_affine { +fn map_fp2_to_g2(fp2: &blst_fp2) -> blst_p2_affine { // Create a new G2 point in Jacobian coordinates let mut p = blst_p2::default(); @@ -309,7 +307,7 @@ fn is_fp12_one(f: &blst_fp12) -> bool { /// pairing_check performs a pairing check on a list of G1 and G2 point pairs and /// returns true if the result is equal to the identity element. #[inline] -pub(super) fn pairing_check(pairs: &[(blst_p1_affine, blst_p2_affine)]) -> bool { +fn pairing_check(pairs: &[(blst_p1_affine, blst_p2_affine)]) -> bool { // When no inputs are given, we return true // This case can only trigger, if the initial pairing components // all had, either the G1 element as the point at infinity @@ -337,27 +335,25 @@ pub(super) fn pairing_check(pairs: &[(blst_p1_affine, blst_p2_affine)]) -> bool is_fp12_one(&final_result) } -/// Encodes a G1 point in affine format into byte slice with padded elements. +/// Encodes a G1 point in affine format into byte slice. /// /// Note: The encoded bytes are in Big Endian format. -pub(super) fn encode_g1_point(input: &blst_p1_affine) -> [u8; PADDED_G1_LENGTH] { - let mut out = [0u8; PADDED_G1_LENGTH]; - fp_to_bytes(&mut out[..PADDED_FP_LENGTH], &input.x); - fp_to_bytes(&mut out[PADDED_FP_LENGTH..], &input.y); +fn encode_g1_point(input: &blst_p1_affine) -> [u8; G1_LENGTH] { + let mut out = [0u8; G1_LENGTH]; + fp_to_bytes(&mut out[..FP_LENGTH], &input.x); + fp_to_bytes(&mut out[FP_LENGTH..], &input.y); out } -/// Encodes a single finite field element into byte slice with padding. +/// Encodes a single finite field element into byte slice. /// /// Note: The encoded bytes are in Big Endian format. fn fp_to_bytes(out: &mut [u8], input: &blst_fp) { - if out.len() != PADDED_FP_LENGTH { + if out.len() != FP_LENGTH { return; } - let (padding, rest) = out.split_at_mut(FP_PAD_BY); - padding.fill(0); // SAFETY: Out length is checked previously, `input` is a blst value. - unsafe { blst_bendian_from_fp(rest.as_mut_ptr(), input) }; + unsafe { blst_bendian_from_fp(out.as_mut_ptr(), input) }; } /// Returns a `blst_p1_affine` from the provided byte slices, which represent the x and y @@ -397,10 +393,7 @@ fn decode_g1_on_curve( /// /// Note: Coordinates are expected to be in Big Endian format. /// By default, subgroup checks are performed. 
-pub(super) fn read_g1( - x: &[u8; FP_LENGTH], - y: &[u8; FP_LENGTH], -) -> Result { +fn read_g1(x: &[u8; FP_LENGTH], y: &[u8; FP_LENGTH]) -> Result { _extract_g1_input(x, y, true) } /// Extracts a G1 point in Affine format from the x and y coordinates @@ -411,7 +404,7 @@ pub(super) fn read_g1( /// This method should only be called if: /// - The EIP specifies that no subgroup check should be performed /// - One can be certain that the point is in the correct subgroup. -pub(super) fn read_g1_no_subgroup_check( +fn read_g1_no_subgroup_check( x: &[u8; FP_LENGTH], y: &[u8; FP_LENGTH], ) -> Result { @@ -448,24 +441,15 @@ fn _extract_g1_input( Ok(out) } -/// Encodes a G2 point in affine format into byte slice with padded elements. +/// Encodes a G2 point in affine format into byte slice. /// /// Note: The encoded bytes are in Big Endian format. -pub(super) fn encode_g2_point(input: &blst_p2_affine) -> [u8; PADDED_G2_LENGTH] { - let mut out = [0u8; PADDED_G2_LENGTH]; - fp_to_bytes(&mut out[..PADDED_FP_LENGTH], &input.x.fp[0]); - fp_to_bytes( - &mut out[PADDED_FP_LENGTH..2 * PADDED_FP_LENGTH], - &input.x.fp[1], - ); - fp_to_bytes( - &mut out[2 * PADDED_FP_LENGTH..3 * PADDED_FP_LENGTH], - &input.y.fp[0], - ); - fp_to_bytes( - &mut out[3 * PADDED_FP_LENGTH..4 * PADDED_FP_LENGTH], - &input.y.fp[1], - ); +fn encode_g2_point(input: &blst_p2_affine) -> [u8; G2_LENGTH] { + let mut out = [0u8; G2_LENGTH]; + fp_to_bytes(&mut out[..FP_LENGTH], &input.x.fp[0]); + fp_to_bytes(&mut out[FP_LENGTH..2 * FP_LENGTH], &input.x.fp[1]); + fp_to_bytes(&mut out[2 * FP_LENGTH..3 * FP_LENGTH], &input.y.fp[0]); + fp_to_bytes(&mut out[3 * FP_LENGTH..4 * FP_LENGTH], &input.y.fp[1]); out } @@ -508,7 +492,7 @@ fn decode_g2_on_curve( /// /// Field elements are expected to be in Big Endian format. /// Returns an error if either of the input field elements is not canonical. -pub(super) fn read_fp2( +fn read_fp2( input_1: &[u8; FP_LENGTH], input_2: &[u8; FP_LENGTH], ) -> Result { @@ -523,7 +507,7 @@ pub(super) fn read_fp2( /// /// Note: Coordinates are expected to be in Big Endian format. /// By default, subgroup checks are performed. -pub(super) fn read_g2( +fn read_g2( a_x_0: &[u8; FP_LENGTH], a_x_1: &[u8; FP_LENGTH], a_y_0: &[u8; FP_LENGTH], @@ -539,7 +523,7 @@ pub(super) fn read_g2( /// This method should only be called if: /// - The EIP specifies that no subgroup check should be performed /// - One can be certain that the point is in the correct subgroup. -pub(super) fn read_g2_no_subgroup_check( +fn read_g2_no_subgroup_check( a_x_0: &[u8; FP_LENGTH], a_x_1: &[u8; FP_LENGTH], a_y_0: &[u8; FP_LENGTH], @@ -584,7 +568,7 @@ fn _extract_g2_input( /// returning the field element if successful. /// /// Note: The field element is expected to be in big endian format. -pub(super) fn read_fp(input: &[u8; FP_LENGTH]) -> Result { +fn read_fp(input: &[u8; FP_LENGTH]) -> Result { if !is_valid_be(input) { return Err(PrecompileError::Other("non-canonical fp value".to_string())); } @@ -608,7 +592,7 @@ pub(super) fn read_fp(input: &[u8; FP_LENGTH]) -> Result Result { +fn read_scalar(input: &[u8]) -> Result { if input.len() != SCALAR_LENGTH { return Err(PrecompileError::Other(format!( "Input should be {SCALAR_LENGTH} bytes, was {}", @@ -633,3 +617,189 @@ pub(super) fn read_scalar(input: &[u8]) -> Result fn is_valid_be(input: &[u8; 48]) -> bool { *input < MODULUS_REPR } + +// Byte-oriented versions of the functions for external API compatibility + +/// Performs point addition on two G1 points taking byte coordinates. 
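// A hypothetical usage sketch of the byte-oriented wrapper defined below, assuming the parent
// module's G1Point and this crate's G1_LENGTH: p1_add_affine accepts the same point for both
// operands (see the doubling note above), so doubling reduces to adding a point to itself.
fn double_g1_bytes(p: G1Point) -> Result<[u8; G1_LENGTH], crate::PrecompileError> {
    p1_add_affine_bytes(p, p)
}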
+#[inline] +pub(super) fn p1_add_affine_bytes( + a: G1Point, + b: G1Point, +) -> Result<[u8; G1_LENGTH], crate::PrecompileError> { + let (a_x, a_y) = a; + let (b_x, b_y) = b; + // Parse first point + let p1 = read_g1_no_subgroup_check(&a_x, &a_y)?; + + // Parse second point + let p2 = read_g1_no_subgroup_check(&b_x, &b_y)?; + + // Perform addition + let result = p1_add_affine(&p1, &p2); + + // Encode result + Ok(encode_g1_point(&result)) +} + +/// Performs point addition on two G2 points taking byte coordinates. +#[inline] +pub(super) fn p2_add_affine_bytes( + a: G2Point, + b: G2Point, +) -> Result<[u8; G2_LENGTH], crate::PrecompileError> { + let (a_x_0, a_x_1, a_y_0, a_y_1) = a; + let (b_x_0, b_x_1, b_y_0, b_y_1) = b; + // Parse first point + let p1 = read_g2_no_subgroup_check(&a_x_0, &a_x_1, &a_y_0, &a_y_1)?; + + // Parse second point + let p2 = read_g2_no_subgroup_check(&b_x_0, &b_x_1, &b_y_0, &b_y_1)?; + + // Perform addition + let result = p2_add_affine(&p1, &p2); + + // Encode result + Ok(encode_g2_point(&result)) +} + +/// Maps a field element to a G1 point from bytes +#[inline] +pub(super) fn map_fp_to_g1_bytes( + fp_bytes: &[u8; FP_LENGTH], +) -> Result<[u8; G1_LENGTH], crate::PrecompileError> { + let fp = read_fp(fp_bytes)?; + let result = map_fp_to_g1(&fp); + Ok(encode_g1_point(&result)) +} + +/// Maps field elements to a G2 point from bytes +#[inline] +pub(super) fn map_fp2_to_g2_bytes( + fp2_x: &[u8; FP_LENGTH], + fp2_y: &[u8; FP_LENGTH], +) -> Result<[u8; G2_LENGTH], crate::PrecompileError> { + let fp2 = read_fp2(fp2_x, fp2_y)?; + let result = map_fp2_to_g2(&fp2); + Ok(encode_g2_point(&result)) +} + +/// Performs multi-scalar multiplication (MSM) for G1 points taking byte inputs. +#[inline] +pub(super) fn p1_msm_bytes( + point_scalar_pairs: impl Iterator< + Item = Result<(G1Point, [u8; SCALAR_LENGTH]), crate::PrecompileError>, + >, +) -> Result<[u8; G1_LENGTH], crate::PrecompileError> { + let mut g1_points = Vec::new(); + let mut scalars = Vec::new(); + + // Parse all points and scalars + for pair_result in point_scalar_pairs { + let ((x, y), scalar_bytes) = pair_result?; + + // NB: MSM requires subgroup check + let point = read_g1(&x, &y)?; + + // Skip zero scalars after validating the point + if scalar_bytes.iter().all(|&b| b == 0) { + continue; + } + + let scalar = read_scalar(&scalar_bytes)?; + g1_points.push(point); + scalars.push(scalar); + } + + // Return point at infinity if no pairs were provided or all scalars were zero + if g1_points.is_empty() { + return Ok([0u8; G1_LENGTH]); + } + + // Perform MSM + let result = p1_msm(g1_points, scalars); + + // Encode result + Ok(encode_g1_point(&result)) +} + +/// Performs multi-scalar multiplication (MSM) for G2 points taking byte inputs. 
+#[inline] +pub(super) fn p2_msm_bytes( + point_scalar_pairs: impl Iterator< + Item = Result<(G2Point, [u8; SCALAR_LENGTH]), crate::PrecompileError>, + >, +) -> Result<[u8; G2_LENGTH], crate::PrecompileError> { + let mut g2_points = Vec::new(); + let mut scalars = Vec::new(); + + // Parse all points and scalars + for pair_result in point_scalar_pairs { + let ((x_0, x_1, y_0, y_1), scalar_bytes) = pair_result?; + + // NB: MSM requires subgroup check + let point = read_g2(&x_0, &x_1, &y_0, &y_1)?; + + // Skip zero scalars after validating the point + if scalar_bytes.iter().all(|&b| b == 0) { + continue; + } + + let scalar = read_scalar(&scalar_bytes)?; + g2_points.push(point); + scalars.push(scalar); + } + + // Return point at infinity if no pairs were provided or all scalars were zero + if g2_points.is_empty() { + return Ok([0u8; G2_LENGTH]); + } + + // Perform MSM + let result = p2_msm(g2_points, scalars); + + // Encode result + Ok(encode_g2_point(&result)) +} + +/// pairing_check_bytes performs a pairing check on a list of G1 and G2 point pairs taking byte inputs. +#[inline] +pub(super) fn pairing_check_bytes(pairs: &[PairingPair]) -> Result { + if pairs.is_empty() { + return Ok(true); + } + + let mut parsed_pairs = Vec::with_capacity(pairs.len()); + for ((g1_x, g1_y), (g2_x_0, g2_x_1, g2_y_0, g2_y_1)) in pairs { + // Check if G1 point is zero (point at infinity) + let g1_is_zero = g1_x.iter().all(|&b| b == 0) && g1_y.iter().all(|&b| b == 0); + + // Check if G2 point is zero (point at infinity) + let g2_is_zero = g2_x_0.iter().all(|&b| b == 0) + && g2_x_1.iter().all(|&b| b == 0) + && g2_y_0.iter().all(|&b| b == 0) + && g2_y_1.iter().all(|&b| b == 0); + + // Skip this pair if either point is at infinity as it's a no-op + if g1_is_zero || g2_is_zero { + // Still need to validate the non-zero point if one exists + if !g1_is_zero { + let _ = read_g1(g1_x, g1_y)?; + } + if !g2_is_zero { + let _ = read_g2(g2_x_0, g2_x_1, g2_y_0, g2_y_1)?; + } + continue; + } + + let g1_point = read_g1(g1_x, g1_y)?; + let g2_point = read_g2(g2_x_0, g2_x_1, g2_y_0, g2_y_1)?; + parsed_pairs.push((g1_point, g2_point)); + } + + // If all pairs were filtered out, return true (identity element) + if parsed_pairs.is_empty() { + return Ok(true); + } + + Ok(pairing_check(&parsed_pairs)) +} diff --git a/crates/precompile/src/bls12_381/g1_add.rs b/crates/precompile/src/bls12_381/g1_add.rs index 3dc050387e..c8aa2f80f0 100644 --- a/crates/precompile/src/bls12_381/g1_add.rs +++ b/crates/precompile/src/bls12_381/g1_add.rs @@ -1,6 +1,6 @@ //! BLS12-381 G1 add precompile. More details in [`g1_add`] -use super::crypto_backend::{encode_g1_point, p1_add_affine, read_g1_no_subgroup_check}; -use super::utils::remove_g1_padding; +use super::crypto_backend::p1_add_affine_bytes; +use super::utils::{pad_g1_point, remove_g1_padding}; use crate::bls12_381_const::{ G1_ADD_ADDRESS, G1_ADD_BASE_GAS_FEE, G1_ADD_INPUT_LENGTH, PADDED_G1_LENGTH, }; @@ -26,17 +26,21 @@ pub fn g1_add(input: &[u8], gas_limit: u64) -> PrecompileResult { ))); } + // Extract coordinates from padded input let [a_x, a_y] = remove_g1_padding(&input[..PADDED_G1_LENGTH])?; let [b_x, b_y] = remove_g1_padding(&input[PADDED_G1_LENGTH..])?; - // NB: There is no subgroup check for the G1 addition precompile because the time to do the subgroup - // check would be more than the time it takes to do the g1 addition. - // - // Users should be careful to note whether the points being added are indeed in the right subgroup. 
- let a_aff = &read_g1_no_subgroup_check(a_x, a_y)?; - let b_aff = &read_g1_no_subgroup_check(b_x, b_y)?; - let p_aff = p1_add_affine(a_aff, b_aff); + let a = (*a_x, *a_y); + let b = (*b_x, *b_y); - let out = encode_g1_point(&p_aff); - Ok(PrecompileOutput::new(G1_ADD_BASE_GAS_FEE, out.into())) + // Get unpadded result from crypto backend + let unpadded_result = p1_add_affine_bytes(a, b)?; + + // Pad the result for EVM compatibility + let padded_result = pad_g1_point(&unpadded_result); + + Ok(PrecompileOutput::new( + G1_ADD_BASE_GAS_FEE, + padded_result.into(), + )) } diff --git a/crates/precompile/src/bls12_381/g1_msm.rs b/crates/precompile/src/bls12_381/g1_msm.rs index 9469b2db01..f47802ef4b 100644 --- a/crates/precompile/src/bls12_381/g1_msm.rs +++ b/crates/precompile/src/bls12_381/g1_msm.rs @@ -1,14 +1,13 @@ //! BLS12-381 G1 msm precompile. More details in [`g1_msm`] -use super::crypto_backend::{encode_g1_point, p1_msm, read_g1, read_scalar}; -use crate::bls12_381::utils::remove_g1_padding; +use super::crypto_backend::p1_msm_bytes; +use super::G1Point; +use crate::bls12_381::utils::{pad_g1_point, remove_g1_padding}; use crate::bls12_381_const::{ DISCOUNT_TABLE_G1_MSM, G1_MSM_ADDRESS, G1_MSM_BASE_GAS_FEE, G1_MSM_INPUT_LENGTH, PADDED_G1_LENGTH, SCALAR_LENGTH, }; use crate::bls12_381_utils::msm_required_gas; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; -use primitives::Bytes; -use std::vec::Vec; /// [EIP-2537](https://eips.ethereum.org/EIPS/eip-2537#specification) BLS12_G1MSM precompile. pub const PRECOMPILE: PrecompileWithAddress = PrecompileWithAddress(G1_MSM_ADDRESS, g1_msm); @@ -25,7 +24,7 @@ pub fn g1_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_len = input.len(); if input_len == 0 || input_len % G1_MSM_INPUT_LENGTH != 0 { return Err(PrecompileError::Other(format!( - "G1MSM input length should be multiple of {G1_MSM_INPUT_LENGTH}, was {input_len}" + "G1MSM input length should be multiple of {G1_MSM_INPUT_LENGTH}, was {input_len}", ))); } @@ -35,60 +34,31 @@ pub fn g1_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { return Err(PrecompileError::OutOfGas); } - let mut g1_points: Vec<_> = Vec::with_capacity(k); - let mut scalars = Vec::with_capacity(k); - for i in 0..k { - let encoded_g1_element = - &input[i * G1_MSM_INPUT_LENGTH..i * G1_MSM_INPUT_LENGTH + PADDED_G1_LENGTH]; - let encoded_scalar = &input[i * G1_MSM_INPUT_LENGTH + PADDED_G1_LENGTH - ..i * G1_MSM_INPUT_LENGTH + PADDED_G1_LENGTH + SCALAR_LENGTH]; + let valid_pairs_iter = (0..k).map(|i| { + let start = i * G1_MSM_INPUT_LENGTH; + let padded_g1 = &input[start..start + PADDED_G1_LENGTH]; + let scalar_bytes = &input[start + PADDED_G1_LENGTH..start + G1_MSM_INPUT_LENGTH]; - // Filter out points infinity as an optimization, since it is a no-op. - // Note: Previously, points were being batch converted from Jacobian to Affine. - // In `blst`, this would essentially, zero out all of the points. - // Since all points are now in affine, this bug is avoided. - if encoded_g1_element.iter().all(|i| *i == 0) { - continue; - } + // Remove padding from G1 point - this validates padding format + let [x, y] = remove_g1_padding(padded_g1)?; + let scalar_array: [u8; SCALAR_LENGTH] = scalar_bytes.try_into().unwrap(); - let [a_x, a_y] = remove_g1_padding(encoded_g1_element)?; + let point: G1Point = (*x, *y); + Ok((point, scalar_array)) + }); - // NB: Scalar multiplications, MSMs and pairings MUST perform a subgroup check. 
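// The required_gas checked above comes from msm_required_gas, which follows the EIP-2537 MSM
// pricing shape: a per-pair base fee times the number of pairs k, scaled by a discount expressed
// in parts per thousand and looked up by k. A simplified standalone sketch of that shape (the
// table handling here is a placeholder, not the crate's DISCOUNT_TABLE_G1_MSM):
fn msm_gas_sketch(k: u64, base_fee_per_pair: u64, discount_per_mille: &[u64]) -> u64 {
    if k == 0 {
        return 0;
    }
    let idx = ((k - 1) as usize).min(discount_per_mille.len() - 1);
    k * base_fee_per_pair * discount_per_mille[idx] / 1000
}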
- let p0_aff = read_g1(a_x, a_y)?; + let unpadded_result = p1_msm_bytes(valid_pairs_iter)?; - // If the scalar is zero, then this is a no-op. - // - // Note: This check is made after checking that g1 is valid. - // this is because we want the precompile to error when - // G1 is invalid, even if the scalar is zero. - if encoded_scalar.iter().all(|i| *i == 0) { - continue; - } + // Pad the result for EVM compatibility + let padded_result = pad_g1_point(&unpadded_result); - g1_points.push(p0_aff); - scalars.push(read_scalar(encoded_scalar)?); - } - - // Return the encoding for the point at the infinity according to EIP-2537 - // if there are no points in the MSM. - const ENCODED_POINT_AT_INFINITY: [u8; PADDED_G1_LENGTH] = [0; PADDED_G1_LENGTH]; - if g1_points.is_empty() { - return Ok(PrecompileOutput::new( - required_gas, - Bytes::from_static(&ENCODED_POINT_AT_INFINITY), - )); - } - - let multiexp_aff = p1_msm(g1_points, scalars); - - let out = encode_g1_point(&multiexp_aff); - Ok(PrecompileOutput::new(required_gas, out.into())) + Ok(PrecompileOutput::new(required_gas, padded_result.into())) } #[cfg(test)] mod test { use super::*; - use primitives::hex; + use primitives::{hex, Bytes}; #[test] fn bls_g1multiexp_g1_not_on_curve_but_in_subgroup() { diff --git a/crates/precompile/src/bls12_381/g2_add.rs b/crates/precompile/src/bls12_381/g2_add.rs index c2d3b3e392..bcd4f7984e 100644 --- a/crates/precompile/src/bls12_381/g2_add.rs +++ b/crates/precompile/src/bls12_381/g2_add.rs @@ -1,6 +1,6 @@ //! BLS12-381 G2 add precompile. More details in [`g2_add`] -use super::crypto_backend::{encode_g2_point, p2_add_affine, read_g2_no_subgroup_check}; -use super::utils::remove_g2_padding; +use super::crypto_backend::p2_add_affine_bytes; +use super::utils::{pad_g2_point, remove_g2_padding}; use crate::bls12_381_const::{ G2_ADD_ADDRESS, G2_ADD_BASE_GAS_FEE, G2_ADD_INPUT_LENGTH, PADDED_G2_LENGTH, }; @@ -27,19 +27,21 @@ pub fn g2_add(input: &[u8], gas_limit: u64) -> PrecompileResult { ))); } + // Extract coordinates from padded input let [a_x_0, a_x_1, a_y_0, a_y_1] = remove_g2_padding(&input[..PADDED_G2_LENGTH])?; let [b_x_0, b_x_1, b_y_0, b_y_1] = remove_g2_padding(&input[PADDED_G2_LENGTH..])?; - // NB: There is no subgroup check for the G2 addition precompile because the time to do the subgroup - // check would be more than the time it takes to do the g1 addition. - // - // Users should be careful to note whether the points being added are indeed in the right subgroup. - let a_aff = &read_g2_no_subgroup_check(a_x_0, a_x_1, a_y_0, a_y_1)?; - let b_aff = &read_g2_no_subgroup_check(b_x_0, b_x_1, b_y_0, b_y_1)?; + let a = (*a_x_0, *a_x_1, *a_y_0, *a_y_1); + let b = (*b_x_0, *b_x_1, *b_y_0, *b_y_1); - // Use the safe wrapper for G2 point addition - let p_aff = p2_add_affine(a_aff, b_aff); + // Get unpadded result from crypto backend + let unpadded_result = p2_add_affine_bytes(a, b)?; - let out = encode_g2_point(&p_aff); - Ok(PrecompileOutput::new(G2_ADD_BASE_GAS_FEE, out.into())) + // Pad the result for EVM compatibility + let padded_result = pad_g2_point(&unpadded_result); + + Ok(PrecompileOutput::new( + G2_ADD_BASE_GAS_FEE, + padded_result.into(), + )) } diff --git a/crates/precompile/src/bls12_381/g2_msm.rs b/crates/precompile/src/bls12_381/g2_msm.rs index 932916bd9b..98dfa71768 100644 --- a/crates/precompile/src/bls12_381/g2_msm.rs +++ b/crates/precompile/src/bls12_381/g2_msm.rs @@ -1,13 +1,13 @@ //! BLS12-381 G2 msm precompile. 
More details in [`g2_msm`] -use super::crypto_backend::{encode_g2_point, p2_msm, read_g2, read_scalar}; -use super::utils::remove_g2_padding; +use super::crypto_backend::p2_msm_bytes; +use super::utils::{pad_g2_point, remove_g2_padding}; +use super::G2Point; use crate::bls12_381_const::{ DISCOUNT_TABLE_G2_MSM, G2_MSM_ADDRESS, G2_MSM_BASE_GAS_FEE, G2_MSM_INPUT_LENGTH, PADDED_G2_LENGTH, SCALAR_LENGTH, }; use crate::bls12_381_utils::msm_required_gas; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; -use std::vec::Vec; /// [EIP-2537](https://eips.ethereum.org/EIPS/eip-2537#specification) BLS12_G2MSM precompile. pub const PRECOMPILE: PrecompileWithAddress = PrecompileWithAddress(G2_MSM_ADDRESS, g2_msm); @@ -24,7 +24,7 @@ pub fn g2_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_len = input.len(); if input_len == 0 || input_len % G2_MSM_INPUT_LENGTH != 0 { return Err(PrecompileError::Other(format!( - "G2MSM input length should be multiple of {G2_MSM_INPUT_LENGTH}, was {input_len}" + "G2MSM input length should be multiple of {G2_MSM_INPUT_LENGTH}, was {input_len}", ))); } @@ -34,53 +34,23 @@ pub fn g2_msm(input: &[u8], gas_limit: u64) -> PrecompileResult { return Err(PrecompileError::OutOfGas); } - let mut g2_points: Vec<_> = Vec::with_capacity(k); - let mut scalars = Vec::with_capacity(k); - for i in 0..k { - let encoded_g2_element = - &input[i * G2_MSM_INPUT_LENGTH..i * G2_MSM_INPUT_LENGTH + PADDED_G2_LENGTH]; - let encoded_scalar = &input[i * G2_MSM_INPUT_LENGTH + PADDED_G2_LENGTH - ..i * G2_MSM_INPUT_LENGTH + PADDED_G2_LENGTH + SCALAR_LENGTH]; + let valid_pairs_iter = (0..k).map(|i| { + let start = i * G2_MSM_INPUT_LENGTH; + let padded_g2 = &input[start..start + PADDED_G2_LENGTH]; + let scalar_bytes = &input[start + PADDED_G2_LENGTH..start + G2_MSM_INPUT_LENGTH]; - // Filter out points infinity as an optimization, since it is a no-op. - // Note: Previously, points were being batch converted from Jacobian to Affine. In `blst`, this would essentially, - // zero out all of the points. Since all points are in affine, this bug is avoided. - if encoded_g2_element.iter().all(|i| *i == 0) { - continue; - } + // Remove padding from G2 point - this validates padding format + let [x_0, x_1, y_0, y_1] = remove_g2_padding(padded_g2)?; + let scalar_array: [u8; SCALAR_LENGTH] = scalar_bytes.try_into().unwrap(); - let [a_x_0, a_x_1, a_y_0, a_y_1] = remove_g2_padding(encoded_g2_element)?; + let point: G2Point = (*x_0, *x_1, *y_0, *y_1); + Ok((point, scalar_array)) + }); - // NB: Scalar multiplications, MSMs and pairings MUST perform a subgroup check. - // - // So we set the subgroup_check flag to `true` - let p0_aff = read_g2(a_x_0, a_x_1, a_y_0, a_y_1)?; + let unpadded_result = p2_msm_bytes(valid_pairs_iter)?; - // If the scalar is zero, then this is a no-op. - // - // Note: This check is made after checking that g2 is valid. - // this is because we want the precompile to error when - // G2 is invalid, even if the scalar is zero. 
- if encoded_scalar.iter().all(|i| *i == 0) { - continue; - } + // Pad the result for EVM compatibility + let padded_result = pad_g2_point(&unpadded_result); - // Convert affine point to Jacobian coordinates using our helper function - g2_points.push(p0_aff); - scalars.push(read_scalar(encoded_scalar)?); - } - - // Return infinity point if all points are infinity - if g2_points.is_empty() { - return Ok(PrecompileOutput::new( - required_gas, - [0; PADDED_G2_LENGTH].into(), - )); - } - - // Perform multi-scalar multiplication using the safe wrapper - let multiexp_aff = p2_msm(g2_points, scalars); - - let out = encode_g2_point(&multiexp_aff); - Ok(PrecompileOutput::new(required_gas, out.into())) + Ok(PrecompileOutput::new(required_gas, padded_result.into())) } diff --git a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs index 313988680a..50f67a32cb 100644 --- a/crates/precompile/src/bls12_381/map_fp2_to_g2.rs +++ b/crates/precompile/src/bls12_381/map_fp2_to_g2.rs @@ -1,7 +1,7 @@ //! BLS12-381 map fp2 to g2 precompile. More details in [`map_fp2_to_g2`] use super::{ - crypto_backend::{encode_g2_point, map_fp2_to_g2 as blst_map_fp2_to_g2, read_fp2}, - utils::remove_fp_padding, + crypto_backend::map_fp2_to_g2_bytes, + utils::{pad_g2_point, remove_fp_padding}, }; use crate::bls12_381_const::{ MAP_FP2_TO_G2_ADDRESS, MAP_FP2_TO_G2_BASE_GAS_FEE, PADDED_FP2_LENGTH, PADDED_FP_LENGTH, @@ -31,12 +31,15 @@ pub fn map_fp2_to_g2(input: &[u8], gas_limit: u64) -> PrecompileResult { let input_p0_x = remove_fp_padding(&input[..PADDED_FP_LENGTH])?; let input_p0_y = remove_fp_padding(&input[PADDED_FP_LENGTH..PADDED_FP2_LENGTH])?; - let fp2 = read_fp2(input_p0_x, input_p0_y)?; - let p_aff = blst_map_fp2_to_g2(&fp2); - let out = encode_g2_point(&p_aff); + // Get unpadded result from crypto backend + let unpadded_result = map_fp2_to_g2_bytes(input_p0_x, input_p0_y)?; + + // Pad the result for EVM compatibility + let padded_result = pad_g2_point(&unpadded_result); + Ok(PrecompileOutput::new( MAP_FP2_TO_G2_BASE_GAS_FEE, - out.into(), + padded_result.into(), )) } diff --git a/crates/precompile/src/bls12_381/map_fp_to_g1.rs b/crates/precompile/src/bls12_381/map_fp_to_g1.rs index 9689d796cc..de4ee4503e 100644 --- a/crates/precompile/src/bls12_381/map_fp_to_g1.rs +++ b/crates/precompile/src/bls12_381/map_fp_to_g1.rs @@ -1,7 +1,7 @@ //! BLS12-381 map fp to g1 precompile. 
More details in [`map_fp_to_g1`] use super::{ - crypto_backend::{encode_g1_point, map_fp_to_g1 as blst_map_fp_to_g1, read_fp}, - utils::remove_fp_padding, + crypto_backend::map_fp_to_g1_bytes, + utils::{pad_g1_point, remove_fp_padding}, }; use crate::bls12_381_const::{MAP_FP_TO_G1_ADDRESS, MAP_FP_TO_G1_BASE_GAS_FEE, PADDED_FP_LENGTH}; use crate::{PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress}; @@ -26,11 +26,17 @@ pub fn map_fp_to_g1(input: &[u8], gas_limit: u64) -> PrecompileResult { } let input_p0 = remove_fp_padding(input)?; - let fp = read_fp(input_p0)?; - let p_aff = blst_map_fp_to_g1(&fp); - let out = encode_g1_point(&p_aff); - Ok(PrecompileOutput::new(MAP_FP_TO_G1_BASE_GAS_FEE, out.into())) + // Get unpadded result from crypto backend + let unpadded_result = map_fp_to_g1_bytes(input_p0)?; + + // Pad the result for EVM compatibility + let padded_result = pad_g1_point(&unpadded_result); + + Ok(PrecompileOutput::new( + MAP_FP_TO_G1_BASE_GAS_FEE, + padded_result.into(), + )) } #[cfg(test)] diff --git a/crates/precompile/src/bls12_381/pairing.rs b/crates/precompile/src/bls12_381/pairing.rs index f5200e6d55..6f9a59bcbd 100644 --- a/crates/precompile/src/bls12_381/pairing.rs +++ b/crates/precompile/src/bls12_381/pairing.rs @@ -1,6 +1,7 @@ //! BLS12-381 pairing precompile. More details in [`pairing`] -use super::crypto_backend::{pairing_check, read_g1, read_g2}; +use super::crypto_backend::pairing_check_bytes; use super::utils::{remove_g1_padding, remove_g2_padding}; +use super::PairingPair; use crate::bls12_381_const::{ PADDED_G1_LENGTH, PADDED_G2_LENGTH, PAIRING_ADDRESS, PAIRING_INPUT_LENGTH, PAIRING_MULTIPLIER_BASE, PAIRING_OFFSET_BASE, @@ -39,38 +40,21 @@ pub fn pairing(input: &[u8], gas_limit: u64) -> PrecompileResult { } // Collect pairs of points for the pairing check - let mut pairs = Vec::with_capacity(k); + let mut pairs: Vec = Vec::with_capacity(k); for i in 0..k { let encoded_g1_element = &input[i * PAIRING_INPUT_LENGTH..i * PAIRING_INPUT_LENGTH + PADDED_G1_LENGTH]; let encoded_g2_element = &input[i * PAIRING_INPUT_LENGTH + PADDED_G1_LENGTH ..i * PAIRING_INPUT_LENGTH + PADDED_G1_LENGTH + PADDED_G2_LENGTH]; - // If either the G1 or G2 element is the encoded representation - // of the point at infinity, then these two points are no-ops - // in the pairing computation. - // - // Note: we do not skip the validation of these two elements even if - // one of them is the point at infinity because we could have G1 be - // the point at infinity and G2 be an invalid element or vice versa. - // In that case, the precompile should error because one of the elements - // was invalid. - let g1_is_zero = encoded_g1_element.iter().all(|i| *i == 0); - let g2_is_zero = encoded_g2_element.iter().all(|i| *i == 0); - let [a_x, a_y] = remove_g1_padding(encoded_g1_element)?; let [b_x_0, b_x_1, b_y_0, b_y_1] = remove_g2_padding(encoded_g2_element)?; - // NB: Scalar multiplications, MSMs and pairings MUST perform a subgroup check. 
- // extract_g1_input and extract_g2_input perform the necessary checks - let p1_aff = read_g1(a_x, a_y)?; - let p2_aff = read_g2(b_x_0, b_x_1, b_y_0, b_y_1)?; - - if !g1_is_zero & !g2_is_zero { - pairs.push((p1_aff, p2_aff)); - } + pairs.push(((*a_x, *a_y), (*b_x_0, *b_x_1, *b_y_0, *b_y_1))); } - let result = if pairing_check(&pairs) { 1 } else { 0 }; + + let result = pairing_check_bytes(&pairs)?; + let result = if result { 1 } else { 0 }; Ok(PrecompileOutput::new( required_gas, diff --git a/crates/precompile/src/bls12_381/utils.rs b/crates/precompile/src/bls12_381/utils.rs index 2871418916..c53f0cae5e 100644 --- a/crates/precompile/src/bls12_381/utils.rs +++ b/crates/precompile/src/bls12_381/utils.rs @@ -1,6 +1,6 @@ //! BLS12-381 utilities for padding and unpadding of input. use crate::bls12_381_const::{ - FP_LENGTH, FP_PAD_BY, PADDED_FP_LENGTH, PADDED_G1_LENGTH, PADDED_G2_LENGTH, + FP_LENGTH, FP_PAD_BY, G1_LENGTH, PADDED_FP_LENGTH, PADDED_G1_LENGTH, PADDED_G2_LENGTH, }; use crate::PrecompileError; @@ -51,3 +51,92 @@ pub(super) fn remove_g2_padding(input: &[u8]) -> Result<[&[u8; FP_LENGTH]; 4], P } Ok(input_fps) } + +/// Pads an unpadded G1 point (96 bytes) to the EVM-compatible format (128 bytes). +/// +/// Takes a G1 point with 2 field elements of 48 bytes each and adds 16 bytes of +/// zero padding before each field element. +pub(super) fn pad_g1_point(unpadded: &[u8]) -> [u8; PADDED_G1_LENGTH] { + assert_eq!( + unpadded.len(), + G1_LENGTH, + "Invalid unpadded G1 point length" + ); + + let mut padded = [0u8; PADDED_G1_LENGTH]; + + // Copy each field element (x, y) with padding + for i in 0..2 { + padded[i * PADDED_FP_LENGTH + FP_PAD_BY..(i + 1) * PADDED_FP_LENGTH] + .copy_from_slice(&unpadded[i * FP_LENGTH..(i + 1) * FP_LENGTH]); + } + + padded +} + +/// Pads an unpadded G2 point (192 bytes) to the EVM-compatible format (256 bytes). +/// +/// Takes a G2 point with 4 field elements of 48 bytes each and adds 16 bytes of +/// zero padding before each field element. 
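// A standalone sketch of the padding rule applied below (and undone by remove_g1_padding /
// remove_g2_padding): per EIP-2537 every 48-byte big-endian Fp element is preceded by 16 zero
// bytes, giving 64 bytes on the wire. In this crate those sizes are FP_LENGTH, FP_PAD_BY and
// PADDED_FP_LENGTH; the literals here are for illustration only.
fn pad_fp_element(unpadded: &[u8; 48]) -> [u8; 64] {
    let mut padded = [0u8; 64];
    padded[16..].copy_from_slice(unpadded);
    padded
}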
+pub(super) fn pad_g2_point(unpadded: &[u8]) -> [u8; PADDED_G2_LENGTH] { + assert_eq!( + unpadded.len(), + 4 * FP_LENGTH, + "Invalid unpadded G2 point length" + ); + + let mut padded = [0u8; PADDED_G2_LENGTH]; + + // Copy each field element (x.c0, x.c1, y.c0, y.c1) with padding + for i in 0..4 { + padded[i * PADDED_FP_LENGTH + FP_PAD_BY..(i + 1) * PADDED_FP_LENGTH] + .copy_from_slice(&unpadded[i * FP_LENGTH..(i + 1) * FP_LENGTH]); + } + + padded +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_pad_g1_point_roundtrip() { + // Create test data + let mut unpadded = [0u8; G1_LENGTH]; + for (i, byte) in unpadded.iter_mut().enumerate() { + *byte = (i * 2 + 1) as u8; + } + + // Pad the point + let padded = pad_g1_point(&unpadded); + + // Remove padding + let result = remove_g1_padding(&padded).unwrap(); + + // Verify roundtrip + assert_eq!(result[0], &unpadded[0..FP_LENGTH]); + assert_eq!(result[1], &unpadded[FP_LENGTH..G1_LENGTH]); + } + + #[test] + fn test_pad_g2_point_roundtrip() { + // Create test data for G2 point (192 bytes = 4 * 48) + let mut unpadded = [0u8; 4 * FP_LENGTH]; + for (i, byte) in unpadded.iter_mut().enumerate() { + *byte = (i * 2 + 1) as u8; + } + + // Pad the point + let padded = pad_g2_point(&unpadded); + + // Remove padding + let result = remove_g2_padding(&padded).unwrap(); + + // Verify roundtrip - G2 has 4 field elements + assert_eq!(result[0], &unpadded[0..FP_LENGTH]); + assert_eq!(result[1], &unpadded[FP_LENGTH..2 * FP_LENGTH]); + assert_eq!(result[2], &unpadded[2 * FP_LENGTH..3 * FP_LENGTH]); + assert_eq!(result[3], &unpadded[3 * FP_LENGTH..4 * FP_LENGTH]); + } +} diff --git a/crates/precompile/src/bls12_381_const.rs b/crates/precompile/src/bls12_381_const.rs index fce74dd0ce..9898aac258 100644 --- a/crates/precompile/src/bls12_381_const.rs +++ b/crates/precompile/src/bls12_381_const.rs @@ -116,6 +116,12 @@ pub const G1_LENGTH: usize = 2 * FP_LENGTH; /// a G1 element according to padding rules specified in EIP-2537. pub const PADDED_G1_LENGTH: usize = 2 * PADDED_FP_LENGTH; +/// FP2_LENGTH specifies the number of bytes needed to represent a Fp^2 element. +/// +/// Note: This is the quadratic extension of Fp, and by definition +/// means we need 2 Fp elements. +pub const FP2_LENGTH: usize = 2 * FP_LENGTH; + /// PADDED_FP2_LENGTH specifies the number of bytes that the EVM will use to represent /// a Fp^2 element according to the padding rules specified in EIP-2537. /// @@ -143,6 +149,11 @@ pub const G1_ADD_INPUT_LENGTH: usize = 2 * PADDED_G1_LENGTH; /// of these pairs. pub const G1_MSM_INPUT_LENGTH: usize = PADDED_G1_LENGTH + SCALAR_LENGTH; +/// G2_LENGTH specifies the number of bytes needed to represent a G2 element. +/// +/// Note: A G2 element contains 2 Fp^2 elements. +pub const G2_LENGTH: usize = 2 * FP2_LENGTH; + /// PADDED_G2_LENGTH specifies the number of bytes that the EVM will use to represent /// a G2 element. /// diff --git a/crates/precompile/src/bn128.rs b/crates/precompile/src/bn128.rs index b84e96f669..3cad806801 100644 --- a/crates/precompile/src/bn128.rs +++ b/crates/precompile/src/bn128.rs @@ -8,16 +8,10 @@ use std::vec::Vec; cfg_if::cfg_if! 
{ if #[cfg(feature = "bn")]{ mod substrate; - use substrate::{ - encode_g1_point, g1_point_add, g1_point_mul, pairing_check, read_g1_point, read_g2_point, - read_scalar, - }; + use substrate::{g1_point_add, g1_point_mul, pairing_check}; } else { mod arkworks; - use arkworks::{ - encode_g1_point, g1_point_add, g1_point_mul, pairing_check, read_g1_point, read_g2_point, - read_scalar, - }; + use arkworks::{g1_point_add, g1_point_mul, pairing_check}; } } @@ -162,11 +156,9 @@ pub fn run_add(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let input = right_pad::(input); - let p1 = read_g1_point(&input[..G1_LEN])?; - let p2 = read_g1_point(&input[G1_LEN..])?; - let result = g1_point_add(p1, p2); - - let output = encode_g1_point(result); + let p1_bytes = &input[..G1_LEN]; + let p2_bytes = &input[G1_LEN..]; + let output = g1_point_add(p1_bytes, p2_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -179,12 +171,9 @@ pub fn run_mul(input: &[u8], gas_cost: u64, gas_limit: u64) -> PrecompileResult let input = right_pad::(input); - let p = read_g1_point(&input[..G1_LEN])?; - - let scalar = read_scalar(&input[G1_LEN..G1_LEN + SCALAR_LEN]); - let result = g1_point_mul(p, scalar); - - let output = encode_g1_point(result); + let point_bytes = &input[..G1_LEN]; + let scalar_bytes = &input[G1_LEN..G1_LEN + SCALAR_LEN]; + let output = g1_point_mul(point_bytes, scalar_bytes)?; Ok(PrecompileOutput::new(gas_cost, output.into())) } @@ -217,33 +206,17 @@ pub fn run_pair( // This is where G1 ends. let g2_start = start + G1_LEN; + // Get G1 and G2 points from the input let encoded_g1_element = &input[g1_start..g2_start]; let encoded_g2_element = &input[g2_start..g2_start + G2_LEN]; - - // If either the G1 or G2 element is the encoded representation - // of the point at infinity, then these two points are no-ops - // in the pairing computation. - // - // Note: we do not skip the validation of these two elements even if - // one of them is the point at infinity because we could have G1 be - // the point at infinity and G2 be an invalid element or vice versa. - // In that case, the precompile should error because one of the elements - // was invalid. 
- let g1_is_zero = encoded_g1_element.iter().all(|i| *i == 0); - let g2_is_zero = encoded_g2_element.iter().all(|i| *i == 0); - - // Get G1 and G2 points from the input - let a = read_g1_point(encoded_g1_element)?; - let b = read_g2_point(encoded_g2_element)?; - - if !g1_is_zero && !g2_is_zero { - points.push((a, b)); - } + points.push((encoded_g1_element, encoded_g2_element)); } - let success = pairing_check(&points); - - Ok(PrecompileOutput::new(gas_used, bool_to_bytes32(success))) + let pairing_result = pairing_check(&points)?; + Ok(PrecompileOutput::new( + gas_used, + bool_to_bytes32(pairing_result), + )) } #[cfg(test)] @@ -531,5 +504,52 @@ mod tests { 260_000, ); assert!(matches!(res, Err(PrecompileError::Bn128PairLength))); + + // Test with point at infinity - should return true (identity element) + // G1 point at infinity (0,0) followed by a valid G2 point + let input = hex::decode( + "\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\ + 04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\ + 2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\ + 120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550", + ) + .unwrap(); + let expected = + hex::decode("0000000000000000000000000000000000000000000000000000000000000001") + .unwrap(); + + let outcome = run_pair( + &input, + BYZANTIUM_PAIR_PER_POINT, + BYZANTIUM_PAIR_BASE, + 260_000, + ) + .unwrap(); + assert_eq!(outcome.bytes, expected); + + // Test with G2 point at infinity - should also return true + // Valid G1 point followed by G2 point at infinity (0,0,0,0) + let input = hex::decode( + "\ + 1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\ + 3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000", + ) + .unwrap(); + + let outcome = run_pair( + &input, + BYZANTIUM_PAIR_PER_POINT, + BYZANTIUM_PAIR_BASE, + 260_000, + ) + .unwrap(); + assert_eq!(outcome.bytes, expected); } } diff --git a/crates/precompile/src/bn128/arkworks.rs b/crates/precompile/src/bn128/arkworks.rs index 0a991939bc..63a14a9946 100644 --- a/crates/precompile/src/bn128/arkworks.rs +++ b/crates/precompile/src/bn128/arkworks.rs @@ -180,21 +180,33 @@ pub(super) fn read_scalar(input: &[u8]) -> Fr { /// Performs point addition on two G1 points. #[inline] -pub(super) fn g1_point_add(p1: G1Affine, p2: G1Affine) -> G1Affine { +pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { + let p1 = read_g1_point(p1_bytes)?; + let p2 = read_g1_point(p2_bytes)?; + let p1_jacobian: G1Projective = p1.into(); let p3 = p1_jacobian + p2; + let output = encode_g1_point(p3.into_affine()); - p3.into_affine() + Ok(output) } /// Performs a G1 scalar multiplication. 
#[inline] -pub(super) fn g1_point_mul(p: G1Affine, fr: Fr) -> G1Affine { +pub(super) fn g1_point_mul( + point_bytes: &[u8], + fr_bytes: &[u8], +) -> Result<[u8; 64], PrecompileError> { + let p = read_g1_point(point_bytes)?; + let fr = read_scalar(fr_bytes); + let big_int = fr.into_bigint(); let result = p.mul_bigint(big_int); - result.into_affine() + let output = encode_g1_point(result.into_affine()); + + Ok(output) } /// pairing_check performs a pairing check on a list of G1 and G2 point pairs and @@ -203,13 +215,25 @@ pub(super) fn g1_point_mul(p: G1Affine, fr: Fr) -> G1Affine { /// Note: If the input is empty, this function returns true. /// This is different to EIP2537 which disallows the empty input. #[inline] -pub(super) fn pairing_check(pairs: &[(G1Affine, G2Affine)]) -> bool { - if pairs.is_empty() { - return true; +pub(super) fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result<bool, PrecompileError> { + let mut g1_points = Vec::with_capacity(pairs.len()); + let mut g2_points = Vec::with_capacity(pairs.len()); + + for (g1_bytes, g2_bytes) in pairs { + let g1 = read_g1_point(g1_bytes)?; + let g2 = read_g2_point(g2_bytes)?; + + // Skip pairs where either point is at infinity + if !g1.is_zero() && !g2.is_zero() { + g1_points.push(g1); + g2_points.push(g2); + } } - let (g1_points, g2_points): (Vec<G1Affine>, Vec<G2Affine>) = pairs.iter().copied().unzip(); + if g1_points.is_empty() { + return Ok(true); + } let pairing_result = Bn254::multi_pairing(&g1_points, &g2_points); - pairing_result.0.is_one() + Ok(pairing_result.0.is_one()) } diff --git a/crates/precompile/src/bn128/substrate.rs b/crates/precompile/src/bn128/substrate.rs index d69cd6d8be..ee3b5c162b 100644 --- a/crates/precompile/src/bn128/substrate.rs +++ b/crates/precompile/src/bn128/substrate.rs @@ -1,6 +1,7 @@ use super::{FQ2_LEN, FQ_LEN, G1_LEN, SCALAR_LEN}; use crate::PrecompileError; use bn::{AffineG1, AffineG2, Fq, Fq2, Group, Gt, G1, G2}; +use std::vec::Vec; /// Reads a single `Fq` field element from the input slice. /// @@ -150,14 +151,23 @@ pub(super) fn read_scalar(input: &[u8]) -> bn::Fr { /// Performs point addition on two G1 points. #[inline] -pub(super) fn g1_point_add(p1: G1, p2: G1) -> G1 { - p1 + p2 +pub(super) fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], PrecompileError> { + let p1 = read_g1_point(p1_bytes)?; + let p2 = read_g1_point(p2_bytes)?; + let result = p1 + p2; + Ok(encode_g1_point(result)) } /// Performs a G1 scalar multiplication. #[inline] -pub(super) fn g1_point_mul(p: G1, fr: bn::Fr) -> G1 { - p * fr +pub(super) fn g1_point_mul( + point_bytes: &[u8], + fr_bytes: &[u8], +) -> Result<[u8; 64], PrecompileError> { + let p = read_g1_point(point_bytes)?; + let fr = read_scalar(fr_bytes); + let result = p * fr; + Ok(encode_g1_point(result)) } /// pairing_check performs a pairing check on a list of G1 and G2 point pairs and @@ -166,9 +176,22 @@ pub(super) fn g1_point_mul(p: G1, fr: bn::Fr) -> G1 { /// Note: If the input is empty, this function returns true. /// This is different to EIP2537 which disallows the empty input.
#[inline] -pub(super) fn pairing_check(pairs: &[(G1, G2)]) -> bool { - if pairs.is_empty() { - return true; +pub(super) fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result<bool, PrecompileError> { + let mut parsed_pairs = Vec::with_capacity(pairs.len()); + + for (g1_bytes, g2_bytes) in pairs { + let g1 = read_g1_point(g1_bytes)?; + let g2 = read_g2_point(g2_bytes)?; + + // Skip pairs where either point is at infinity + if !g1.is_zero() && !g2.is_zero() { + parsed_pairs.push((g1, g2)); + } } - bn::pairing_batch(pairs) == Gt::one() + + if parsed_pairs.is_empty() { + return Ok(true); + } + + Ok(bn::pairing_batch(&parsed_pairs) == Gt::one()) } diff --git a/crates/precompile/src/interface.rs b/crates/precompile/src/interface.rs index 5ccea3bf0d..1c9c683458 100644 --- a/crates/precompile/src/interface.rs +++ b/crates/precompile/src/interface.rs @@ -16,12 +16,33 @@ pub struct PrecompileOutput { pub gas_used: u64, /// Output bytes pub bytes: Bytes, + /// Whether the precompile reverted + pub reverted: bool, } impl PrecompileOutput { /// Returns new precompile output with the given gas used and output bytes. pub fn new(gas_used: u64, bytes: Bytes) -> Self { - Self { gas_used, bytes } + Self { + gas_used, + bytes, + reverted: false, + } + } + + /// Returns new precompile revert with the given gas used and output bytes. + pub fn new_reverted(gas_used: u64, bytes: Bytes) -> Self { + Self { + gas_used, + bytes, + reverted: true, + } + } + + /// Flips [`Self::reverted`] to `true`. + pub fn reverted(mut self) -> Self { + self.reverted = true; + self } } diff --git a/crates/precompile/src/kzg_point_evaluation.rs b/crates/precompile/src/kzg_point_evaluation.rs index 023c49c957..9fb761cd7b 100644 --- a/crates/precompile/src/kzg_point_evaluation.rs +++ b/crates/precompile/src/kzg_point_evaluation.rs @@ -55,10 +55,10 @@ pub fn run(input: &[u8], gas_limit: u64) -> PrecompileResult { } // Verify KZG proof with z and y in big endian format - let commitment = as_bytes48(commitment); - let z = as_bytes32(&input[32..64]); - let y = as_bytes32(&input[64..96]); - let proof = as_bytes48(&input[144..192]); + let commitment: &[u8; 48] = commitment.try_into().unwrap(); + let z = input[32..64].try_into().unwrap(); + let y = input[64..96].try_into().unwrap(); + let proof = input[144..192].try_into().unwrap(); if !verify_kzg_proof(commitment, z, y, proof) { return Err(PrecompileError::BlobVerifyKzgProofFailed); } @@ -77,15 +77,20 @@ pub fn kzg_to_versioned_hash(commitment: &[u8]) -> [u8; 32] { /// Verify KZG proof. #[inline] -pub fn verify_kzg_proof(commitment: &Bytes48, z: &Bytes32, y: &Bytes32, proof: &Bytes48) -> bool { +pub fn verify_kzg_proof( + commitment: &[u8; 48], + z: &[u8; 32], + y: &[u8; 32], + proof: &[u8; 48], +) -> bool { cfg_if::cfg_if!
{ if #[cfg(feature = "c-kzg")] { - let kzg_settings = c_kzg::ethereum_kzg_settings(0); - kzg_settings.verify_kzg_proof(commitment, z, y, proof).unwrap_or(false) + let kzg_settings = c_kzg::ethereum_kzg_settings(8); + kzg_settings.verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof)).unwrap_or(false) } else if #[cfg(feature = "kzg-rs")] { let env = kzg_rs::EnvKzgSettings::default(); let kzg_settings = env.get(); - KzgProof::verify_kzg_proof(commitment, z, y, proof, kzg_settings).unwrap_or(false) + KzgProof::verify_kzg_proof(as_bytes48(commitment), as_bytes32(z), as_bytes32(y), as_bytes48(proof), kzg_settings).unwrap_or(false) } } } @@ -93,14 +98,14 @@ pub fn verify_kzg_proof(commitment: &Bytes48, z: &Bytes32, y: &Bytes32, proof: & /// Convert a slice to an array of a specific size. #[inline] #[track_caller] -pub fn as_array<const N: usize>(bytes: &[u8]) -> &[u8; N] { +fn as_array<const N: usize>(bytes: &[u8]) -> &[u8; N] { bytes.try_into().expect("slice with incorrect length") } /// Convert a slice to a 32 byte big endian array. #[inline] #[track_caller] -pub fn as_bytes32(bytes: &[u8]) -> &Bytes32 { +fn as_bytes32(bytes: &[u8]) -> &Bytes32 { // SAFETY: `#[repr(C)] Bytes32([u8; 32])` unsafe { &*as_array::<32>(bytes).as_ptr().cast() } } @@ -108,7 +113,7 @@ pub fn as_bytes32(bytes: &[u8]) -> &Bytes32 { /// Convert a slice to a 48 byte big endian array. #[inline] #[track_caller] -pub fn as_bytes48(bytes: &[u8]) -> &Bytes48 { +fn as_bytes48(bytes: &[u8]) -> &Bytes48 { // SAFETY: `#[repr(C)] Bytes48([u8; 48])` unsafe { &*as_array::<48>(bytes).as_ptr().cast() } } diff --git a/crates/precompile/src/lib.rs b/crates/precompile/src/lib.rs index ddf05cdce5..4cbb7eeee6 100644 --- a/crates/precompile/src/lib.rs +++ b/crates/precompile/src/lib.rs @@ -35,6 +35,8 @@ cfg_if::cfg_if! { } } +use arrayref as _; + #[cfg(all(feature = "c-kzg", feature = "kzg-rs"))] // silence kzg-rs lint as c-kzg will be used as default if both are enabled. use kzg_rs as _; @@ -197,7 +199,7 @@ impl Precompiles { static INSTANCE: OnceBox<Precompiles> = OnceBox::new(); INSTANCE.get_or_init(|| { let mut precompiles = Self::prague().clone(); - precompiles.extend([modexp::OSAKA, secp256r1::P256VERIFY]); + precompiles.extend([modexp::OSAKA, secp256r1::P256VERIFY_OSAKA]); Box::new(precompiles) }) } diff --git a/crates/precompile/src/modexp.rs b/crates/precompile/src/modexp.rs index 1dc5c0d9d5..08300abe3c 100644 --- a/crates/precompile/src/modexp.rs +++ b/crates/precompile/src/modexp.rs @@ -201,7 +201,7 @@ pub fn berlin_gas_calc(base_len: u64, exp_len: u64, mod_len: u64, exp_highp: &U2 /// 2. Increase cost when exponent is larger than 32 bytes /// 3.
Increase cost when base or modulus is larger than 32 bytes pub fn osaka_gas_calc(base_len: u64, exp_len: u64, mod_len: u64, exp_highp: &U256) -> u64 { - gas_calc::<500, 16, 3, _>(base_len, exp_len, mod_len, exp_highp, |max_len| -> U256 { + gas_calc::<500, 16, 1, _>(base_len, exp_len, mod_len, exp_highp, |max_len| -> U256 { if max_len <= 32 { return U256::from(16); // multiplication_complexity = 16 } @@ -410,8 +410,8 @@ mod tests { ]; const OSAKA_GAS: [u64; 19] = [ - 151_198, 1_360, 1_360, 1_360, 500, 500, 682, 500, 500, 2_730, 682, 682, 10_922, 2_730, - 2_730, 43_690, 10_922, 10_922, 174_762, + 453_596, 4_080, 4_080, 4_080, 500, 500, 2_048, 512, 512, 8_192, 2_048, 2_048, 32_768, + 8_192, 8_192, 131_072, 32_768, 32_768, 524_288, ]; #[test] diff --git a/crates/precompile/src/secp256k1.rs b/crates/precompile/src/secp256k1.rs index 8c0676d6f9..2ac987af13 100644 --- a/crates/precompile/src/secp256k1.rs +++ b/crates/precompile/src/secp256k1.rs @@ -48,12 +48,21 @@ pub fn ec_recover_run(input: &[u8], gas_limit: u64) -> PrecompileResult { let recid = input[63] - 27; let sig = <&B512>::try_from(&input[64..128]).unwrap(); - let res = ecrecover(sig, recid, msg); - + let res = ecrecover_bytes(sig.0, recid, msg.0); let out = res.map(|o| o.to_vec().into()).unwrap_or_default(); Ok(PrecompileOutput::new(ECRECOVER_BASE, out)) } +fn ecrecover_bytes(sig: [u8; 64], recid: u8, msg: [u8; 32]) -> Option<[u8; 32]> { + let sig = B512::from_slice(&sig); + let msg = B256::from_slice(&msg); + + match ecrecover(&sig, recid, &msg) { + Ok(address) => Some(address.0), + Err(_) => None, + } +} + // Select the correct implementation based on the enabled features. cfg_if::cfg_if! { if #[cfg(feature = "secp256k1")] { diff --git a/crates/precompile/src/secp256r1.rs b/crates/precompile/src/secp256r1.rs index 6df5690fda..323f803f69 100644 --- a/crates/precompile/src/secp256r1.rs +++ b/crates/precompile/src/secp256r1.rs @@ -9,8 +9,11 @@ use crate::{ u64_to_address, PrecompileError, PrecompileOutput, PrecompileResult, PrecompileWithAddress, }; -use p256::ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}; -use primitives::{Bytes, B256}; +use p256::{ + ecdsa::{signature::hazmat::PrehashVerifier, Signature, VerifyingKey}, + EncodedPoint, +}; +use primitives::{alloy_primitives::B512, Bytes, B256}; /// Address of secp256r1 precompile. pub const P256VERIFY_ADDRESS: u64 = 256; @@ -18,6 +21,9 @@ pub const P256VERIFY_ADDRESS: u64 = 256; /// Base gas fee for secp256r1 p256verify operation. pub const P256VERIFY_BASE_GAS_FEE: u64 = 3450; +/// Base gas fee for secp256r1 p256verify operation post Osaka. +pub const P256VERIFY_BASE_GAS_FEE_OSAKA: u64 = 6900; + /// Returns the secp256r1 precompile with its address. pub fn precompiles() -> impl Iterator { [P256VERIFY].into_iter() @@ -27,6 +33,10 @@ pub fn precompiles() -> impl Iterator { pub const P256VERIFY: PrecompileWithAddress = PrecompileWithAddress(u64_to_address(P256VERIFY_ADDRESS), p256_verify); +/// [RIP-7212](https://github.com/ethereum/RIPs/blob/master/RIPS/rip-7212.md#specification) secp256r1 precompile. +pub const P256VERIFY_OSAKA: PrecompileWithAddress = + PrecompileWithAddress(u64_to_address(P256VERIFY_ADDRESS), p256_verify_osaka); + /// secp256r1 precompile logic. It takes the input bytes sent to the precompile /// and the gas limit. The output represents the result of verifying the /// secp256r1 signature of the input. 
@@ -37,7 +47,24 @@ pub const P256VERIFY: PrecompileWithAddress = /// | :-----------------: | :-: | :-: | :----------: | :----------: | /// | 32 | 32 | 32 | 32 | 32 | pub fn p256_verify(input: &[u8], gas_limit: u64) -> PrecompileResult { - if P256VERIFY_BASE_GAS_FEE > gas_limit { + p256_verify_inner(input, gas_limit, P256VERIFY_BASE_GAS_FEE) +} + +/// secp256r1 precompile logic with Osaka gas cost. It takes the input bytes sent to the precompile +/// and the gas limit. The output represents the result of verifying the +/// secp256r1 signature of the input. +/// +/// The input is encoded as follows: +/// +/// | signed message hash | r | s | public key x | public key y | +/// | :-----------------: | :-: | :-: | :----------: | :----------: | +/// | 32 | 32 | 32 | 32 | 32 | +pub fn p256_verify_osaka(input: &[u8], gas_limit: u64) -> PrecompileResult { + p256_verify_inner(input, gas_limit, P256VERIFY_BASE_GAS_FEE_OSAKA) +} + +fn p256_verify_inner(input: &[u8], gas_limit: u64, gas_cost: u64) -> PrecompileResult { + if gas_cost > gas_limit { return Err(PrecompileError::OutOfGas); } let result = if verify_impl(input).is_some() { @@ -45,7 +72,7 @@ pub fn p256_verify(input: &[u8], gas_limit: u64) -> PrecompileResult { } else { Bytes::new() }; - Ok(PrecompileOutput::new(P256VERIFY_BASE_GAS_FEE, result)) + Ok(PrecompileOutput::new(gas_cost, result)) } /// Returns `Some(())` if the signature included in the input byte slice is @@ -56,23 +83,24 @@ pub fn verify_impl(input: &[u8]) -> Option<()> { } // msg signed (msg is already the hash of the original message) - let msg = &input[..32]; + let msg = <&B256>::try_from(&input[..32]).unwrap(); // r, s: signature - let sig = &input[32..96]; + let sig = <&B512>::try_from(&input[32..96]).unwrap(); // x, y: public key - let pk = &input[96..160]; + let pk = <&B512>::try_from(&input[96..160]).unwrap(); - // Prepend 0x04 to the public key: uncompressed form - let mut uncompressed_pk = [0u8; 65]; - uncompressed_pk[0] = 0x04; - uncompressed_pk[1..].copy_from_slice(pk); + verify_signature(msg.0, sig.0, pk.0) +} +fn verify_signature(msg: [u8; 32], sig: [u8; 64], pk: [u8; 64]) -> Option<()> { // Can fail only if the input is not exact length. - let signature = Signature::from_slice(sig).ok()?; - // Can fail if the input is not valid, so we have to propagate the error. 
- let public_key = VerifyingKey::from_sec1_bytes(&uncompressed_pk).ok()?; + let signature = Signature::from_slice(&sig).ok()?; + // Decode the public key bytes (x,y coordinates) using EncodedPoint + let encoded_point = EncodedPoint::from_untagged_bytes(&pk.into()); + // Create VerifyingKey from the encoded point + let public_key = VerifyingKey::from_encoded_point(&encoded_point).ok()?; - public_key.verify_prehash(msg, &signature).ok() + public_key.verify_prehash(&msg, &signature).ok() } #[cfg(test)] diff --git a/crates/primitives/CHANGELOG.md b/crates/primitives/CHANGELOG.md index 734048662f..cb1711f1b7 100644 --- a/crates/primitives/CHANGELOG.md +++ b/crates/primitives/CHANGELOG.md @@ -6,6 +6,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [20.1.0](https://github.com/bluealloy/revm/compare/revm-primitives-v20.0.0...revm-primitives-v20.1.0) - 2025-07-23 + +### Added + +- *(osaka)* update EIP-7825 constant ([#2753](https://github.com/bluealloy/revm/pull/2753)) +- expose sha3-keccak in revm and revm-primitives ([#2713](https://github.com/bluealloy/revm/pull/2713)) + +### Fixed + +- features and check in ci ([#2766](https://github.com/bluealloy/revm/pull/2766)) + ## [20.0.0](https://github.com/bluealloy/revm/compare/revm-primitives-v19.2.0...revm-primitives-v20.0.0) - 2025-06-19 ### Added diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 3379e60905..dc6f7a321e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-primitives" description = "Revm primitives types" -version = "20.0.0" +version = "20.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -26,17 +26,14 @@ num_enum = { version = "0.7.3", default-features = false } # Optional serde = { workspace = true, features = ["derive", "rc"], optional = true } - [features] default = ["std"] -std = [ - "alloy-primitives/std", - "serde?/std", - "num_enum/std" -] +std = ["alloy-primitives/std", "serde?/std", "num_enum/std"] serde = ["dep:serde", "alloy-primitives/serde"] +map-foldhash = ["alloy-primitives/map-foldhash"] hashbrown = ["alloy-primitives/map-hashbrown"] arbitrary = ["std", "alloy-primitives/arbitrary"] asm-keccak = ["alloy-primitives/asm-keccak"] +sha3-keccak = ["alloy-primitives/sha3-keccak"] rand = ["alloy-primitives/rand"] diff --git a/crates/primitives/src/eip7825.rs b/crates/primitives/src/eip7825.rs index 5805a19fab..b8248f5882 100644 --- a/crates/primitives/src/eip7825.rs +++ b/crates/primitives/src/eip7825.rs @@ -1,12 +1,11 @@ //! EIP-7825: Transaction Gas Limit Cap -//! Introduce a protocol-level cap on the maximum gas used by a transaction to 30 million. +//! Introduce a protocol-level cap on the maximum gas used by a transaction to 16 777 216. /// Transaction gas limit cap. /// /// # Rationale from EIP /// -/// The proposed cap of 30 million gas is based on the typical size of Ethereum blocks today, -/// which often range between 30-40 million gas. This value is large enough to allow complex -/// transactions, such as contract deployments and advanced DeFi interactions, while still -/// reserving space for other transactions within a block. -pub const TX_GAS_LIMIT_CAP: u64 = 30_000_000; +/// The proposed cap of 16,777,216 gas (2^24) provides a clean power-of-two boundary that simplifies implementation while still +/// being large enough to accommodate most complex transactions, including contract deployments and advanced DeFi interactions. 
+/// This value represents approximately half of typical block sizes (30-40 million gas), ensuring multiple transactions can fit within each block. +pub const TX_GAS_LIMIT_CAP: u64 = 16_777_216; diff --git a/crates/revm/CHANGELOG.md b/crates/revm/CHANGELOG.md index bcd0bc28e3..f380d29a73 100644 --- a/crates/revm/CHANGELOG.md +++ b/crates/revm/CHANGELOG.md @@ -6,6 +6,45 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [27.1.0](https://github.com/bluealloy/revm/compare/revm-v27.0.3...revm-v27.1.0) - 2025-07-23 + +### Added + +- expose sha3-keccak in revm and revm-primitives ([#2713](https://github.com/bluealloy/revm/pull/2713)) + +### Fixed + +- features and check in ci ([#2766](https://github.com/bluealloy/revm/pull/2766)) +- gas deduction with `disable_balance_check` ([#2699](https://github.com/bluealloy/revm/pull/2699)) + +### Other + +- add asm-sha2 feature for sha2 precompile ([#2712](https://github.com/bluealloy/revm/pull/2712)) + +## [27.0.3](https://github.com/bluealloy/revm/compare/revm-v27.0.2...revm-v27.0.3) - 2025-07-14 + +### Other + +- updated the following local packages: revm-context, revm-interpreter, revm-precompile, revm-handler, revm-inspector + +## [27.0.2](https://github.com/bluealloy/revm/compare/revm-v27.0.1...revm-v27.0.2) - 2025-07-03 + +### Other + +- updated the following local packages: revm-bytecode, revm-handler, revm-inspector, revm-state, revm-database-interface, revm-context-interface, revm-context, revm-database, revm-interpreter + +## [27.0.1](https://github.com/bluealloy/revm/compare/revm-v26.0.1...revm-v27.0.1) - 2025-06-30 + +### Added + +- optional_eip3541 ([#2661](https://github.com/bluealloy/revm/pull/2661)) + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- inline documentation of revm top modules ([#2666](https://github.com/bluealloy/revm/pull/2666)) +- use TxEnv::builder ([#2652](https://github.com/bluealloy/revm/pull/2652)) + ## [26.0.1](https://github.com/bluealloy/revm/compare/revm-v26.0.0...revm-v26.0.1) - 2025-06-20 ### Fixed diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 9f4fcd04bd..9cdfad92c8 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm" description = "Revm - Rust Ethereum Virtual Machine" -version = "26.0.1" +version = "27.1.0" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -52,6 +52,7 @@ std = [ "serde_json/std", ] hashbrown = ["interpreter/hashbrown", "precompile/hashbrown"] +map-foldhash = ["primitives/map-foldhash"] serde = [ "interpreter/serde", "database-interface/serde", @@ -66,6 +67,7 @@ serde = [ ] arbitrary = ["primitives/arbitrary"] asm-keccak = ["primitives/asm-keccak"] +sha3-keccak = ["primitives/sha3-keccak"] asyncdb = ["database-interface/asyncdb"] # Enables alloydb inside database crate @@ -79,12 +81,14 @@ dev = [ "memory_limit", "optional_balance_check", "optional_block_gas_limit", + "optional_eip3541", "optional_eip3607", "optional_no_base_fee", ] memory_limit = ["context/memory_limit", "interpreter/memory_limit"] optional_balance_check = ["context/optional_balance_check"] optional_block_gas_limit = ["context/optional_block_gas_limit"] +optional_eip3541 = ["context/optional_eip3541"] optional_eip3607 = ["context/optional_eip3607"] optional_no_base_fee = ["context/optional_no_base_fee"] enable_eip7702 = ["context/enable_eip7702"] @@ -99,6 +103,7 @@ c-kzg = [ kzg-rs = ["precompile/kzg-rs"] blst = 
["precompile/blst"] bn = ["precompile/bn"] +asm-sha2 = ["precompile/asm-sha2"] # Compile in portable mode, without ISA extensions. # Binary can be executed on all systems. diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index a4eedb309c..091f6807e7 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -3,16 +3,27 @@ #![cfg_attr(not(feature = "std"), no_std)] // reexport dependencies +#[doc(inline)] pub use bytecode; +#[doc(inline)] pub use context; +#[doc(inline)] pub use context_interface; +#[doc(inline)] pub use database; +#[doc(inline)] pub use database_interface; +#[doc(inline)] pub use handler; +#[doc(inline)] pub use inspector; +#[doc(inline)] pub use interpreter; +#[doc(inline)] pub use precompile; +#[doc(inline)] pub use primitives; +#[doc(inline)] pub use state; // Export items. diff --git a/crates/revm/tests/integration.rs b/crates/revm/tests/integration.rs index 28df41ed3d..4aa41ce0c2 100644 --- a/crates/revm/tests/integration.rs +++ b/crates/revm/tests/integration.rs @@ -223,3 +223,52 @@ fn test_frame_stack_index() { assert_eq!(evm.frame_stack.index(), None); compare_or_save_testdata("test_frame_stack_index.json", result1); } + +#[test] +#[cfg(feature = "optional_balance_check")] +fn test_disable_balance_check() { + use database::BENCH_CALLER_BALANCE; + + const RETURN_CALLER_BALANCE_BYTECODE: &[u8] = &[ + opcode::CALLER, + opcode::BALANCE, + opcode::PUSH1, + 0x00, + opcode::MSTORE, + opcode::PUSH1, + 0x20, + opcode::PUSH1, + 0x00, + opcode::RETURN, + ]; + + let mut evm = Context::mainnet() + .modify_cfg_chained(|cfg| cfg.disable_balance_check = true) + .with_db(BenchmarkDB::new_bytecode(Bytecode::new_legacy( + RETURN_CALLER_BALANCE_BYTECODE.into(), + ))) + .build_mainnet(); + + // Construct tx so that effective cost is more than caller balance. + let gas_price = 1; + let gas_limit = 100_000; + // Make sure value doesn't consume all balance since we want to validate that all effective + // cost is deducted. 
+ let tx_value = BENCH_CALLER_BALANCE - U256::from(1); + + let result = evm + .transact_one( + TxEnv::builder_for_bench() + .gas_price(gas_price) + .gas_limit(gas_limit) + .value(tx_value) + .build_fill(), + ) + .unwrap(); + + assert!(result.is_success()); + + let returned_balance = U256::from_be_slice(result.output().unwrap().as_ref()); + let expected_balance = U256::ZERO; + assert_eq!(returned_balance, expected_balance); +} diff --git a/crates/revm/tests/testdata/test_frame_stack_index.json b/crates/revm/tests/testdata/test_frame_stack_index.json new file mode 100644 index 0000000000..c5981e4b93 --- /dev/null +++ b/crates/revm/tests/testdata/test_frame_stack_index.json @@ -0,0 +1,11 @@ +{ + "Success": { + "reason": "Stop", + "gas_used": 21000, + "gas_refunded": 0, + "logs": [], + "output": { + "Call": "0x" + } + } +} \ No newline at end of file diff --git a/crates/state/CHANGELOG.md b/crates/state/CHANGELOG.md index 699175f2c2..6399b58a73 100644 --- a/crates/state/CHANGELOG.md +++ b/crates/state/CHANGELOG.md @@ -12,6 +12,24 @@ Dependency bump ## [Unreleased] +## [7.0.2](https://github.com/bluealloy/revm/compare/revm-state-v7.0.1...revm-state-v7.0.2) - 2025-07-23 + +### Other + +- updated the following local packages: revm-primitives, revm-bytecode + +## [7.0.1](https://github.com/bluealloy/revm/compare/revm-state-v7.0.0...revm-state-v7.0.1) - 2025-07-03 + +### Other + +- updated the following local packages: revm-bytecode + +## [6.0.1](https://github.com/bluealloy/revm/compare/revm-state-v6.0.0...revm-state-v6.0.1) - 2025-06-30 + +### Other + +- fix copy-pasted inner doc comments ([#2663](https://github.com/bluealloy/revm/pull/2663)) + ## [5.1.0](https://github.com/bluealloy/revm/compare/revm-state-v5.0.0...revm-state-v5.1.0) - 2025-06-19 ### Added diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index 3dac275120..2ec2dbc95e 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-state" description = "Revm state types" -version = "6.0.0" +version = "7.0.2" authors.workspace = true edition.workspace = true keywords.workspace = true diff --git a/crates/state/src/lib.rs b/crates/state/src/lib.rs index 6a97401848..d10af7eac3 100644 --- a/crates/state/src/lib.rs +++ b/crates/state/src/lib.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! Account and storage state. 
#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(not(feature = "std"), no_std)] diff --git a/crates/statetest-types/CHANGELOG.md b/crates/statetest-types/CHANGELOG.md index 148990528a..ae820d737d 100644 --- a/crates/statetest-types/CHANGELOG.md +++ b/crates/statetest-types/CHANGELOG.md @@ -7,6 +7,31 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [8.0.5](https://github.com/bluealloy/revm/compare/revm-statetest-types-v8.0.4...revm-statetest-types-v8.0.5) - 2025-07-23 + +### Other + +- updated the following local packages: revm + +## [8.0.4](https://github.com/bluealloy/revm/compare/revm-statetest-types-v8.0.3...revm-statetest-types-v8.0.4) - 2025-07-14 + +### Other + +- updated the following local packages: revm + +## [8.0.3](https://github.com/bluealloy/revm/compare/revm-statetest-types-v8.0.2...revm-statetest-types-v8.0.3) - 2025-07-03 + +### Other + +- updated the following local packages: revm + +## [8.0.2](https://github.com/bluealloy/revm/compare/revm-statetest-types-v8.0.1...revm-statetest-types-v8.0.2) - 2025-06-30 + +### Other + +- cargo clippy --fix --all ([#2671](https://github.com/bluealloy/revm/pull/2671)) +- statetest runner cleanup ([#2665](https://github.com/bluealloy/revm/pull/2665)) + ## [8.0.1](https://github.com/bluealloy/revm/compare/revm-statetest-types-v8.0.0...revm-statetest-types-v8.0.1) - 2025-06-20 ### Other diff --git a/crates/statetest-types/Cargo.toml b/crates/statetest-types/Cargo.toml index 8d12806b22..e866c2100c 100644 --- a/crates/statetest-types/Cargo.toml +++ b/crates/statetest-types/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "revm-statetest-types" description = "Statetest types for revme" -version = "8.0.1" +version = "8.0.5" authors.workspace = true edition.workspace = true keywords.workspace = true @@ -21,3 +21,5 @@ workspace = true revm = { workspace = true, features = ["std", "serde"] } serde = { workspace = true, features = ["derive", "rc"] } serde_json = { workspace = true, features = ["preserve_order"] } +k256 = { workspace = true } +thiserror = { workspace = true } diff --git a/crates/statetest-types/src/error.rs b/crates/statetest-types/src/error.rs new file mode 100644 index 0000000000..3146a2f37a --- /dev/null +++ b/crates/statetest-types/src/error.rs @@ -0,0 +1,21 @@ +use revm::primitives::B256; +use thiserror::Error; + +/// Errors that can occur during test setup and execution +#[derive(Debug, Error)] +pub enum TestError { + /// Unknown private key. + #[error("unknown private key: {0:?}")] + UnknownPrivateKey(B256), + /// Invalid transaction type. + #[error("invalid transaction type")] + InvalidTransactionType, + /// Unexpected exception. + #[error("unexpected exception: got {got_exception:?}, expected {expected_exception:?}")] + UnexpectedException { + /// Expected exception. + expected_exception: Option, + /// Got exception. 
+ got_exception: Option, + }, +} diff --git a/crates/statetest-types/src/lib.rs b/crates/statetest-types/src/lib.rs index 578df9555c..e407782de8 100644 --- a/crates/statetest-types/src/lib.rs +++ b/crates/statetest-types/src/lib.rs @@ -9,19 +9,23 @@ mod account_info; mod deserializer; mod env; +mod error; mod spec; mod test; mod test_authorization; mod test_suite; mod test_unit; mod transaction; +mod utils; pub use account_info::*; pub use deserializer::*; pub use env::*; +pub use error::*; pub use spec::*; pub use test::*; pub use test_authorization::*; pub use test_suite::*; pub use test_unit::*; pub use transaction::*; +pub use utils::*; diff --git a/crates/statetest-types/src/test.rs b/crates/statetest-types/src/test.rs index 89552a3bc3..8bc7cb9c42 100644 --- a/crates/statetest-types/src/test.rs +++ b/crates/statetest-types/src/test.rs @@ -1,7 +1,12 @@ -use revm::primitives::{Address, Bytes, HashMap, B256}; +use revm::{ + context::tx::TxEnv, + primitives::{Address, Bytes, HashMap, TxKind, B256}, +}; use serde::Deserialize; -use crate::{transaction::TxPartIndices, AccountInfo}; +use crate::{ + error::TestError, transaction::TxPartIndices, utils::recover_address, AccountInfo, TestUnit, +}; /// State test indexed state result deserialization. #[derive(Debug, PartialEq, Eq, Deserialize)] @@ -34,3 +39,97 @@ pub struct Test { /// Tx bytes pub txbytes: Option, } + +impl Test { + /// Create a transaction environment from this test and the test unit. + /// + /// This function sets up the transaction environment using the test's + /// indices to select the appropriate transaction parameters from the + /// test unit. + /// + /// # Arguments + /// + /// * `unit` - The test unit containing transaction parts + /// + /// # Returns + /// + /// A configured [`TxEnv`] ready for execution, or an error if setup fails + /// + /// # Errors + /// + /// Returns an error if: + /// - The private key cannot be used to recover the sender address + /// - The transaction type is invalid and no exception is expected + pub fn tx_env(&self, unit: &TestUnit) -> Result { + // Setup sender + let caller = if let Some(address) = unit.transaction.sender { + address + } else { + recover_address(unit.transaction.secret_key.as_slice()) + .ok_or(TestError::UnknownPrivateKey(unit.transaction.secret_key))? 
+ }; + + // Transaction specific fields + let tx_type = unit.transaction.tx_type(self.indexes.data).ok_or_else(|| { + if self.expect_exception.is_some() { + TestError::UnexpectedException { + expected_exception: self.expect_exception.clone(), + got_exception: Some("Invalid transaction type".to_string()), + } + } else { + TestError::InvalidTransactionType + } + })?; + + let tx = TxEnv { + caller, + gas_price: unit + .transaction + .gas_price + .or(unit.transaction.max_fee_per_gas) + .unwrap_or_default() + .try_into() + .unwrap_or(u128::MAX), + gas_priority_fee: unit + .transaction + .max_priority_fee_per_gas + .map(|b| u128::try_from(b).expect("max priority fee less than u128::MAX")), + blob_hashes: unit.transaction.blob_versioned_hashes.clone(), + max_fee_per_blob_gas: unit + .transaction + .max_fee_per_blob_gas + .map(|b| u128::try_from(b).expect("max fee less than u128::MAX")) + .unwrap_or(u128::MAX), + tx_type: tx_type as u8, + gas_limit: unit.transaction.gas_limit[self.indexes.gas].saturating_to(), + data: unit.transaction.data[self.indexes.data].clone(), + nonce: u64::try_from(unit.transaction.nonce).unwrap(), + value: unit.transaction.value[self.indexes.value], + access_list: unit + .transaction + .access_lists + .get(self.indexes.data) + .cloned() + .flatten() + .unwrap_or_default(), + authorization_list: unit + .transaction + .authorization_list + .clone() + .map(|auth_list| { + auth_list + .into_iter() + .map(|i| revm::context::either::Either::Left(i.into())) + .collect::>() + }) + .unwrap_or_default(), + kind: match unit.transaction.to { + Some(add) => TxKind::Call(add), + None => TxKind::Create, + }, + ..TxEnv::default() + }; + + Ok(tx) + } +} diff --git a/crates/statetest-types/src/test_unit.rs b/crates/statetest-types/src/test_unit.rs index 23058830ee..f92caf4fe6 100644 --- a/crates/statetest-types/src/test_unit.rs +++ b/crates/statetest-types/src/test_unit.rs @@ -2,7 +2,16 @@ use serde::Deserialize; use std::collections::{BTreeMap, HashMap}; use crate::{AccountInfo, Env, SpecName, Test, TransactionParts}; -use revm::primitives::{Address, Bytes}; +use revm::{ + context::{block::BlockEnv, cfg::CfgEnv}, + context_interface::block::calc_excess_blob_gas, + database::CacheState, + primitives::{ + eip4844::TARGET_BLOB_GAS_PER_BLOCK_CANCUN, hardfork::SpecId, keccak256, Address, Bytes, + B256, + }, + state::Bytecode, +}; /// Single test unit struct #[derive(Debug, PartialEq, Eq, Deserialize)] @@ -51,3 +60,90 @@ pub struct TestUnit { pub out: Option, //pub config } + +impl TestUnit { + /// Prepare the state from the test unit. + /// + /// This function uses [`TestUnit::pre`] to prepare the pre-state from the test unit. + /// It creates a new cache state and inserts the accounts from the test unit. + /// + /// # Returns + /// + /// A [`CacheState`] object containing the pre-state accounts and storages. + pub fn state(&self) -> CacheState { + let mut cache_state = CacheState::new(false); + for (address, info) in &self.pre { + let code_hash = keccak256(&info.code); + let bytecode = Bytecode::new_raw_checked(info.code.clone()) + .unwrap_or(Bytecode::new_legacy(info.code.clone())); + let acc_info = revm::state::AccountInfo { + balance: info.balance, + code_hash, + code: Some(bytecode), + nonce: info.nonce, + }; + cache_state.insert_account_with_storage(*address, acc_info, info.storage.clone()); + } + cache_state + } + + /// Create a block environment from the test unit. 
+ /// + /// This function sets up the block environment using the current test unit's + /// environment settings and the provided configuration. + /// + /// # Arguments + /// + /// * `cfg` - The configuration environment containing spec and blob settings + /// + /// # Returns + /// + /// A configured [`BlockEnv`] ready for execution + pub fn block_env(&self, cfg: &CfgEnv) -> BlockEnv { + let mut block = BlockEnv { + number: self.env.current_number, + beneficiary: self.env.current_coinbase, + timestamp: self.env.current_timestamp, + gas_limit: self.env.current_gas_limit.try_into().unwrap_or(u64::MAX), + basefee: self + .env + .current_base_fee + .unwrap_or_default() + .try_into() + .unwrap_or(u64::MAX), + difficulty: self.env.current_difficulty, + prevrandao: self.env.current_random, + ..BlockEnv::default() + }; + + // Handle EIP-4844 blob gas + if let Some(current_excess_blob_gas) = self.env.current_excess_blob_gas { + block.set_blob_excess_gas_and_price( + current_excess_blob_gas.to(), + revm::primitives::eip4844::BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN, + ); + } else if let (Some(parent_blob_gas_used), Some(parent_excess_blob_gas)) = ( + self.env.parent_blob_gas_used, + self.env.parent_excess_blob_gas, + ) { + block.set_blob_excess_gas_and_price( + calc_excess_blob_gas( + parent_blob_gas_used.to(), + parent_excess_blob_gas.to(), + self.env + .parent_target_blobs_per_block + .map(|i| i.to()) + .unwrap_or(TARGET_BLOB_GAS_PER_BLOCK_CANCUN), + ), + revm::primitives::eip4844::BLOB_BASE_FEE_UPDATE_FRACTION_CANCUN, + ); + } + + // Set default prevrandao for merge + if cfg.spec.is_enabled_in(SpecId::MERGE) && block.prevrandao.is_none() { + block.prevrandao = Some(B256::default()); + } + + block + } +} diff --git a/crates/statetest-types/src/transaction.rs b/crates/statetest-types/src/transaction.rs index b11f1b3e97..81a98a30a7 100644 --- a/crates/statetest-types/src/transaction.rs +++ b/crates/statetest-types/src/transaction.rs @@ -90,14 +90,6 @@ impl TransactionParts { return Some(TransactionType::Eip7702); } - // TODO(EOF) - // // And if it has initcodes it is EIP-7873 tx - // if self.initcodes.is_some() { - // // Target need to be present for EIP-7873 tx - // self.to?; - // return Some(TransactionType::Eip7873); - // } - Some(tx_type) } } diff --git a/crates/statetest-types/src/utils.rs b/crates/statetest-types/src/utils.rs new file mode 100644 index 0000000000..3ecc89594a --- /dev/null +++ b/crates/statetest-types/src/utils.rs @@ -0,0 +1,25 @@ +use k256::ecdsa::SigningKey; +use revm::primitives::Address; + +/// Recover the address from a private key ([SigningKey]). +pub fn recover_address(private_key: &[u8]) -> Option
<Address> { + let key = SigningKey::from_slice(private_key).ok()?; + let public_key = key.verifying_key().to_encoded_point(false); + Some(Address::from_raw_public_key(&public_key.as_bytes()[1..])) +} + +#[cfg(test)] +mod tests { + use super::*; + use revm::primitives::{address, hex}; + + #[test] + fn sanity_test() { + assert_eq!( + Some(address!("a94f5374fce5edbc8e2a8697c15331677e6ebf0b")), + recover_address(&hex!( + "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + )) + ) + } +} diff --git a/examples/block_traces/src/main.rs b/examples/block_traces/src/main.rs index df63be3186..af1b3f95e9 100644 --- a/examples/block_traces/src/main.rs +++ b/examples/block_traces/src/main.rs @@ -1,4 +1,6 @@ -//! Optimism-specific constants, types, and helpers. +//! Example that shows how to replay a block and trace the execution of each transaction. +//! +//! The EIP3155 trace of each transaction is saved into the file `traces/{tx_number}.json`. #![cfg_attr(not(test), warn(unused_crate_dependencies))] use alloy_consensus::Transaction; @@ -119,22 +121,22 @@ async fn main() -> anyhow::Result<()> { // Construct the file writer to write the trace to let tx_number = tx.transaction_index.unwrap_or_default(); - let tx = TxEnv { - caller: tx.inner.signer(), - gas_limit: tx.gas_limit(), - gas_price: tx.gas_price().unwrap_or(tx.inner.max_fee_per_gas()), - value: tx.value(), - data: tx.input().to_owned(), - gas_priority_fee: tx.max_priority_fee_per_gas(), - chain_id: Some(chain_id), - nonce: tx.nonce(), - access_list: tx.access_list().cloned().unwrap_or_default(), - kind: match tx.to() { + let tx = TxEnv::builder() + .caller(tx.inner.signer()) + .gas_limit(tx.gas_limit()) + .gas_price(tx.gas_price().unwrap_or(tx.inner.max_fee_per_gas())) + .value(tx.value()) + .data(tx.input().to_owned()) + .gas_priority_fee(tx.max_priority_fee_per_gas()) + .chain_id(Some(chain_id)) + .nonce(tx.nonce()) + .access_list(tx.access_list().cloned().unwrap_or_default()) + .kind(match tx.to() { Some(to_address) => TxKind::Call(to_address), None => TxKind::Create, - }, - ..Default::default() - }; + }) + .build() + .unwrap(); let file_name = format!("traces/{tx_number}.json"); let write = OpenOptions::new() diff --git a/examples/contract_deployment/src/main.rs b/examples/contract_deployment/src/main.rs index c36ef7bfca..6e900b8cd9 100644 --- a/examples/contract_deployment/src/main.rs +++ b/examples/contract_deployment/src/main.rs @@ -1,4 +1,4 @@ -//! Optimism-specific constants, types, and helpers. +//! Example that deploys a contract by forging and executing a contract creation transaction. #![cfg_attr(not(test), warn(unused_crate_dependencies))] use anyhow::{anyhow, bail}; @@ -52,11 +52,13 @@ fn main() -> anyhow::Result<()> { let mut evm = ctx.build_mainnet(); println!("bytecode: {}", hex::encode(&bytecode)); - let ref_tx = evm.transact_commit(TxEnv { - kind: TxKind::Create, - data: bytecode.clone(), - ..Default::default() - })?; + let ref_tx = evm.transact_commit( + TxEnv::builder() + .kind(TxKind::Create) + .data(bytecode.clone()) + .build() + .unwrap(), + )?; let ExecutionResult::Success { output: Output::Create(_, Some(address)), ..
@@ -66,12 +68,14 @@ fn main() -> anyhow::Result<()> { }; println!("Created contract at {address}"); - let output = evm.transact(TxEnv { - kind: TxKind::Call(address), - data: Default::default(), - nonce: 1, - ..Default::default() - })?; + let output = evm.transact( + TxEnv::builder() + .kind(TxKind::Call(address)) + .data(Default::default()) + .nonce(1) + .build() + .unwrap(), + )?; let Some(storage0) = output .state .get(&address) diff --git a/examples/custom_opcodes/src/main.rs b/examples/custom_opcodes/src/main.rs index d679fc1cf5..c08305de5f 100644 --- a/examples/custom_opcodes/src/main.rs +++ b/examples/custom_opcodes/src/main.rs @@ -52,10 +52,12 @@ pub fn main() { .with_inspector(TracerEip3155::new_stdout().without_summary()); // inspect the transaction. - let _ = evm.inspect_one_tx(TxEnv { - kind: TxKind::Call(BENCH_TARGET), - ..Default::default() - }); + let _ = evm.inspect_one_tx( + TxEnv::builder() + .kind(TxKind::Call(BENCH_TARGET)) + .build() + .unwrap(), + ); // Expected output where we can see that JUMPDEST is called. /* diff --git a/examples/custom_precompile_journal/CHANGELOG.md b/examples/custom_precompile_journal/CHANGELOG.md new file mode 100644 index 0000000000..30d68f8f4e --- /dev/null +++ b/examples/custom_precompile_journal/CHANGELOG.md @@ -0,0 +1,14 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.1.0](https://github.com/bluealloy/revm/releases/tag/custom_precompile_journal-v0.1.0) - 2025-07-03 + +### Added + +- add custom precompile with journal access example ([#2677](https://github.com/bluealloy/revm/pull/2677)) diff --git a/examples/custom_precompile_journal/Cargo.toml b/examples/custom_precompile_journal/Cargo.toml new file mode 100644 index 0000000000..cc46f6f2f5 --- /dev/null +++ b/examples/custom_precompile_journal/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "custom_precompile_journal" +version = "0.1.0" +edition = "2021" + +[[bin]] +name = "custom_precompile_journal" +path = "src/main.rs" + +[dependencies] +revm = { path = "../../crates/revm", features = ["optional_eip3607"] } +anyhow = "1.0" diff --git a/examples/custom_precompile_journal/README.md b/examples/custom_precompile_journal/README.md new file mode 100644 index 0000000000..f4d11ac00c --- /dev/null +++ b/examples/custom_precompile_journal/README.md @@ -0,0 +1,124 @@ +# Custom Precompile with Journal Access Example + +This example demonstrates how to create a custom precompile for REVM that can access and modify the journal (state), integrated into a custom EVM implementation similar to MyEvm. + +## Overview + +The example shows: +1. How to create a custom precompile provider that extends the standard Ethereum precompiles +2. How to implement a precompile that can read from and write to the journaled state +3. How to modify account balances and storage from within a precompile +4. How to integrate custom precompiles into a custom EVM implementation +5. 
How to create handlers for transaction execution + +## Architecture + +### CustomPrecompileProvider + +A custom implementation of the `PrecompileProvider` trait that: +- Extends the standard Ethereum precompiles (`EthPrecompiles`) +- Adds a custom precompile at address `0x0000000000000000000000000000000000000100` +- Delegates to standard precompiles for all other addresses +- Implements journal access for storage and balance operations + +### CustomEvm + +A custom EVM implementation that: +- Wraps the standard REVM `Evm` struct with `CustomPrecompileProvider` +- Follows the same pattern as the MyEvm example +- Maintains full compatibility with REVM's execution model +- Supports both regular and inspector-based execution + +### CustomHandler + +A handler implementation that: +- Implements the `Handler` trait for transaction execution +- Supports both `Handler` and `InspectorHandler` traits +- Can be used with `handler.run(&mut evm)` for full transaction execution + +## Custom Precompile Functionality + +The precompile at `0x0100` supports two operations: + +1. **Read Storage** (empty input data): + - Reads a value from storage slot 0 + - Returns the value as output + - Gas cost: 2,100 + +2. **Write Storage** (32 bytes input): + - Stores the input value to storage slot 0 + - Transfers 1 wei from the precompile to the caller as a reward + - Gas cost: 41,000 (21,000 base + 20,000 for SSTORE) + +## Journal Access Patterns + +The example demonstrates how to access the journal from within a precompile: + +```rust +// Reading storage +let value = context + .journal_mut() + .sload(address, key) + .map_err(|e| PrecompileError::Other(format!("Storage read failed: {:?}", e)))? + .data; + +// Writing storage +context + .journal_mut() + .sstore(address, key, value) + .map_err(|e| PrecompileError::Other(format!("Storage write failed: {:?}", e)))?; + +// Transferring balance +context + .journal_mut() + .transfer(from, to, amount) + .map_err(|e| PrecompileError::Other(format!("Transfer failed: {:?}", e)))?; + +// Incrementing balance +context + .journal_mut() + .balance_incr(address, amount) + .map_err(|e| PrecompileError::Other(format!("Balance increment failed: {:?}", e)))?; +``` + +## Usage + +To use this custom EVM in your application: + +```rust +use custom_precompile_journal::{CustomEvm, CustomHandler}; +use revm::{context::Context, inspector::NoOpInspector, MainContext}; + +// Create the custom EVM +let context = Context::mainnet().with_db(db); +let mut evm = CustomEvm::new(context, NoOpInspector); + +// Create the handler +let handler = CustomHandler::>::default(); + +// Execute transactions +let result = handler.run(&mut evm); +``` + +## Safety Features + +- **Static call protection**: Prevents state modification in view calls +- **Gas accounting**: Proper gas cost calculation and out-of-gas protection +- **Error handling**: Comprehensive error types and result handling +- **Type safety**: Full Rust type safety with generic constraints + +## Running the Example + +```bash +cargo run -p custom_precompile_journal +``` + +The example will demonstrate the custom EVM architecture and show how the various components work together to provide journal access functionality within precompiles. 
+ +## Integration with Existing Code + +This example extends the op-revm pattern and demonstrates how to: +- Create custom precompile providers that can access the journal +- Integrate custom precompiles into REVM's execution model +- Maintain compatibility with existing REVM patterns and interfaces +- Build custom EVM variants similar to MyEvm but with enhanced precompile capabilities \ No newline at end of file diff --git a/examples/custom_precompile_journal/src/custom_evm.rs b/examples/custom_precompile_journal/src/custom_evm.rs new file mode 100644 index 0000000000..02222c8fd2 --- /dev/null +++ b/examples/custom_precompile_journal/src/custom_evm.rs @@ -0,0 +1,151 @@ +//! Custom EVM implementation with journal-accessing precompiles. + +use crate::precompile_provider::CustomPrecompileProvider; +use revm::{ + context::{ContextError, ContextSetters, ContextTr, Evm, FrameStack}, + handler::{ + evm::FrameTr, instructions::EthInstructions, EthFrame, EvmTr, FrameInitOrResult, + ItemOrResult, + }, + inspector::{InspectorEvmTr, JournalExt}, + interpreter::interpreter::EthInterpreter, + primitives::hardfork::SpecId, + Database, Inspector, +}; + +/// Custom EVM variant with journal-accessing precompiles. +/// +/// This EVM extends the standard behavior by using a custom precompile provider +/// that includes journal access functionality. It follows the same pattern as MyEvm +/// but uses CustomPrecompileProvider instead of EthPrecompiles. +#[derive(Debug)] +pub struct CustomEvm( + pub Evm< + CTX, + INSP, + EthInstructions, + CustomPrecompileProvider, + EthFrame, + >, +); + +impl CustomEvm +where + CTX: ContextTr>, +{ + /// Creates a new instance of CustomEvm with the provided context and inspector. + /// + /// # Arguments + /// + /// * `ctx` - The execution context that manages state, environment, and journaling + /// * `inspector` - The inspector for debugging and tracing execution + /// + /// # Returns + /// + /// A new CustomEvm instance configured with: + /// - The provided context and inspector + /// - Mainnet instruction set + /// - Custom precompiles with journal access + /// - A fresh frame stack for execution + pub fn new(ctx: CTX, inspector: INSP) -> Self { + Self(Evm { + ctx, + inspector, + instruction: EthInstructions::new_mainnet(), + precompiles: CustomPrecompileProvider::new_with_spec(SpecId::CANCUN), + frame_stack: FrameStack::new(), + }) + } +} + +impl EvmTr for CustomEvm +where + CTX: ContextTr>, +{ + type Context = CTX; + type Instructions = EthInstructions; + type Precompiles = CustomPrecompileProvider; + type Frame = EthFrame; + + fn ctx(&mut self) -> &mut Self::Context { + &mut self.0.ctx + } + + fn ctx_ref(&self) -> &Self::Context { + self.0.ctx_ref() + } + + fn ctx_instructions(&mut self) -> (&mut Self::Context, &mut Self::Instructions) { + self.0.ctx_instructions() + } + + fn ctx_precompiles(&mut self) -> (&mut Self::Context, &mut Self::Precompiles) { + self.0.ctx_precompiles() + } + + fn frame_stack(&mut self) -> &mut FrameStack { + self.0.frame_stack() + } + + fn frame_init( + &mut self, + frame_input: ::FrameInit, + ) -> Result< + ItemOrResult<&mut Self::Frame, ::FrameResult>, + ContextError<<::Db as Database>::Error>, + > { + self.0.frame_init(frame_input) + } + + fn frame_run( + &mut self, + ) -> Result< + FrameInitOrResult, + ContextError<<::Db as Database>::Error>, + > { + self.0.frame_run() + } + + fn frame_return_result( + &mut self, + frame_result: ::FrameResult, + ) -> Result< + Option<::FrameResult>, + ContextError<<::Db as Database>::Error>, + > { + 
self.0.frame_return_result(frame_result) + } +} + +impl InspectorEvmTr for CustomEvm +where + CTX: ContextSetters, Journal: JournalExt>, + INSP: Inspector, +{ + type Inspector = INSP; + + fn inspector(&mut self) -> &mut Self::Inspector { + self.0.inspector() + } + + fn ctx_inspector(&mut self) -> (&mut Self::Context, &mut Self::Inspector) { + self.0.ctx_inspector() + } + + fn ctx_inspector_frame( + &mut self, + ) -> (&mut Self::Context, &mut Self::Inspector, &mut Self::Frame) { + self.0.ctx_inspector_frame() + } + + fn ctx_inspector_frame_instructions( + &mut self, + ) -> ( + &mut Self::Context, + &mut Self::Inspector, + &mut Self::Frame, + &mut Self::Instructions, + ) { + self.0.ctx_inspector_frame_instructions() + } +} diff --git a/examples/custom_precompile_journal/src/lib.rs b/examples/custom_precompile_journal/src/lib.rs new file mode 100644 index 0000000000..91e98b838f --- /dev/null +++ b/examples/custom_precompile_journal/src/lib.rs @@ -0,0 +1,7 @@ +//! Custom EVM implementation with journal-accessing precompiles. + +pub mod custom_evm; +pub mod precompile_provider; + +pub use custom_evm::CustomEvm; +pub use precompile_provider::CustomPrecompileProvider; diff --git a/examples/custom_precompile_journal/src/main.rs b/examples/custom_precompile_journal/src/main.rs new file mode 100644 index 0000000000..2aaf791a62 --- /dev/null +++ b/examples/custom_precompile_journal/src/main.rs @@ -0,0 +1,198 @@ +//! Example of a custom precompile that can access and modify the journal. +//! +//! This example demonstrates: +//! 1. Creating a custom precompile provider that extends the standard Ethereum precompiles +//! 2. Implementing a precompile that can read from and write to the journaled state +//! 3. Modifying account balances and storage from within a precompile +//! 4. 
Integrating the custom precompile into a custom EVM implementation + +use custom_precompile_journal::{precompile_provider::CUSTOM_PRECOMPILE_ADDRESS, CustomEvm}; +use revm::{ + context::{result::InvalidTransaction, Context, ContextSetters, ContextTr, TxEnv}, + context_interface::result::EVMError, + database::InMemoryDB, + handler::{Handler, MainnetHandler}, + inspector::NoOpInspector, + primitives::{address, TxKind, U256}, + state::AccountInfo, + Database, MainContext, +}; + +// Type alias for the error type +type MyError = EVMError; + +fn main() -> anyhow::Result<()> { + println!("=== Custom EVM with Journal-Accessing Precompiles ===\n"); + + // Setup initial accounts + let user_address = address!("0000000000000000000000000000000000000001"); + let mut db = InMemoryDB::default(); + + // Give the user some ETH for gas + let user_balance = U256::from(10).pow(U256::from(18)); // 1 ETH + db.insert_account_info( + user_address, + AccountInfo { + balance: user_balance, + nonce: 0, + code_hash: revm::primitives::KECCAK_EMPTY, + code: None, + }, + ); + + // Give the precompile some initial balance for transfers + db.insert_account_info( + CUSTOM_PRECOMPILE_ADDRESS, + AccountInfo { + balance: U256::from(1000), // 1000 wei + nonce: 0, + code_hash: revm::primitives::KECCAK_EMPTY, + code: None, + }, + ); + + println!("✅ Custom EVM with journal-accessing precompiles created successfully!"); + println!("🔧 Precompile available at address: {CUSTOM_PRECOMPILE_ADDRESS}"); + println!("📝 Precompile supports:"); + println!(" - Read storage (empty input): Returns value from storage slot 0"); + println!(" - Write storage (32-byte input): Stores value and transfers 1 wei to caller"); + + // Create our custom EVM with mainnet handler + let context = Context::mainnet().with_db(db); + let mut evm = CustomEvm::new(context, NoOpInspector); + println!("\n=== Testing Custom Precompile ==="); + + // Test 1: Read initial storage value (should be 0) + println!("1. Reading initial storage value from custom precompile..."); + evm.0.ctx.set_tx( + TxEnv::builder() + .caller(user_address) + .kind(TxKind::Call(CUSTOM_PRECOMPILE_ADDRESS)) + .data(revm::primitives::Bytes::new()) // Empty data for read operation + .gas_limit(100_000) + .build() + .unwrap(), + ); + let read_result: Result<_, MyError> = MainnetHandler::default().run(&mut evm); + + match read_result { + Ok(revm::context::result::ExecutionResult::Success { + output, gas_used, .. + }) => { + println!(" ✓ Success! Gas used: {gas_used}"); + let data = output.data(); + let value = U256::from_be_slice(data); + println!(" 📖 Initial storage value: {value}"); + } + Ok(revm::context::result::ExecutionResult::Revert { output, gas_used }) => { + println!(" ❌ Reverted! Gas used: {gas_used}, Output: {output:?}"); + } + Ok(revm::context::result::ExecutionResult::Halt { reason, gas_used }) => { + println!(" 🛑 Halted! Reason: {reason:?}, Gas used: {gas_used}"); + } + Err(e) => { + println!(" ❌ Error: {e:?}"); + } + } + + // Test 2: Write value 42 to storage + println!("\n2. Writing value 42 to storage via custom precompile..."); + let storage_value = U256::from(42); + evm.0.ctx.set_tx( + TxEnv::builder() + .caller(user_address) + .kind(TxKind::Call(CUSTOM_PRECOMPILE_ADDRESS)) + .data(storage_value.to_be_bytes_vec().into()) + .gas_limit(100_000) + .nonce(1) + .build() + .unwrap(), + ); + let write_result: Result<_, MyError> = MainnetHandler::default().run(&mut evm); + + match write_result { + Ok(revm::context::result::ExecutionResult::Success { gas_used, .. 
}) => { + println!(" ✓ Success! Gas used: {gas_used}"); + println!(" 📝 Value 42 written to storage"); + println!(" 💰 1 wei transferred from precompile to caller as reward"); + } + Ok(revm::context::result::ExecutionResult::Revert { output, gas_used }) => { + println!(" ❌ Reverted! Gas used: {gas_used}, Output: {output:?}"); + } + Ok(revm::context::result::ExecutionResult::Halt { reason, gas_used }) => { + println!(" 🛑 Halted! Reason: {reason:?}, Gas used: {gas_used}"); + } + Err(e) => { + println!(" ❌ Error: {e:?}"); + } + } + + // Test 3: Read storage value again to verify the write + println!("\n3. Reading storage value again to verify the write..."); + evm.0.ctx.set_tx( + TxEnv::builder() + .caller(user_address) + .kind(TxKind::Call(CUSTOM_PRECOMPILE_ADDRESS)) + .data(revm::primitives::Bytes::new()) // Empty data for read operation + .gas_limit(100_000) + .nonce(2) + .build() + .unwrap(), + ); + let verify_result: Result<_, MyError> = MainnetHandler::default().run(&mut evm); + + match verify_result { + Ok(revm::context::result::ExecutionResult::Success { + output, gas_used, .. + }) => { + println!(" ✓ Success! Gas used: {gas_used}"); + let data = output.data(); + let value = U256::from_be_slice(data); + println!(" 📖 Final storage value: {value}"); + if value == U256::from(42) { + println!(" 🎉 Storage write was successful!"); + } else { + println!(" ⚠️ Unexpected value in storage"); + } + } + Ok(revm::context::result::ExecutionResult::Revert { output, gas_used }) => { + println!(" ❌ Reverted! Gas used: {gas_used}, Output: {output:?}"); + } + Ok(revm::context::result::ExecutionResult::Halt { reason, gas_used }) => { + println!(" 🛑 Halted! Reason: {reason:?}, Gas used: {gas_used}"); + } + Err(e) => { + println!(" ❌ Error: {e:?}"); + } + } + + // Check final account states + println!("\n=== Final Account States ==="); + let final_context_mut = &mut evm.0.ctx; + + let user_info = final_context_mut.db_mut().basic(user_address).unwrap(); + if let Some(user_account) = user_info { + println!("👤 User balance: {} wei", user_account.balance); + println!(" Received 1 wei reward from precompile!"); + } + + let precompile_info = final_context_mut + .db_mut() + .basic(CUSTOM_PRECOMPILE_ADDRESS) + .unwrap(); + if let Some(precompile_account) = precompile_info { + println!("🔧 Precompile balance: {} wei", precompile_account.balance); + } + + // Check storage directly from the journal using the storage API + println!("📦 Note: Storage state has been modified via journal operations"); + + println!("\n=== Summary ==="); + println!("✅ Custom EVM with journal-accessing precompiles working correctly!"); + println!("📝 Precompile successfully read and wrote storage"); + println!("💸 Balance transfer from precompile to caller executed"); + println!("🔍 All operations properly recorded in the journal"); + println!("🎯 Used default mainnet handler for transaction execution"); + + Ok(()) +} diff --git a/examples/custom_precompile_journal/src/precompile_provider.rs b/examples/custom_precompile_journal/src/precompile_provider.rs new file mode 100644 index 0000000000..caab5682fd --- /dev/null +++ b/examples/custom_precompile_journal/src/precompile_provider.rs @@ -0,0 +1,216 @@ +//! Custom precompile provider implementation. 
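+//!
+//! The provider handles calls to its own address first and delegates everything else
+//! to the standard Ethereum precompiles. The custom precompile uses a minimal calldata
+//! protocol: empty input reads storage slot 0 of the precompile, while a 32-byte
+//! big-endian value writes that value to slot 0 and transfers 1 wei to the caller as a
+//! reward.
+//!
+//! A caller-side sketch of encoding the write input (illustrative only; `value` is a
+//! placeholder):
+//!
+//! ```ignore
+//! let value = U256::from(42);
+//! let calldata: Bytes = value.to_be_bytes_vec().into();
+//! ```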
+ +use revm::{ + context::Cfg, + context_interface::{ContextTr, JournalTr, LocalContextTr, Transaction}, + handler::{EthPrecompiles, PrecompileProvider}, + interpreter::{Gas, InputsImpl, InstructionResult, InterpreterResult}, + precompile::{PrecompileError, PrecompileOutput, PrecompileResult}, + primitives::{address, hardfork::SpecId, Address, Bytes, U256}, +}; +use std::boxed::Box; +use std::string::String; + +// Define our custom precompile address +pub const CUSTOM_PRECOMPILE_ADDRESS: Address = address!("0000000000000000000000000000000000000100"); + +// Custom storage key for our example +const STORAGE_KEY: U256 = U256::ZERO; + +/// Custom precompile provider that includes journal access functionality +#[derive(Debug, Clone)] +pub struct CustomPrecompileProvider { + inner: EthPrecompiles, + spec: SpecId, +} + +impl CustomPrecompileProvider { + pub fn new_with_spec(spec: SpecId) -> Self { + Self { + inner: EthPrecompiles::default(), + spec, + } + } +} + +impl PrecompileProvider for CustomPrecompileProvider +where + CTX: ContextTr>, +{ + type Output = InterpreterResult; + + fn set_spec(&mut self, spec: ::Spec) -> bool { + if spec == self.spec { + return false; + } + self.spec = spec; + // Create a new inner provider with the new spec + self.inner = EthPrecompiles::default(); + true + } + + fn run( + &mut self, + context: &mut CTX, + address: &Address, + inputs: &InputsImpl, + is_static: bool, + gas_limit: u64, + ) -> Result, String> { + // Check if this is our custom precompile + if *address == CUSTOM_PRECOMPILE_ADDRESS { + return Ok(Some(run_custom_precompile( + context, inputs, is_static, gas_limit, + )?)); + } + + // Otherwise, delegate to standard Ethereum precompiles + self.inner + .run(context, address, inputs, is_static, gas_limit) + } + + fn warm_addresses(&self) -> Box> { + // Include our custom precompile address along with standard ones + let mut addresses = vec![CUSTOM_PRECOMPILE_ADDRESS]; + addresses.extend(self.inner.warm_addresses()); + Box::new(addresses.into_iter()) + } + + fn contains(&self, address: &Address) -> bool { + *address == CUSTOM_PRECOMPILE_ADDRESS || self.inner.contains(address) + } +} + +/// Runs our custom precompile +fn run_custom_precompile( + context: &mut CTX, + inputs: &InputsImpl, + is_static: bool, + gas_limit: u64, +) -> Result { + let input_bytes = match &inputs.input { + revm::interpreter::CallInput::SharedBuffer(range) => { + if let Some(slice) = context.local().shared_memory_buffer_slice(range.clone()) { + slice.to_vec() + } else { + vec![] + } + } + revm::interpreter::CallInput::Bytes(bytes) => bytes.0.to_vec(), + }; + + // For this example, we'll implement a simple precompile that: + // - If called with empty data: reads a storage value + // - If called with 32 bytes: writes that value to storage and transfers 1 wei to the caller + + let result = if input_bytes.is_empty() { + // Read storage operation + handle_read_storage(context, gas_limit) + } else if input_bytes.len() == 32 { + if is_static { + return Err("Cannot modify state in static context".to_string()); + } + // Write storage operation + handle_write_storage(context, &input_bytes, gas_limit) + } else { + Err(PrecompileError::Other("Invalid input length".to_string())) + }; + + match result { + Ok(output) => { + let mut interpreter_result = InterpreterResult { + result: if output.reverted { + InstructionResult::Revert + } else { + InstructionResult::Return + }, + gas: Gas::new(gas_limit), + output: output.bytes, + }; + let underflow = interpreter_result.gas.record_cost(output.gas_used); 
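+            // `Gas::record_cost` returns true when the charge fits into the remaining
+            // gas, so a false value here means the precompile ran out of gas.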
+ if !underflow { + interpreter_result.result = InstructionResult::PrecompileOOG; + } + Ok(interpreter_result) + } + Err(e) => Ok(InterpreterResult { + result: if e.is_oog() { + InstructionResult::PrecompileOOG + } else { + InstructionResult::PrecompileError + }, + gas: Gas::new(gas_limit), + output: Bytes::new(), + }), + } +} + +/// Handles reading from storage +fn handle_read_storage(context: &mut CTX, gas_limit: u64) -> PrecompileResult { + // Base gas cost for reading storage + const BASE_GAS: u64 = 2_100; + + if gas_limit < BASE_GAS { + return Err(PrecompileError::OutOfGas); + } + + // Read from storage using the journal + let value = context + .journal_mut() + .sload(CUSTOM_PRECOMPILE_ADDRESS, STORAGE_KEY) + .map_err(|e| PrecompileError::Other(format!("Storage read failed: {e:?}")))? + .data; + + // Return the value as output + Ok(PrecompileOutput::new( + BASE_GAS, + value.to_be_bytes_vec().into(), + )) +} + +/// Handles writing to storage and transferring balance +fn handle_write_storage( + context: &mut CTX, + input: &[u8], + gas_limit: u64, +) -> PrecompileResult { + // Base gas cost for the operation + const BASE_GAS: u64 = 21_000; + const SSTORE_GAS: u64 = 20_000; + + if gas_limit < BASE_GAS + SSTORE_GAS { + return Err(PrecompileError::OutOfGas); + } + + // Parse the input as a U256 value + let value = U256::from_be_slice(input); + + // Store the value in the precompile's storage + context + .journal_mut() + .sstore(CUSTOM_PRECOMPILE_ADDRESS, STORAGE_KEY, value) + .map_err(|e| PrecompileError::Other(format!("Storage write failed: {e:?}")))?; + + // Get the caller address + let caller = context.tx().caller(); + + // Transfer 1 wei from the precompile to the caller as a reward + // First, ensure the precompile has balance + context + .journal_mut() + .balance_incr(CUSTOM_PRECOMPILE_ADDRESS, U256::from(1)) + .map_err(|e| PrecompileError::Other(format!("Balance increment failed: {e:?}")))?; + + // Then transfer to caller + let transfer_result = context + .journal_mut() + .transfer(CUSTOM_PRECOMPILE_ADDRESS, caller, U256::from(1)) + .map_err(|e| PrecompileError::Other(format!("Transfer failed: {e:?}")))?; + + if let Some(error) = transfer_result { + return Err(PrecompileError::Other(format!("Transfer error: {error:?}"))); + } + + // Return success with empty output + Ok(PrecompileOutput::new(BASE_GAS + SSTORE_GAS, Bytes::new())) +} diff --git a/examples/erc20_gas/src/handler.rs b/examples/erc20_gas/src/handler.rs index 9d70fa0ea0..c747319899 100644 --- a/examples/erc20_gas/src/handler.rs +++ b/examples/erc20_gas/src/handler.rs @@ -164,8 +164,7 @@ where effective_gas_price }; - let reward = - coinbase_gas_price.saturating_mul((gas.spent() - gas.refunded() as u64) as u128); + let reward = coinbase_gas_price.saturating_mul(gas.used() as u128); token_operation::(context, TREASURY, beneficiary, U256::from(reward))?; Ok(()) diff --git a/examples/erc20_gas/src/main.rs b/examples/erc20_gas/src/main.rs index 98ad133a58..58e2187e7c 100644 --- a/examples/erc20_gas/src/main.rs +++ b/examples/erc20_gas/src/main.rs @@ -9,6 +9,7 @@ use alloy_sol_types::SolValue; use anyhow::Result; use exec::transact_erc20evm_commit; use revm::{ + context::TxEnv, context_interface::{ result::{InvalidHeader, InvalidTransaction}, ContextTr, JournalTr, @@ -134,12 +135,15 @@ fn transfer(from: Address, to: Address, amount: U256, cache_db: &mut AlloyCacheD .modify_cfg_chained(|cfg| { cfg.spec = SpecId::CANCUN; }) - .modify_tx_chained(|tx| { - tx.caller = from; - tx.kind = TxKind::Call(to); - tx.value = amount; - 
tx.gas_price = 2; - }) + .with_tx( + TxEnv::builder() + .caller(from) + .kind(TxKind::Call(to)) + .value(amount) + .gas_price(2) + .build() + .unwrap(), + ) .modify_block_chained(|b| { b.basefee = 1; }) diff --git a/examples/uniswap_get_reserves/src/main.rs b/examples/uniswap_get_reserves/src/main.rs index 245c64b5be..4b4f9f2a4c 100644 --- a/examples/uniswap_get_reserves/src/main.rs +++ b/examples/uniswap_get_reserves/src/main.rs @@ -70,18 +70,20 @@ async fn main() -> anyhow::Result<()> { // Execute transaction without writing to the DB let result = evm - .transact_one(TxEnv { - // fill in missing bits of env struct - // change that to whatever caller you want to be - caller: address!("0000000000000000000000000000000000000000"), - // account you want to transact with - kind: TxKind::Call(pool_address), - // calldata formed via abigen - data: encoded.into(), - // transaction value in wei - value: U256::from(0), - ..Default::default() - }) + .transact_one( + TxEnv::builder() + // fill in missing bits of env struct + // change that to whatever caller you want to be + .caller(address!("0000000000000000000000000000000000000000")) + // account you want to transact with + .kind(TxKind::Call(pool_address)) + // calldata formed via abigen + .data(encoded.into()) + // transaction value in wei + .value(U256::from(0)) + .build() + .unwrap(), + ) .unwrap(); // Unpack output call enum into raw bytes diff --git a/examples/uniswap_v2_usdc_swap/src/main.rs b/examples/uniswap_v2_usdc_swap/src/main.rs index 817a9bb88f..4b374ab537 100644 --- a/examples/uniswap_v2_usdc_swap/src/main.rs +++ b/examples/uniswap_v2_usdc_swap/src/main.rs @@ -95,14 +95,15 @@ fn balance_of(token: Address, address: Address, alloy_db: &mut AlloyCacheDB) -> let mut evm = Context::mainnet().with_db(alloy_db).build_mainnet(); let result = evm - .transact_one(TxEnv { - // 0x1 because calling USDC proxy from zero address fails - caller: address!("0000000000000000000000000000000000000001"), - kind: TxKind::Call(token), - data: encoded.into(), - value: U256::from(0), - ..Default::default() - }) + .transact_one( + TxEnv::builder() + .caller(address!("0000000000000000000000000000000000000001")) + .kind(TxKind::Call(token)) + .data(encoded.into()) + .value(U256::from(0)) + .build() + .unwrap(), + ) .unwrap(); let value = match result { @@ -139,13 +140,15 @@ async fn get_amount_out( let mut evm = Context::mainnet().with_db(cache_db).build_mainnet(); let result = evm - .transact_one(TxEnv { - caller: address!("0000000000000000000000000000000000000000"), - kind: TxKind::Call(uniswap_v2_router), - data: encoded.into(), - value: U256::from(0), - ..Default::default() - }) + .transact_one( + TxEnv::builder() + .caller(address!("0000000000000000000000000000000000000000")) + .kind(TxKind::Call(uniswap_v2_router)) + .data(encoded.into()) + .value(U256::from(0)) + .build() + .unwrap(), + ) .unwrap(); let value = match result { @@ -171,13 +174,15 @@ fn get_reserves(pair_address: Address, cache_db: &mut AlloyCacheDB) -> Result<(U let mut evm = Context::mainnet().with_db(cache_db).build_mainnet(); let result = evm - .transact_one(TxEnv { - caller: address!("0000000000000000000000000000000000000000"), - kind: TxKind::Call(pair_address), - data: encoded.into(), - value: U256::from(0), - ..Default::default() - }) + .transact_one( + TxEnv::builder() + .caller(address!("0000000000000000000000000000000000000000")) + .kind(TxKind::Call(pair_address)) + .data(encoded.into()) + .value(U256::from(0)) + .build() + .unwrap(), + ) .unwrap(); let value = match result { @@ 
-218,14 +223,14 @@ fn swap( let mut evm = Context::mainnet().with_db(cache_db).build_mainnet(); - let tx = TxEnv { - caller: from, - kind: TxKind::Call(pool_address), - data: encoded.into(), - value: U256::from(0), - nonce: 1, - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(from) + .kind(TxKind::Call(pool_address)) + .data(encoded.into()) + .value(U256::from(0)) + .nonce(1) + .build() + .unwrap(); let ref_tx = evm.transact_commit(tx).unwrap(); @@ -252,13 +257,13 @@ fn transfer( let mut evm = Context::mainnet().with_db(cache_db).build_mainnet(); - let tx = TxEnv { - caller: from, - kind: TxKind::Call(token), - data: encoded.into(), - value: U256::from(0), - ..Default::default() - }; + let tx = TxEnv::builder() + .caller(from) + .kind(TxKind::Call(token)) + .data(encoded.into()) + .value(U256::from(0)) + .build() + .unwrap(); let ref_tx = evm.transact_commit(tx).unwrap(); let success: bool = match ref_tx { diff --git a/legacytests b/legacytests deleted file mode 160000 index 1f581b8ccd..0000000000 --- a/legacytests +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 1f581b8ccdc4c63acf5f2c5c1b155c690c32a8eb diff --git a/scripts/run-tests.sh b/scripts/run-tests.sh index 97e6c8c900..5826df605b 100755 --- a/scripts/run-tests.sh +++ b/scripts/run-tests.sh @@ -5,15 +5,19 @@ set -e # Version for the execution spec tests VERSION="v4.4.0" +DEVELOP_VERSION="fusaka-devnet-3@v1.0.0" + # Directories FIXTURES_DIR="test-fixtures" STABLE_DIR="$FIXTURES_DIR/stable" -DEVELOP_DIR="$FIXTURES_DIR/develop" +DEVELOP_DIR="$FIXTURES_DIR/develop" +LEGACY_DIR="$FIXTURES_DIR/legacytests" # URL and filenames FIXTURES_URL="https://github.com/ethereum/execution-spec-tests/releases/download" STABLE_TAR="fixtures_stable.tar.gz" -DEVELOP_TAR="fixtures_develop.tar.gz" +DEVELOP_TAR="fixtures_fusaka-devnet-3.tar.gz" +LEGACY_REPO_URL="https://github.com/ethereum/legacytests.git" # Print usage information and exit usage() { @@ -58,7 +62,7 @@ clean() { # Check if all required fixture directories exist check_fixtures() { - if [ -d "$STABLE_DIR" ] && [ -d "$DEVELOP_DIR" ]; then + if [ -d "$STABLE_DIR" ] && [ -d "$DEVELOP_DIR" ] && [ -d "$LEGACY_DIR" ]; then return 0 else return 1 @@ -85,13 +89,18 @@ download_and_extract() { # Download all fixtures download_fixtures() { echo "Creating fixtures directory structure..." - mkdir -p "$STABLE_DIR" "$DEVELOP_DIR" + mkdir -p "$STABLE_DIR" "$DEVELOP_DIR" "$LEGACY_DIR" download_and_extract "$STABLE_DIR" "$STABLE_TAR" "stable" "$VERSION" - download_and_extract "$DEVELOP_DIR" "$DEVELOP_TAR" "develop" "$VERSION" + download_and_extract "$DEVELOP_DIR" "$DEVELOP_TAR" "develop" "$DEVELOP_VERSION" echo "Cleaning up tar files..." rm "${FIXTURES_DIR}/${STABLE_TAR}" "${FIXTURES_DIR}/${DEVELOP_TAR}" + + # Clone legacytests repository + echo "Cloning legacytests repository..." + git clone --depth 1 "$LEGACY_REPO_URL" "$LEGACY_DIR" + echo "Fixtures download and extraction complete." } @@ -123,6 +132,9 @@ run_tests() { echo "Running develop statetests..." $RUST_RUNNER run $CARGO_OPTS -p revme -- statetest "$DEVELOP_DIR/state_tests" + + echo "Running legacy tests..." + $RUST_RUNNER run $CARGO_OPTS -p revme -- statetest "$LEGACY_DIR/Cancun/GeneralStateTests" } ##############################