diff --git a/buf.gen.yaml b/buf.gen.yaml index dfe4a5e848..42a48ab8f4 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -5,6 +5,7 @@ inputs: - directory: sei-ibc-go/proto - directory: sei-tendermint/proto - directory: sei-wasmd/proto + - directory: sei-iavl/proto plugins: - local: - ./build/proto/gocosmos/protoc-gen-gocosmos diff --git a/buf.yaml b/buf.yaml index 8ae16d29c3..6a071895d1 100644 --- a/buf.yaml +++ b/buf.yaml @@ -10,6 +10,7 @@ modules: - path: sei-tendermint/proto - path: sei-tendermint/internal - path: sei-wasmd/proto + - path: sei-iavl/proto lint: use: diff --git a/cmd/seid/cmd/debug.go b/cmd/seid/cmd/debug.go index 6ff647f99d..a297939a4e 100644 --- a/cmd/seid/cmd/debug.go +++ b/cmd/seid/cmd/debug.go @@ -13,7 +13,7 @@ import ( "strings" "github.com/cosmos/cosmos-sdk/version" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/spf13/cobra" dbm "github.com/tendermint/tm-db" ) diff --git a/go.mod b/go.mod index aa10488ec0..5b55e92f18 100644 --- a/go.mod +++ b/go.mod @@ -12,12 +12,12 @@ require ( github.com/confio/ics23/go v0.9.0 github.com/cosmos/cosmos-sdk v0.45.10 github.com/cosmos/go-bip39 v1.0.0 - github.com/cosmos/iavl v0.21.0-alpha.1.0.20230904092046-df3db2d96583 github.com/dvsekhvalnov/jose2go v1.5.0 github.com/ethereum/go-ethereum v1.16.1 github.com/go-playground/validator/v10 v10.11.1 github.com/gogo/protobuf v1.3.3 github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/golang/mock v1.7.0-rc.1 github.com/golang/protobuf v1.5.4 github.com/golangci/golangci-lint v1.46.0 github.com/google/btree v1.1.3 @@ -57,6 +57,7 @@ require ( go.opentelemetry.io/otel/sdk v1.38.0 go.opentelemetry.io/otel/sdk/metric v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 + golang.org/x/crypto v0.40.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa golang.org/x/sync v0.16.0 golang.org/x/sys v0.35.0 @@ -171,7 +172,6 @@ require ( github.com/gogo/gateway v1.1.0 // indirect github.com/golang/glog v1.1.0 // indirect 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -320,7 +320,6 @@ require ( go.opentelemetry.io/otel/exporters/jaeger v1.9.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.40.0 // indirect golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect golang.org/x/mod v0.25.0 // indirect golang.org/x/net v0.41.0 // indirect @@ -343,7 +342,6 @@ replace ( github.com/btcsuite/btcd => github.com/btcsuite/btcd v0.23.2 github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23/go v0.8.0 github.com/cosmos/cosmos-sdk => ./sei-cosmos - github.com/cosmos/iavl => github.com/sei-protocol/sei-iavl v0.2.0 github.com/ethereum/go-ethereum => github.com/sei-protocol/go-ethereum v1.15.7-sei-7.0.20250929182230-93350978bb7c github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 // Latest goleveldb is broken, we have to stick to this version diff --git a/go.sum b/go.sum index ca617c702a..56bcfa7c3d 100644 --- a/go.sum +++ b/go.sum @@ -2001,8 +2001,6 @@ github.com/sei-protocol/go-ethereum v1.15.7-sei-7.0.20250929182230-93350978bb7c github.com/sei-protocol/go-ethereum v1.15.7-sei-7.0.20250929182230-93350978bb7c/go.mod h1:+S9k+jFzlyVTNcYGvqFhzN/SFhI6vA+aOY4T5tLSPL0= github.com/sei-protocol/goutils v0.0.2 h1:Bfa7Sv+4CVLNM20QcpvGb81B8C5HkQC/kW1CQpIbXDA= github.com/sei-protocol/goutils v0.0.2/go.mod h1:iYE2DuJfEnM+APPehr2gOUXfuLuPsVxorcDO+Tzq9q8= -github.com/sei-protocol/sei-iavl v0.2.0 h1:OisPjXiDT+oe+aeckzDEFgkZCYuUjHgs/PP8DPicN+I= -github.com/sei-protocol/sei-iavl v0.2.0/go.mod h1:qRf8QYUPfrAO7K6VDB2B2l/N7K5L76OorioGBcJBIbw= github.com/sei-protocol/sei-load 
v0.0.0-20251007135253-78fbdc141082 h1:f2sY8OcN60UL1/6POx+HDMZ4w04FTZtSScnrFSnGZHg= github.com/sei-protocol/sei-load v0.0.0-20251007135253-78fbdc141082/go.mod h1:V0fNURAjS6A8+sA1VllegjNeSobay3oRUW5VFZd04bA= github.com/sei-protocol/sei-tm-db v0.0.5 h1:3WONKdSXEqdZZeLuWYfK5hP37TJpfaUa13vAyAlvaQY= diff --git a/scripts/protoc.sh b/scripts/protoc.sh index e26745655e..5017ea81ea 100755 --- a/scripts/protoc.sh +++ b/scripts/protoc.sh @@ -33,8 +33,12 @@ cp -rf ./build/proto/gocosmos/github.com/sei-protocol/sei-chain/* ./ cp -rf ./build/proto/gocosmos/github.com/cosmos/cosmos-sdk/* ./sei-cosmos cp -rf ./build/proto/gocosmos/github.com/sei-protocol/sei-chain/sei-wasmd/* ./sei-wasmd -# Use gogofaster for tendermint because that's the generator it is using currently. +# Use gogofaster for tendermint and iavl because that's the generator they used originally. +# See: +# * https://github.com/sei-protocol/sei-tendermint/blob/46d0a598a7f5c67cbdefea37c8da18df2c25d184/buf.gen.yaml#L3 +# * https://github.com/sei-protocol/sei-iavl/blob/ff17b3473ee2438caa1777930a0bf73d267527fa/buf.gen.yaml#L9 cp -rf ./build/proto/gogofaster/github.com/tendermint/tendermint/* ./sei-tendermint +cp -rf ./build/proto/gogofaster/github.com/sei-protocol/sei-chain/sei-iavl/* ./sei-iavl rm -rf ./build/proto diff --git a/sei-cosmos/go.mod b/sei-cosmos/go.mod index 5fc05eb49e..cb253ede39 100644 --- a/sei-cosmos/go.mod +++ b/sei-cosmos/go.mod @@ -12,7 +12,6 @@ require ( github.com/confio/ics23/go v0.9.0 github.com/cosmos/btcutil v1.0.5 github.com/cosmos/go-bip39 v1.0.0 - github.com/cosmos/iavl v0.21.0-alpha.1.0.20230904092046-df3db2d96583 github.com/cosmos/ledger-cosmos-go v0.12.2 github.com/ethereum/go-ethereum v1.13.2 github.com/gogo/gateway v1.1.0 @@ -175,7 +174,6 @@ replace ( github.com/99designs/keyring => github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 github.com/btcsuite/btcd => github.com/btcsuite/btcd v0.23.2 github.com/confio/ics23/go => github.com/cosmos/cosmos-sdk/ics23/go v0.8.0 - 
github.com/cosmos/iavl => github.com/sei-protocol/sei-iavl v0.2.0 github.com/ethereum/go-ethereum => github.com/sei-protocol/go-ethereum v1.15.7-sei-7.0.20250929182230-93350978bb7c // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 diff --git a/sei-cosmos/store/cache/cache_test.go b/sei-cosmos/store/cache/cache_test.go index 635f098c2e..cdbf2fce08 100644 --- a/sei-cosmos/store/cache/cache_test.go +++ b/sei-cosmos/store/cache/cache_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" diff --git a/sei-cosmos/store/iavl/store.go b/sei-cosmos/store/iavl/store.go index 727466158b..66bd86c128 100644 --- a/sei-cosmos/store/iavl/store.go +++ b/sei-cosmos/store/iavl/store.go @@ -8,7 +8,7 @@ import ( "time" ics23 "github.com/confio/ics23/go" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" diff --git a/sei-cosmos/store/iavl/store_test.go b/sei-cosmos/store/iavl/store_test.go index 434a827a53..f70fcfd2ff 100644 --- a/sei-cosmos/store/iavl/store_test.go +++ b/sei-cosmos/store/iavl/store_test.go @@ -7,7 +7,7 @@ import ( "github.com/cosmos/cosmos-sdk/store/cachekv" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" diff --git a/sei-cosmos/store/iavl/tree.go b/sei-cosmos/store/iavl/tree.go index efc62713f4..98253f5245 100644 --- a/sei-cosmos/store/iavl/tree.go +++ b/sei-cosmos/store/iavl/tree.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cosmos/cosmos-sdk/store/types" - "github.com/cosmos/iavl" + iavl 
"github.com/sei-protocol/sei-chain/sei-iavl" ) var ( diff --git a/sei-cosmos/store/iavl/tree_test.go b/sei-cosmos/store/iavl/tree_test.go index 02d19a97bf..a12bdfe50e 100644 --- a/sei-cosmos/store/iavl/tree_test.go +++ b/sei-cosmos/store/iavl/tree_test.go @@ -3,7 +3,7 @@ package iavl import ( "testing" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" ) diff --git a/sei-cosmos/store/prefix/store_test.go b/sei-cosmos/store/prefix/store_test.go index 2a78a48132..301fc903e7 100644 --- a/sei-cosmos/store/prefix/store_test.go +++ b/sei-cosmos/store/prefix/store_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - tiavl "github.com/cosmos/iavl" + tiavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/cosmos/cosmos-sdk/store/dbadapter" "github.com/cosmos/cosmos-sdk/store/gaskv" diff --git a/sei-cosmos/store/rootmulti/proof_test.go b/sei-cosmos/store/rootmulti/proof_test.go index 746e9e963b..98e7281c30 100644 --- a/sei-cosmos/store/rootmulti/proof_test.go +++ b/sei-cosmos/store/rootmulti/proof_test.go @@ -3,7 +3,7 @@ package rootmulti import ( "testing" - iavltree "github.com/cosmos/iavl" + iavltree "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" diff --git a/sei-cosmos/store/rootmulti/store.go b/sei-cosmos/store/rootmulti/store.go index 21d8dfb774..ec8722ce38 100644 --- a/sei-cosmos/store/rootmulti/store.go +++ b/sei-cosmos/store/rootmulti/store.go @@ -11,10 +11,10 @@ import ( "github.com/armon/go-metrics" "github.com/cosmos/cosmos-sdk/telemetry" - iavltree "github.com/cosmos/iavl" protoio "github.com/gogo/protobuf/io" gogotypes "github.com/gogo/protobuf/types" "github.com/pkg/errors" + iavltree "github.com/sei-protocol/sei-chain/sei-iavl" abci 
"github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proto/tendermint/crypto" diff --git a/sei-cosmos/store/types/iterator_test.go b/sei-cosmos/store/types/iterator_test.go index 9512296325..5483b6a9aa 100644 --- a/sei-cosmos/store/types/iterator_test.go +++ b/sei-cosmos/store/types/iterator_test.go @@ -3,7 +3,7 @@ package types_test import ( "testing" - iavltree "github.com/cosmos/iavl" + iavltree "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" diff --git a/sei-cosmos/storev2/commitment/store.go b/sei-cosmos/storev2/commitment/store.go index bde5968e70..055a41a8e3 100644 --- a/sei-cosmos/storev2/commitment/store.go +++ b/sei-cosmos/storev2/commitment/store.go @@ -11,8 +11,8 @@ import ( "github.com/cosmos/cosmos-sdk/store/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/cosmos-sdk/types/kv" - "github.com/cosmos/iavl" sctypes "github.com/sei-protocol/sei-chain/sei-db/sc/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proto/tendermint/crypto" diff --git a/sei-db/proto/changelog.pb.go b/sei-db/proto/changelog.pb.go index 2193b17254..50c24aad0c 100644 --- a/sei-db/proto/changelog.pb.go +++ b/sei-db/proto/changelog.pb.go @@ -9,9 +9,9 @@ import ( math "math" math_bits "math/bits" - "github.com/cosmos/iavl" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/sei-db/sc/memiavl/benchmark_test.go b/sei-db/sc/memiavl/benchmark_test.go index 8141d63ce5..25fae1aa04 100644 --- a/sei-db/sc/memiavl/benchmark_test.go +++ b/sei-db/sc/memiavl/benchmark_test.go @@ -8,7 +8,7 @@ import ( "sort" "testing" - iavlcache "github.com/cosmos/iavl/cache" + iavlcache "github.com/sei-protocol/sei-chain/sei-iavl/cache" "github.com/stretchr/testify/require" "github.com/tidwall/btree" ) diff --git a/sei-db/sc/memiavl/db.go b/sei-db/sc/memiavl/db.go index a11a2b32d1..66ace7e02f 100644 --- a/sei-db/sc/memiavl/db.go +++ b/sei-db/sc/memiavl/db.go @@ -18,13 +18,13 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" - "github.com/cosmos/iavl" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/stream/changelog" "github.com/sei-protocol/sei-chain/sei-db/stream/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" ) const LockFileName = "LOCK" diff --git a/sei-db/sc/memiavl/db_test.go b/sei-db/sc/memiavl/db_test.go index d917655e1b..07e477ff58 100644 --- a/sei-db/sc/memiavl/db_test.go +++ b/sei-db/sc/memiavl/db_test.go @@ -11,11 +11,11 @@ import ( "testing" "time" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" ) diff --git a/sei-db/sc/memiavl/multitree.go b/sei-db/sc/memiavl/multitree.go index fa91a10078..b07c5b98f9 100644 --- a/sei-db/sc/memiavl/multitree.go +++ b/sei-db/sc/memiavl/multitree.go @@ -12,14 +12,13 @@ import ( "time" "github.com/alitto/pond" - "golang.org/x/exp/slices" - - 
"github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/stream/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + "golang.org/x/exp/slices" ) const ( diff --git a/sei-db/sc/memiavl/proof.go b/sei-db/sc/memiavl/proof.go index 575225042d..f7919d04ff 100644 --- a/sei-db/sc/memiavl/proof.go +++ b/sei-db/sc/memiavl/proof.go @@ -8,7 +8,7 @@ import ( "math" ics23 "github.com/confio/ics23/go" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" ) /* diff --git a/sei-db/sc/memiavl/snapshot_catchup_test.go b/sei-db/sc/memiavl/snapshot_catchup_test.go index d4c45be537..13976bb1bd 100644 --- a/sei-db/sc/memiavl/snapshot_catchup_test.go +++ b/sei-db/sc/memiavl/snapshot_catchup_test.go @@ -4,9 +4,9 @@ import ( "testing" "time" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" ) diff --git a/sei-db/sc/memiavl/snapshot_pipeline_test.go b/sei-db/sc/memiavl/snapshot_pipeline_test.go index 8c1b84e82b..6527825ba5 100644 --- a/sei-db/sc/memiavl/snapshot_pipeline_test.go +++ b/sei-db/sc/memiavl/snapshot_pipeline_test.go @@ -4,9 +4,9 @@ import ( "context" "testing" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/sc/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" ) diff --git a/sei-db/sc/memiavl/snapshot_write_test.go b/sei-db/sc/memiavl/snapshot_write_test.go index 474e84d783..1a11fba240 100644 --- a/sei-db/sc/memiavl/snapshot_write_test.go +++ b/sei-db/sc/memiavl/snapshot_write_test.go @@ -6,9 +6,9 @@ import ( "path/filepath" 
"testing" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" ) diff --git a/sei-db/sc/memiavl/tree.go b/sei-db/sc/memiavl/tree.go index 2d110f4ced..bca5cec399 100644 --- a/sei-db/sc/memiavl/tree.go +++ b/sei-db/sc/memiavl/tree.go @@ -8,10 +8,10 @@ import ( "sync" ics23 "github.com/confio/ics23/go" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/sc/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" dbm "github.com/tendermint/tm-db" ) diff --git a/sei-db/sc/memiavl/tree_test.go b/sei-db/sc/memiavl/tree_test.go index 17675f59a9..04a9cd57e8 100644 --- a/sei-db/sc/memiavl/tree_test.go +++ b/sei-db/sc/memiavl/tree_test.go @@ -6,7 +6,7 @@ import ( "strconv" "testing" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" db "github.com/tendermint/tm-db" ) diff --git a/sei-db/ss/pebbledb/hash_test.go b/sei-db/ss/pebbledb/hash_test.go index f9061c9e3b..23c45b6aba 100644 --- a/sei-db/ss/pebbledb/hash_test.go +++ b/sei-db/ss/pebbledb/hash_test.go @@ -8,10 +8,10 @@ import ( "time" "github.com/cockroachdb/pebble" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/ss/util" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/sei-db/ss/store_test.go b/sei-db/ss/store_test.go index a28889aaf4..9db08d4c41 100644 --- a/sei-db/ss/store_test.go +++ b/sei-db/ss/store_test.go @@ -6,10 +6,10 @@ import ( "path/filepath" "testing" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" 
"github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" ) diff --git a/sei-db/ss/test/storage_bench_suite.go b/sei-db/ss/test/storage_bench_suite.go index 64ebeb90d6..ee5376274e 100644 --- a/sei-db/ss/test/storage_bench_suite.go +++ b/sei-db/ss/test/storage_bench_suite.go @@ -6,11 +6,10 @@ import ( "sort" "testing" - "github.com/stretchr/testify/require" - - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/ss/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + "github.com/stretchr/testify/require" ) // StorageBenchSuite defines a reusable benchmark suite for all storage backends. diff --git a/sei-db/ss/test/storage_test_suite.go b/sei-db/ss/test/storage_test_suite.go index 80a9b4d270..f8fd1e55b0 100644 --- a/sei-db/ss/test/storage_test_suite.go +++ b/sei-db/ss/test/storage_test_suite.go @@ -4,10 +4,10 @@ import ( "fmt" "sync" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/config" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/ss/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/suite" "golang.org/x/exp/slices" ) diff --git a/sei-db/ss/test/utils.go b/sei-db/ss/test/utils.go index bc60e016f2..3ce9c30dd7 100644 --- a/sei-db/ss/test/utils.go +++ b/sei-db/ss/test/utils.go @@ -3,9 +3,9 @@ package sstest import ( "fmt" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/ss/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" ) // Fills the db with multiple keys each with different versions diff --git a/sei-db/stream/changelog/changelog_test.go b/sei-db/stream/changelog/changelog_test.go index e15b1e076f..860b39f4b9 100644 --- a/sei-db/stream/changelog/changelog_test.go +++ 
b/sei-db/stream/changelog/changelog_test.go @@ -6,9 +6,9 @@ import ( "path/filepath" "testing" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" "github.com/tidwall/wal" ) diff --git a/sei-db/stream/changelog/utils.go b/sei-db/stream/changelog/utils.go index e3e11f3a02..2500e57dd1 100644 --- a/sei-db/stream/changelog/utils.go +++ b/sei-db/stream/changelog/utils.go @@ -3,12 +3,13 @@ package changelog import ( "bytes" "encoding/binary" + "errors" "math" "os" "path/filepath" "unsafe" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/tidwall/gjson" "github.com/tidwall/wal" ) @@ -44,7 +45,7 @@ func truncateCorruptedTail(path string, format wal.LogFormat) error { } else { n, err = loadNextBinaryEntry(data) } - if err == wal.ErrCorrupt { + if errors.Is(err, wal.ErrCorrupt) { break } if err != nil { diff --git a/sei-db/tools/bench/benchmark.go b/sei-db/tools/bench/benchmark.go index 89a3c3006b..bbee44e58a 100644 --- a/sei-db/tools/bench/benchmark.go +++ b/sei-db/tools/bench/benchmark.go @@ -9,10 +9,10 @@ import ( "sync/atomic" "time" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/ss/types" "github.com/sei-protocol/sei-chain/sei-db/tools/utils" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" ) // writeToDBConcurrently generates random write load against the db diff --git a/sei-db/tools/utils/utils.go b/sei-db/tools/utils/utils.go index 6df14bfffe..8948bf72ef 100644 --- a/sei-db/tools/utils/utils.go +++ b/sei-db/tools/utils/utils.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/cosmos/iavl" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" dbm "github.com/tendermint/tm-db" ) diff --git a/sei-iavl/Makefile b/sei-iavl/Makefile new file mode 100644 index 0000000000..ecba766696 --- 
/dev/null +++ b/sei-iavl/Makefile @@ -0,0 +1,129 @@ +GOTOOLS := github.com/golangci/golangci-lint/cmd/golangci-lint +VERSION := $(shell echo $(shell git describe --tags) | sed 's/^v//') +COMMIT := $(shell git log -1 --format='%H') +BRANCH=$(shell git rev-parse --abbrev-ref HEAD) +DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf +DOCKER := $(shell which docker) +HTTPS_GIT := https://github.com/cosmos/iavl.git + +PDFFLAGS := -pdf --nodefraction=0.1 +CMDFLAGS := -ldflags -X TENDERMINT_IAVL_COLORS_ON=on +LDFLAGS := -ldflags "-X github.com/cosmos/iavl.Version=$(VERSION) -X github.com/cosmos/iavl.Commit=$(COMMIT) -X github.com/cosmos/iavl.Branch=$(BRANCH)" + +all: lint test install + +install: +ifeq ($(COLORS_ON),) + go install ./cmd/iaviewer +else + go install $(CMDFLAGS) ./cmd/iaviewer +endif +.PHONY: install + +test-short: + @echo "--> Running go test" + @go test ./... $(LDFLAGS) -v --race --short +.PHONY: test-short + +test: + @echo "--> Running go test" + @go test ./... $(LDFLAGS) -v +.PHONY: test + +tools: + go get -v $(GOTOOLS) +.PHONY: tools + +format: + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -format +.PHONY: format + +# look into .golangci.yml for enabling / disabling linters +lint: + @echo "--> Running linter" + @golangci-lint run + @go mod verify +.PHONY: lint + +# bench is the basic tests that shouldn't crash an aws instance +bench: + cd benchmarks && \ + go test $(LDFLAGS) -tags cleveldb,rocksdb,boltdb,badgerdb -run=NOTEST -bench=Small . && \ + go test $(LDFLAGS) -tags cleveldb,rocksdb,boltdb,badgerdb -run=NOTEST -bench=Medium . && \ + go test $(LDFLAGS) -run=NOTEST -bench=RandomBytes . 
+.PHONY: bench + +# fullbench is extra tests needing lots of memory and to run locally +fullbench: + cd benchmarks && \ + go test $(LDFLAGS) -run=NOTEST -bench=RandomBytes . && \ + go test $(LDFLAGS) -tags cleveldb,rocksdb,boltdb,badgerdb -run=NOTEST -bench=Small . && \ + go test $(LDFLAGS) -tags cleveldb,rocksdb,boltdb,badgerdb -run=NOTEST -bench=Medium . && \ + go test $(LDFLAGS) -tags cleveldb,rocksdb,boltdb,badgerdb -run=NOTEST -timeout=30m -bench=Large . && \ + go test $(LDFLAGS) -run=NOTEST -bench=Mem . && \ + go test $(LDFLAGS) -run=NOTEST -timeout=60m -bench=LevelDB . +.PHONY: fullbench + +# note that this just profiles the in-memory version, not persistence +profile: + cd benchmarks && \ + go test $(LDFLAGS) -bench=Mem -cpuprofile=cpu.out -memprofile=mem.out . && \ + go tool pprof ${PDFFLAGS} benchmarks.test cpu.out > cpu.pdf && \ + go tool pprof --alloc_space ${PDFFLAGS} benchmarks.test mem.out > mem_space.pdf && \ + go tool pprof --alloc_objects ${PDFFLAGS} benchmarks.test mem.out > mem_obj.pdf +.PHONY: profile + +explorecpu: + cd benchmarks && \ + go tool pprof benchmarks.test cpu.out +.PHONY: explorecpu + +exploremem: + cd benchmarks && \ + go tool pprof --alloc_objects benchmarks.test mem.out +.PHONY: exploremem + +delve: + dlv test ./benchmarks -- -test.bench=. 
+.PHONY: delve + +all: tools +.PHONY: all + +tools: protobuf +.PHONY: tools + +check: check_tools +.PHONY: check + +check_tools: + @# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" +.PHONY: check_tools + +tools-clean: + rm -f $(CERTSTRAP) $(PROTOBUF) $(GOX) $(GOODMAN) + rm -rf /usr/local/include/google/protobuf + rm -f /usr/local/bin/protoc +.PHONY: tooks-clean + +### +# Non Go tools +### + +.PHONY: lint test tools install delve exploremem explorecpu profile fullbench bench proto-gen proto-lint proto-check-breaking + +proto-lint: + @$(DOCKER_BUF) check lint --error-format=json +.PHONY: proto-lint + +proto-check-breaking: + @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master +.PHONY: proto-check-breaking + +proto-gen: + @echo "Generating Protobuf files" + $(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace tendermintdev/sdk-proto-gen:master sh scripts/protocgen.sh +.PHONY: proto-gen-d diff --git a/sei-iavl/README.md b/sei-iavl/README.md new file mode 100644 index 0000000000..557f8c65cc --- /dev/null +++ b/sei-iavl/README.md @@ -0,0 +1,22 @@ +# IAVL+ Tree + + +[![version](https://img.shields.io/github/tag/cosmos/iavl.svg)](https://github.com/cosmos/iavl/releases/latest) +[![license](https://img.shields.io/github/license/cosmos/iavl.svg)](https://github.com/cosmos/iavl/blob/master/LICENSE) +[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/cosmos/iavl) +[![codecov](https://codecov.io/gh/cosmos/iavl/branch/master/graph/badge.svg)](https://codecov.io/gh/cosmos/iavl) +![Lint](https://github.com/cosmos/iavl/workflows/Lint/badge.svg?branch=master) +![Test](https://github.com/cosmos/iavl/workflows/Test/badge.svg?branch=master) +[![Discord 
chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/AzefAFd) + +**Note: Requires Go 1.17+** + +A versioned, snapshottable (immutable) AVL+ tree for persistent data. + +The purpose of this data structure is to provide persistent storage for key-value pairs (say to store account balances) such that a deterministic merkle root hash can be computed. The tree is balanced using a variant of the [AVL algorithm](http://en.wikipedia.org/wiki/AVL_tree) so all operations are O(log(n)). + +Nodes of this tree are immutable and indexed by their hash. Thus any node serves as an immutable snapshot which lets us stage uncommitted transactions from the mempool cheaply, and we can instantly roll back to the last committed state to process transactions of a newly committed block (which may not be the same set of transactions as those from the mempool). + +In an AVL tree, the heights of the two child subtrees of any node differ by at most one. Whenever this condition is violated upon an update, the tree is rebalanced by creating O(log(n)) new nodes that point to unmodified nodes of the old tree. In the original AVL algorithm, inner nodes can also hold key-value pairs. The AVL+ algorithm (note the plus) modifies the AVL algorithm to keep all values on leaf nodes, while only using branch-nodes to store keys. This simplifies the algorithm while keeping the merkle hash trail short. + +In Ethereum, the analog is [Patricia tries](http://en.wikipedia.org/wiki/Radix_tree). There are tradeoffs. Keys do not need to be hashed prior to insertion in IAVL+ trees, so this provides faster iteration in the key space which may benefit some applications. The logic is simpler to implement, requiring only two types of nodes -- inner nodes and leaf nodes. On the other hand, while IAVL+ trees provide a deterministic merkle root hash, it depends on the order of transactions. 
In practice this shouldn't be a problem, since you can efficiently encode the tree structure when serializing the tree contents. diff --git a/sei-iavl/basic_test.go b/sei-iavl/basic_test.go new file mode 100644 index 0000000000..228809c06a --- /dev/null +++ b/sei-iavl/basic_test.go @@ -0,0 +1,577 @@ +// nolint: errcheck +package iavl + +import ( + "bytes" + "encoding/hex" + mrand "math/rand" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" +) + +func TestBasic(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + up, err := tree.Set([]byte("1"), []byte("one")) + require.NoError(t, err) + if up { + t.Error("Did not expect an update (should have been create)") + } + up, err = tree.Set([]byte("2"), []byte("two")) + require.NoError(t, err) + if up { + t.Error("Did not expect an update (should have been create)") + } + up, err = tree.Set([]byte("2"), []byte("TWO")) + require.NoError(t, err) + if !up { + t.Error("Expected an update") + } + up, err = tree.Set([]byte("5"), []byte("five")) + require.NoError(t, err) + if up { + t.Error("Did not expect an update (should have been create)") + } + + // Test 0x00 + { + key := []byte{0x00} + expected := "" + + idx, val, err := tree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + if val != nil { + t.Error("Expected no value to exist") + } + if idx != 0 { + t.Errorf("Unexpected idx %x", idx) + } + if string(val) != expected { + t.Errorf("Unexpected value %s", val) + } + + val, err = tree.Get(key) + if val != nil { + t.Error("Fast method - expected no value to exist") + } + if string(val) != expected { + t.Errorf("Fast method - Unexpected value %s", val) + } + } + + // Test "1" + { + key := []byte("1") + expected := "one" + + idx, val, err := tree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + if val == nil { + t.Error("Expected value to exist") + } + if idx != 0 { + t.Errorf("Unexpected idx %x", 
idx) + } + if string(val) != expected { + t.Errorf("Unexpected value %s", val) + } + + val, err = tree.Get(key) + require.NoError(t, err) + if val == nil { + t.Error("Fast method - expected value to exist") + } + if string(val) != expected { + t.Errorf("Fast method - Unexpected value %s", val) + } + } + + // Test "2" + { + key := []byte("2") + expected := "TWO" + + idx, val, err := tree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + if val == nil { + t.Error("Expected value to exist") + } + if idx != 1 { + t.Errorf("Unexpected idx %x", idx) + } + if string(val) != expected { + t.Errorf("Unexpected value %s", val) + } + + val, err = tree.Get(key) + if val == nil { + t.Error("Fast method - expected value to exist") + } + if string(val) != expected { + t.Errorf("Fast method - Unexpected value %s", val) + } + } + + // Test "4" + { + key := []byte("4") + expected := "" + + idx, val, err := tree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + if val != nil { + t.Error("Expected no value to exist") + } + if idx != 2 { + t.Errorf("Unexpected idx %x", idx) + } + if string(val) != expected { + t.Errorf("Unexpected value %s", val) + } + + val, err = tree.Get(key) + if val != nil { + t.Error("Fast method - expected no value to exist") + } + if string(val) != expected { + t.Errorf("Fast method - Unexpected value %s", val) + } + } + + // Test "6" + { + key := []byte("6") + expected := "" + + idx, val, err := tree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + if val != nil { + t.Error("Expected no value to exist") + } + if idx != 3 { + t.Errorf("Unexpected idx %x", idx) + } + if string(val) != expected { + t.Errorf("Unexpected value %s", val) + } + + val, err = tree.Get(key) + if val != nil { + t.Error("Fast method - expected no value to exist") + } + if string(val) != expected { + t.Errorf("Fast method - Unexpected value %s", val) + } + } +} + +func TestUnit(t *testing.T) { + + expectHash := func(tree *ImmutableTree, hashCount int64) 
{ + // ensure number of new hash calculations is as expected. + hash, count, err := tree.root.hashWithCount() + require.NoError(t, err) + if count != hashCount { + t.Fatalf("Expected %v new hashes, got %v", hashCount, count) + } + // nuke hashes and reconstruct hash, ensure it's the same. + tree.root.traverse(tree, true, func(node *Node) bool { + node.hash = nil + return false + }) + // ensure that the new hash after nuking is the same as the old. + newHash, _, err := tree.root.hashWithCount() + require.NoError(t, err) + if !bytes.Equal(hash, newHash) { + t.Fatalf("Expected hash %v but got %v after nuking", hash, newHash) + } + } + + expectSet := func(tree *MutableTree, i int, repr string, hashCount int64) { + origNode := tree.ImmutableTree().root + updated, err := tree.Set(i2b(i), []byte{}) + require.NoError(t, err) + // ensure node was added & structure is as expected. + if updated || P(tree.ImmutableTree().root) != repr { + t.Fatalf("Adding %v to %v:\nExpected %v\nUnexpectedly got %v updated:%v", + i, P(origNode), repr, P(tree.ImmutableTree().root), updated) + } + // ensure hash calculation requirements + expectHash(tree.ImmutableTree(), hashCount) + tree.ImmutableTree().root = origNode + } + + expectRemove := func(tree *MutableTree, i int, repr string, hashCount int64) { + origNode := tree.ImmutableTree().root + value, removed, err := tree.Remove(i2b(i)) + require.NoError(t, err) + // ensure node was added & structure is as expected. 
+ if len(value) != 0 || !removed || P(tree.ImmutableTree().root) != repr { + t.Fatalf("Removing %v from %v:\nExpected %v\nUnexpectedly got %v value:%v removed:%v", + i, P(origNode), repr, P(tree.ImmutableTree().root), value, removed) + } + // ensure hash calculation requirements + expectHash(tree.ImmutableTree(), hashCount) + tree.ImmutableTree().root = origNode + } + + // Test Set cases: + + // Case 1: + t1, err := T(N(4, 20)) + + require.NoError(t, err) + expectSet(t1, 8, "((4 8) 20)", 3) + expectSet(t1, 25, "(4 (20 25))", 3) + + t2, err := T(N(4, N(20, 25))) + + require.NoError(t, err) + expectSet(t2, 8, "((4 8) (20 25))", 3) + expectSet(t2, 30, "((4 20) (25 30))", 4) + + t3, err := T(N(N(1, 2), 6)) + + require.NoError(t, err) + expectSet(t3, 4, "((1 2) (4 6))", 4) + expectSet(t3, 8, "((1 2) (6 8))", 3) + + t4, err := T(N(N(1, 2), N(N(5, 6), N(7, 9)))) + + require.NoError(t, err) + expectSet(t4, 8, "(((1 2) (5 6)) ((7 8) 9))", 5) + expectSet(t4, 10, "(((1 2) (5 6)) (7 (9 10)))", 5) + + // Test Remove cases: + + t10, err := T(N(N(1, 2), 3)) + + require.NoError(t, err) + expectRemove(t10, 2, "(1 3)", 1) + expectRemove(t10, 3, "(1 2)", 0) + + t11, err := T(N(N(N(1, 2), 3), N(4, 5))) + + require.NoError(t, err) + expectRemove(t11, 4, "((1 2) (3 5))", 2) + expectRemove(t11, 3, "((1 2) (4 5))", 1) + +} + +func TestRemove(t *testing.T) { + keyLen, dataLen := 16, 40 + + size := 10000 + t1, err := getTestTree(size) + require.NoError(t, err) + + // insert a bunch of random nodes + keys := make([][]byte, size) + l := int32(len(keys)) + for i := 0; i < size; i++ { + key := randBytes(keyLen) + t1.Set(key, randBytes(dataLen)) + keys[i] = key + } + + for i := 0; i < 10; i++ { + step := 50 * i + // remove a bunch of existing keys (may have been deleted twice) + for j := 0; j < step; j++ { + key := keys[mrand.Int31n(l)] + t1.Remove(key) + } + t1.SaveVersion() + } +} + +func TestIntegration(t *testing.T) { + + type record struct { + key string + value string + } + + records := 
make([]*record, 400) + tree, err := getTestTree(0) + require.NoError(t, err) + + randomRecord := func() *record { + return &record{randstr(20), randstr(20)} + } + + for i := range records { + r := randomRecord() + records[i] = r + updated, err := tree.Set([]byte(r.key), []byte{}) + require.NoError(t, err) + if updated { + t.Error("should have not been updated") + } + updated, err = tree.Set([]byte(r.key), []byte(r.value)) + require.NoError(t, err) + if !updated { + t.Error("should have been updated") + } + if tree.ImmutableTree().Size() != int64(i+1) { + t.Error("size was wrong", tree.ImmutableTree().Size(), i+1) + } + } + + for _, r := range records { + has, err := tree.Has([]byte(r.key)) + require.NoError(t, err) + if !has { + t.Error("Missing key", r.key) + } + + has, err = tree.Has([]byte(randstr(12))) + require.NoError(t, err) + if has { + t.Error("Table has extra key") + } + + val, err := tree.Get([]byte(r.key)) + require.NoError(t, err) + if string(val) != r.value { + t.Error("wrong value") + } + } + + for i, x := range records { + if val, removed, err := tree.Remove([]byte(x.key)); err != nil { + require.NoError(t, err) + } else if !removed { + t.Error("Wasn't removed") + } else if string(val) != x.value { + t.Error("Wrong value") + } + require.NoError(t, err) + for _, r := range records[i+1:] { + has, err := tree.Has([]byte(r.key)) + require.NoError(t, err) + if !has { + t.Error("Missing key", r.key) + } + + has, err = tree.Has([]byte(randstr(12))) + require.NoError(t, err) + if has { + t.Error("Table has extra key") + } + + val, err := tree.Get([]byte(r.key)) + require.NoError(t, err) + if string(val) != r.value { + t.Error("wrong value") + } + } + if tree.ImmutableTree().Size() != int64(len(records)-(i+1)) { + t.Error("size was wrong", tree.ImmutableTree().Size(), (len(records) - (i + 1))) + } + } +} + +func TestIterateRange(t *testing.T) { + type record struct { + key string + value string + } + + records := []record{ + {"abc", "123"}, + {"low", 
"high"}, + {"fan", "456"}, + {"foo", "a"}, + {"foobaz", "c"}, + {"good", "bye"}, + {"foobang", "d"}, + {"foobar", "b"}, + {"food", "e"}, + {"foml", "f"}, + } + keys := make([]string, len(records)) + for i, r := range records { + keys[i] = r.key + } + sort.Strings(keys) + + tree, err := getTestTree(0) + require.NoError(t, err) + + // insert all the data + for _, r := range records { + updated, err := tree.Set([]byte(r.key), []byte(r.value)) + require.NoError(t, err) + if updated { + t.Error("should have not been updated") + } + } + // test traversing the whole node works... in order + viewed := []string{} + tree.Iterate(func(key []byte, value []byte) bool { + viewed = append(viewed, string(key)) + return false + }) + if len(viewed) != len(keys) { + t.Error("not the same number of keys as expected") + } + for i, v := range viewed { + if v != keys[i] { + t.Error("Keys out of order", v, keys[i]) + } + } + + trav := traverser{} + tree.ImmutableTree().IterateRange([]byte("foo"), []byte("goo"), true, trav.view) + expectTraverse(t, trav, "foo", "food", 5) + + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("aaa"), []byte("abb"), true, trav.view) + expectTraverse(t, trav, "", "", 0) + + trav = traverser{} + tree.ImmutableTree().IterateRange(nil, []byte("flap"), true, trav.view) + expectTraverse(t, trav, "abc", "fan", 2) + + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("foob"), nil, true, trav.view) + expectTraverse(t, trav, "foobang", "low", 6) + + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("very"), nil, true, trav.view) + expectTraverse(t, trav, "", "", 0) + + // make sure it doesn't include end + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("fooba"), []byte("food"), true, trav.view) + expectTraverse(t, trav, "foobang", "foobaz", 3) + + // make sure backwards also works... 
(doesn't include end) + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("fooba"), []byte("food"), false, trav.view) + expectTraverse(t, trav, "foobaz", "foobang", 3) + + // make sure backwards also works... + trav = traverser{} + tree.ImmutableTree().IterateRange([]byte("g"), nil, false, trav.view) + expectTraverse(t, trav, "low", "good", 2) +} + +func TestPersistence(t *testing.T) { + db := db.NewMemDB() + + // Create some random key value pairs + records := make(map[string]string) + for i := 0; i < 10000; i++ { + records[randstr(20)] = randstr(20) + } + + // Construct some tree and save it + t1, err := NewMutableTree(db, 0, false) + require.NoError(t, err) + for key, value := range records { + t1.Set([]byte(key), []byte(value)) + } + t1.SaveVersion() + + // Load a tree + t2, err := NewMutableTree(db, 0, false) + require.NoError(t, err) + t2.Load() + for key, value := range records { + t2value, err := t2.Get([]byte(key)) + require.NoError(t, err) + if string(t2value) != value { + t.Fatalf("Invalid value. 
Expected %v, got %v", value, t2value) + } + } +} + +func TestProof(t *testing.T) { + + // Construct some random tree + tree, err := getTestTree(100) + require.NoError(t, err) + for i := 0; i < 10; i++ { + key, value := randstr(20), randstr(20) + tree.Set([]byte(key), []byte(value)) + } + + // Persist the items so far + tree.SaveVersion() + + // Add more items so it's not all persisted + for i := 0; i < 10; i++ { + key, value := randstr(20), randstr(20) + tree.Set([]byte(key), []byte(value)) + } + + // Now for each item, construct a proof and verify + tree.Iterate(func(key []byte, value []byte) bool { + value2, proof, err := tree.ImmutableTree().GetWithProof(key) + assert.NoError(t, err) + assert.Equal(t, value, value2) + if assert.NotNil(t, proof) { + hash, err := tree.WorkingHash() + require.NoError(t, err) + verifyProof(t, proof, hash) + } + return false + }) +} + +func TestTreeProof(t *testing.T) { + db := db.NewMemDB() + tree, err := NewMutableTree(db, 100, false) + require.NoError(t, err) + hash, err := tree.Hash() + require.NoError(t, err) + assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + + // should get false for proof with nil root + value, proof, err := tree.ImmutableTree().GetWithProof([]byte("foo")) + assert.Nil(t, value) + assert.Nil(t, proof) + assert.Error(t, proof.Verify([]byte(nil))) + assert.NoError(t, err) + + // insert lots of info and store the bytes + keys := make([][]byte, 200) + for i := 0; i < 200; i++ { + key := randstr(20) + tree.Set([]byte(key), []byte(key)) + keys[i] = []byte(key) + } + + tree.SaveVersion() + + // query random key fails + value, proof, err = tree.ImmutableTree().GetWithProof([]byte("foo")) + assert.Nil(t, value) + assert.NotNil(t, proof) + assert.NoError(t, err) + hash, err = tree.Hash() + assert.NoError(t, err) + assert.NoError(t, proof.Verify(hash)) + assert.NoError(t, proof.VerifyAbsence([]byte("foo"))) + + // valid proof for real keys + root, err := 
tree.WorkingHash() + assert.NoError(t, err) + for _, key := range keys { + value, proof, err := tree.ImmutableTree().GetWithProof(key) + if assert.NoError(t, err) { + require.Nil(t, err, "Failed to read proof from bytes: %v", err) + assert.Equal(t, key, value) + err := proof.Verify(root) + assert.NoError(t, err, "#### %v", proof.String()) + err = proof.VerifyItem(key, key) + assert.NoError(t, err, "#### %v", proof.String()) + } + } +} diff --git a/sei-iavl/benchmarks/README.md b/sei-iavl/benchmarks/README.md new file mode 100644 index 0000000000..cfac7b9982 --- /dev/null +++ b/sei-iavl/benchmarks/README.md @@ -0,0 +1,55 @@ +# Running Benchmarks + +These instructions are mainly for running the benchmarks on an cloud instance that is intended to be thrown away, not on a dev machine. Be careful with the install scripts locally. + +This has only been tested on Ubuntu 16.04 and 18.04. It *should* work on Ubuntu 14.04 as well. It *may* work on Debian, but has never been tested. + + +## Setting up the machine + +Put the files on the machine and login (all code assumes you are in this directory locally) + +``` +scp -r setup user@host: +ssh user@host +``` + +Run the install script (once per machine) + +``` +cd setup +chmod +x * +sudo ./INSTALL_ROOT.sh +``` + +## Running the tests + +Run the benchmarks in a screen: + +``` +screen +./RUN_BENCHMARKS.sh +``` + +Copy them back from your local machine: + +``` +scp user@host:go/src/github.com/cosmos/iavl/results.txt results.txt +git add results +``` + +## Running benchmarks with docker + +Run the command below to install leveldb and rocksdb from source then run the benchmarks all the dbs (memdb, goleveldb, rocksdb, badgerdb) except boltdb. + +replace: +- `baabeetaa` with your repo username and +- `fix-bencharks` with your branch. 
+ +``` +docker run --rm -it ubuntu:16.04 /bin/bash -c \ +"apt-get update && apt-get install -y curl && \ +sh <(curl -s https://raw.githubusercontent.com/baabeetaa/iavl/fix-bencharks/benchmarks/setup/INSTALL_ROOT.sh) && \ +sh <(curl -s https://raw.githubusercontent.com/baabeetaa/iavl/fix-bencharks/benchmarks/setup/RUN_BENCHMARKS.sh) fix-bencharks baabeetaa && \ +cat ~/iavl/results.txt" +``` diff --git a/sei-iavl/benchmarks/bench_test.go b/sei-iavl/benchmarks/bench_test.go new file mode 100644 index 0000000000..2f19b879f0 --- /dev/null +++ b/sei-iavl/benchmarks/bench_test.go @@ -0,0 +1,429 @@ +package benchmarks + +import ( + "fmt" + "math/rand" + "os" + "runtime" + "testing" + + "github.com/stretchr/testify/require" + + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + db "github.com/tendermint/tm-db" +) + +const historySize = 20 + +func randBytes(length int) []byte { + key := make([]byte, length) + // math.rand.Read always returns err=nil + // we do not need cryptographic randomness for this test: + rand.Read(key) + return key +} + +func prepareTree(b *testing.B, db db.DB, size, keyLen, dataLen int) (*iavl.MutableTree, [][]byte) { + t, err := iavl.NewMutableTreeWithOpts(db, size, nil, false) + require.NoError(b, err) + keys := make([][]byte, size) + + for i := 0; i < size; i++ { + key := randBytes(keyLen) + t.Set(key, randBytes(dataLen)) + keys[i] = key + } + commitTree(b, t) + runtime.GC() + return t, keys +} + +// commit tree saves a new version and deletes old ones according to historySize +func commitTree(b *testing.B, t *iavl.MutableTree) { + t.Hash() + + _, version, err := t.SaveVersion() + + if err != nil { + b.Errorf("Can't save: %v", err) + } + + if version > historySize { + err = t.DeleteVersion(version - historySize) + if err != nil { + b.Errorf("Can't delete: %v", err) + } + } +} + +// queries random keys against live state. Keys are almost certainly not in the tree. 
+func runQueriesFast(b *testing.B, t *iavl.MutableTree, keyLen int) { + isFastCacheEnabled, err := t.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.True(b, isFastCacheEnabled) + for i := 0; i < b.N; i++ { + q := randBytes(keyLen) + t.Get(q) + } +} + +// queries keys that are known to be in state +func runKnownQueriesFast(b *testing.B, t *iavl.MutableTree, keys [][]byte) { + isFastCacheEnabled, err := t.ImmutableTree().IsFastCacheEnabled() // to ensure fast storage is enabled + require.NoError(b, err) + require.True(b, isFastCacheEnabled) + l := int32(len(keys)) + for i := 0; i < b.N; i++ { + q := keys[rand.Int31n(l)] + t.Get(q) + } +} + +func runQueriesSlow(b *testing.B, t *iavl.MutableTree, keyLen int) { + b.StopTimer() + // Save version to get an old immutable tree to query against, + // Fast storage is not enabled on old tree versions, allowing us to bench the desired behavior. + _, version, err := t.SaveVersion() + require.NoError(b, err) + + itree, err := t.GetImmutable(version - 1) + require.NoError(b, err) + isFastCacheEnabled, err := itree.IsFastCacheEnabled() // to ensure fast storage is enabled + require.NoError(b, err) + require.False(b, isFastCacheEnabled) // to ensure fast storage is not enabled + + b.StartTimer() + for i := 0; i < b.N; i++ { + q := randBytes(keyLen) + itree.GetWithIndex(q) + } +} + +func runKnownQueriesSlow(b *testing.B, t *iavl.MutableTree, keys [][]byte) { + b.StopTimer() + // Save version to get an old immutable tree to query against, + // Fast storage is not enabled on old tree versions, allowing us to bench the desired behavior. 
+ _, version, err := t.SaveVersion() + require.NoError(b, err) + + itree, err := t.GetImmutable(version - 1) + require.NoError(b, err) + isFastCacheEnabled, err := itree.IsFastCacheEnabled() // to ensure fast storage is not enabled + require.NoError(b, err) + require.False(b, isFastCacheEnabled) + b.StartTimer() + l := int32(len(keys)) + for i := 0; i < b.N; i++ { + q := keys[rand.Int31n(l)] + index, value, err := itree.GetWithIndex(q) + require.NoError(b, err) + require.True(b, index >= 0, "the index must not be negative") + require.NotNil(b, value, "the value should exist") + } +} + +func runIterationFast(b *testing.B, t *iavl.MutableTree, expectedSize int) { + isFastCacheEnabled, err := t.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.True(b, isFastCacheEnabled) // to ensure fast storage is enabled + for i := 0; i < b.N; i++ { + itr, err := t.ImmutableTree().Iterator(nil, nil, false) + require.NoError(b, err) + iterate(b, itr, expectedSize) + require.Nil(b, itr.Close(), ".Close should not error out") + } +} + +func runIterationSlow(b *testing.B, t *iavl.MutableTree, expectedSize int) { + for i := 0; i < b.N; i++ { + itr := iavl.NewIterator(nil, nil, false, t.ImmutableTree()) // create slow iterator directly + iterate(b, itr, expectedSize) + require.Nil(b, itr.Close(), ".Close should not error out") + } +} + +func iterate(b *testing.B, itr db.Iterator, expectedSize int) { + b.StartTimer() + keyValuePairs := make([][][]byte, 0, expectedSize) + for i := 0; i < expectedSize && itr.Valid(); i++ { + itr.Next() + keyValuePairs = append(keyValuePairs, [][]byte{itr.Key(), itr.Value()}) + } + b.StopTimer() + if g, w := len(keyValuePairs), expectedSize; g != w { + b.Errorf("iteration count mismatch: got=%d, want=%d", g, w) + } else { + b.Logf("completed %d iterations", len(keyValuePairs)) + } +} + +// func runInsert(b *testing.B, t *iavl.MutableTree, keyLen, dataLen, blockSize int) *iavl.MutableTree { +// for i := 1; i <= b.N; i++ { +// 
t.Set(randBytes(keyLen), randBytes(dataLen)) +// if i%blockSize == 0 { +// t.Hash() +// t.SaveVersion() +// } +// } +// return t +// } + +func runUpdate(b *testing.B, t *iavl.MutableTree, dataLen, blockSize int, keys [][]byte) *iavl.MutableTree { + l := int32(len(keys)) + for i := 1; i <= b.N; i++ { + key := keys[rand.Int31n(l)] + t.Set(key, randBytes(dataLen)) + if i%blockSize == 0 { + commitTree(b, t) + } + } + return t +} + +// func runDelete(b *testing.B, t *iavl.MutableTree, blockSize int, keys [][]byte) *iavl.MutableTree { +// var key []byte +// l := int32(len(keys)) +// for i := 1; i <= b.N; i++ { +// key = keys[rand.Int31n(l)] +// // key = randBytes(16) +// // TODO: test if removed, use more keys (from insert) +// t.Remove(key) +// if i%blockSize == 0 { +// commitTree(b, t) +// } +// } +// return t +// } + +// runBlock measures time for an entire block, not just one tx +func runBlock(b *testing.B, t *iavl.MutableTree, keyLen, dataLen, blockSize int, keys [][]byte) *iavl.MutableTree { + l := int32(len(keys)) + + // XXX: This was adapted to work with VersionedTree but needs to be re-thought. + + lastCommit := t + real := t + // check := t + + for i := 0; i < b.N; i++ { + for j := 0; j < blockSize; j++ { + // 50% insert, 50% update + var key []byte + if i%2 == 0 { + key = keys[rand.Int31n(l)] + } else { + key = randBytes(keyLen) + } + data := randBytes(dataLen) + + // perform query and write on check and then real + // check.GetFast(key) + // check.Set(key, data) + real.Get(key) + real.Set(key, data) + } + + // at the end of a block, move it all along.... 
+ commitTree(b, real) + lastCommit = real + } + + return lastCommit +} + +func BenchmarkRandomBytes(b *testing.B) { + fmt.Printf("%s\n", iavl.GetVersionInfo()) + benchmarks := []struct { + length int + }{ + {4}, {16}, {32}, {100}, {1000}, + } + for _, bench := range benchmarks { + bench := bench + name := fmt.Sprintf("random-%d", bench.length) + b.Run(name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + randBytes(bench.length) + } + runtime.GC() + }) + } +} + +type benchmark struct { + dbType db.BackendType + initSize, blockSize int + keyLen, dataLen int +} + +func BenchmarkMedium(b *testing.B) { + benchmarks := []benchmark{ + {"memdb", 100000, 100, 16, 40}, + {"goleveldb", 100000, 100, 16, 40}, + // {"cleveldb", 100000, 100, 16, 40}, + // FIXME: idk why boltdb is too slow !? + // {"boltdb", 100000, 100, 16, 40}, + // {"rocksdb", 100000, 100, 16, 40}, + // {"badgerdb", 100000, 100, 16, 40}, + } + runBenchmarks(b, benchmarks) +} + +func BenchmarkSmall(b *testing.B) { + benchmarks := []benchmark{ + {"memdb", 1000, 100, 4, 10}, + {"goleveldb", 1000, 100, 4, 10}, + // {"cleveldb", 1000, 100, 4, 10}, + // {"boltdb", 1000, 100, 4, 10}, + // {"rocksdb", 1000, 100, 4, 10}, + // {"badgerdb", 1000, 100, 4, 10}, + } + runBenchmarks(b, benchmarks) +} + +func BenchmarkLarge(b *testing.B) { + benchmarks := []benchmark{ + {"memdb", 1000000, 100, 16, 40}, + {"goleveldb", 1000000, 100, 16, 40}, + // FIXME: idk why boltdb is too slow !? 
+ // {"boltdb", 1000000, 100, 16, 40}, + // {"rocksdb", 1000000, 100, 16, 40}, + // {"badgerdb", 1000000, 100, 16, 40}, + } + runBenchmarks(b, benchmarks) +} + +func BenchmarkLevelDBBatchSizes(b *testing.B) { + benchmarks := []benchmark{ + {"goleveldb", 100000, 5, 16, 40}, + {"goleveldb", 100000, 25, 16, 40}, + {"goleveldb", 100000, 100, 16, 40}, + {"goleveldb", 100000, 400, 16, 40}, + {"goleveldb", 100000, 2000, 16, 40}, + } + runBenchmarks(b, benchmarks) +} + +// BenchmarkLevelDBLargeData is intended to push disk limits +// in the goleveldb, to make sure not everything is cached +func BenchmarkLevelDBLargeData(b *testing.B) { + benchmarks := []benchmark{ + {"goleveldb", 50000, 100, 32, 100}, + {"goleveldb", 50000, 100, 32, 1000}, + {"goleveldb", 50000, 100, 32, 10000}, + {"goleveldb", 50000, 100, 32, 100000}, + } + runBenchmarks(b, benchmarks) +} + +func runBenchmarks(b *testing.B, benchmarks []benchmark) { + fmt.Printf("%s\n", iavl.GetVersionInfo()) + for _, bb := range benchmarks { + bb := bb + prefix := fmt.Sprintf("%s-%d-%d-%d-%d", bb.dbType, + bb.initSize, bb.blockSize, bb.keyLen, bb.dataLen) + + // prepare a dir for the db and cleanup afterwards + dirName := fmt.Sprintf("./%s-db", prefix) + if (bb.dbType == db.RocksDBBackend) || (bb.dbType == db.CLevelDBBackend) || (bb.dbType == db.BoltDBBackend) { + _ = os.Mkdir(dirName, 0755) + } + + defer func() { + err := os.RemoveAll(dirName) + if err != nil { + b.Errorf("%+v\n", err) + } + }() + + // note that "" leads to nil backing db! 
+ var ( + d db.DB + err error + ) + if bb.dbType != "nodb" { + d, err = db.NewDB("test", bb.dbType, dirName) + require.NoError(b, err) + defer func() { _ = d.Close() }() + } + b.Run(prefix, func(sub *testing.B) { + runSuite(sub, d, bb.initSize, bb.blockSize, bb.keyLen, bb.dataLen) + }) + } +} + +// returns number of MB in use +func memUseMB() float64 { + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + asize := mem.Alloc + mb := float64(asize) / 1000000 + return mb +} + +func runSuite(b *testing.B, d db.DB, initSize, blockSize, keyLen, dataLen int) { + // measure mem usage + runtime.GC() + init := memUseMB() + + t, keys := prepareTree(b, d, initSize, keyLen, dataLen) + used := memUseMB() - init + fmt.Printf("Init Tree took %0.2f MB\n", used) + + b.ResetTimer() + + b.Run("query-no-in-tree-guarantee-fast", func(sub *testing.B) { + sub.ReportAllocs() + runQueriesFast(sub, t, keyLen) + }) + b.Run("query-no-in-tree-guarantee-slow", func(sub *testing.B) { + sub.ReportAllocs() + runQueriesSlow(sub, t, keyLen) + }) + // + b.Run("query-hits-fast", func(sub *testing.B) { + sub.ReportAllocs() + runKnownQueriesFast(sub, t, keys) + }) + b.Run("query-hits-slow", func(sub *testing.B) { + sub.ReportAllocs() + runKnownQueriesSlow(sub, t, keys) + }) + // + // Iterations for BenchmarkLevelDBLargeData timeout bencher in CI so + // we must skip them. 
+ if b.Name() != "BenchmarkLevelDBLargeData" { + b.Run("iteration-fast", func(sub *testing.B) { + sub.ReportAllocs() + runIterationFast(sub, t, initSize) + }) + b.Run("iteration-slow", func(sub *testing.B) { + sub.ReportAllocs() + runIterationSlow(sub, t, initSize) + }) + } + + // + b.Run("update", func(sub *testing.B) { + sub.ReportAllocs() + t = runUpdate(sub, t, dataLen, blockSize, keys) + }) + b.Run("block", func(sub *testing.B) { + sub.ReportAllocs() + t = runBlock(sub, t, keyLen, dataLen, blockSize, keys) + }) + + // both of these edit size of the tree too much + // need to run with their own tree + // t = nil // for gc + // b.Run("insert", func(sub *testing.B) { + // it, _ := prepareTree(d, initSize, keyLen, dataLen) + // sub.ResetTimer() + // runInsert(sub, it, keyLen, dataLen, blockSize) + // }) + // b.Run("delete", func(sub *testing.B) { + // dt, dkeys := prepareTree(d, initSize+sub.N, keyLen, dataLen) + // sub.ResetTimer() + // runDelete(sub, dt, blockSize, dkeys) + // }) +} diff --git a/sei-iavl/benchmarks/cosmos-exim/README.md b/sei-iavl/benchmarks/cosmos-exim/README.md new file mode 100644 index 0000000000..2de7b8d4b0 --- /dev/null +++ b/sei-iavl/benchmarks/cosmos-exim/README.md @@ -0,0 +1,38 @@ +# cosmos-exim + +A small utility to benchmark export/import of Cosmos Hub IAVL stores. These stores can be downloaded e.g. from [chainlayer.io](https://www.chainlayer.io). 
Example usage: + +```sh +$ go run benchmarks/cosmos-exim/main.go ../cosmoshub-3/data +Exporting cosmoshub database at version 870068 + +acc : 67131 nodes (33566 leaves) in 676ms with size 3 MB +distribution : 66509 nodes (33255 leaves) in 804ms with size 3 MB +evidence : 0 nodes (0 leaves) in 0s with size 0 MB +god : 0 nodes (0 leaves) in 0s with size 0 MB +main : 1 nodes (1 leaves) in 0s with size 0 MB +mint : 1 nodes (1 leaves) in 0s with size 0 MB +params : 59 nodes (30 leaves) in 0s with size 0 MB +slashing : 1128139 nodes (564070 leaves) in 17.423s with size 41 MB +staking : 44573 nodes (22287 leaves) in 433ms with size 3 MB +supply : 1 nodes (1 leaves) in 0s with size 0 MB +upgrade : 0 nodes (0 leaves) in 0s with size 0 MB + +Exported 11 stores with 1306414 nodes (653211 leaves) in 19.336s with size 52 MB + +Importing into new LevelDB stores + +acc : 67131 nodes (33566 leaves) in 259ms with size 3 MB +distribution: 66509 nodes (33255 leaves) in 238ms with size 3 MB +evidence : 0 nodes (0 leaves) in 19ms with size 0 MB +god : 0 nodes (0 leaves) in 40ms with size 0 MB +main : 1 nodes (1 leaves) in 22ms with size 0 MB +mint : 1 nodes (1 leaves) in 26ms with size 0 MB +params : 59 nodes (30 leaves) in 26ms with size 0 MB +slashing : 1128139 nodes (564070 leaves) in 5.213s with size 41 MB +staking : 44573 nodes (22287 leaves) in 173ms with size 3 MB +supply : 1 nodes (1 leaves) in 25ms with size 0 MB +upgrade : 0 nodes (0 leaves) in 26ms with size 0 MB + +Imported 11 stores with 1306414 nodes (653211 leaves) in 6.067s with size 52 MB +``` \ No newline at end of file diff --git a/sei-iavl/benchmarks/cosmos-exim/main.go b/sei-iavl/benchmarks/cosmos-exim/main.go new file mode 100644 index 0000000000..179b851f01 --- /dev/null +++ b/sei-iavl/benchmarks/cosmos-exim/main.go @@ -0,0 +1,207 @@ +package main + +import ( + "fmt" + "os" + "time" + + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + tmdb "github.com/tendermint/tm-db" +) + +// stores is the list of stores in 
the CosmosHub database +// FIXME would be nice to autodetect this +var stores = []string{ + "acc", + "distribution", + "evidence", + "god", + "main", + "mint", + "params", + "slashing", + "staking", + "supply", + "upgrade", +} + +// Stats track import/export statistics +type Stats struct { + nodes uint64 + leafNodes uint64 + size uint64 + duration time.Duration +} + +func (s *Stats) Add(o Stats) { + s.nodes += o.nodes + s.leafNodes += o.leafNodes + s.size += o.size + s.duration += o.duration +} + +func (s *Stats) AddDurationSince(started time.Time) { + s.duration += time.Since(started) +} + +func (s *Stats) AddNode(node *iavl.ExportNode) { + s.nodes++ + if node.Height == 0 { + s.leafNodes++ + } + // #nosec G115 -- len() always returns non-negative values + s.size += uint64(len(node.Key)) + uint64(len(node.Value)) + 9 +} + +func (s *Stats) String() string { + return fmt.Sprintf("%v nodes (%v leaves) in %v with size %v MB", + s.nodes, s.leafNodes, s.duration.Round(time.Millisecond), s.size/1024/1024) +} + +// main runs the main program +func main() { + if len(os.Args) != 2 { + _, _ = fmt.Fprintf(os.Stderr, "Usage: %v \n", os.Args[0]) + os.Exit(1) + } + err := run(os.Args[1]) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, "Error: %v\n", err.Error()) + os.Exit(1) + } +} + +// run runs the command with normal error handling +func run(dbPath string) error { + version, exports, err := runExport(dbPath) + if err != nil { + return err + } + + err = runImport(version, exports) + if err != nil { + return err + } + return nil +} + +// runExport runs an export benchmark and returns a map of store names/export nodes +func runExport(dbPath string) (int64, map[string][]*iavl.ExportNode, error) { + ldb, err := tmdb.NewDB("application", tmdb.GoLevelDBBackend, dbPath) + if err != nil { + return 0, nil, err + } + tree, err := iavl.NewMutableTree(tmdb.NewPrefixDB(ldb, []byte("s/k:main/")), 0, false) + if err != nil { + return 0, nil, err + } + version, err := tree.LoadVersion(0) + 
if err != nil { + return 0, nil, err + } + fmt.Printf("Exporting cosmoshub database at version %v\n\n", version) + + exports := make(map[string][]*iavl.ExportNode, len(stores)) + + totalStats := Stats{} + for _, name := range stores { + db := tmdb.NewPrefixDB(ldb, []byte("s/k:"+name+"/")) + tree, err := iavl.NewMutableTree(db, 0, false) + if err != nil { + return 0, nil, err + } + + stats := Stats{} + export := make([]*iavl.ExportNode, 0, 100000) + + storeVersion, err := tree.LoadVersion(0) + if err != nil { + return 0, nil, err + } + if storeVersion == 0 { + fmt.Printf("%-13v: %v\n", name, stats.String()) + continue + } + + itree, err := tree.GetImmutable(version) + if err != nil { + return 0, nil, err + } + start := time.Now().UTC() + exporter := itree.Export() + defer exporter.Close() + for { + node, err := exporter.Next() + if err == iavl.ExportDone { + break + } else if err != nil { + return 0, nil, err + } + export = append(export, node) + stats.AddNode(node) + } + stats.AddDurationSince(start) + fmt.Printf("%-13v: %v\n", name, stats.String()) + totalStats.Add(stats) + exports[name] = export + } + + fmt.Printf("\nExported %v stores with %v\n\n", len(stores), totalStats.String()) + + return version, exports, nil +} + +// runImport runs an import benchmark with nodes exported from runExport() +func runImport(version int64, exports map[string][]*iavl.ExportNode) error { + fmt.Print("Importing into new LevelDB stores\n\n") + + totalStats := Stats{} + + for _, name := range stores { + tempdir, err := os.MkdirTemp("", name) + if err != nil { + return err + } + + start := time.Now() + stats := Stats{} + + newDB, err := tmdb.NewDB(name, tmdb.GoLevelDBBackend, tempdir) + if err != nil { + _ = os.RemoveAll(tempdir) + return err + } + newTree, err := iavl.NewMutableTree(newDB, 0, false) + if err != nil { + _ = os.RemoveAll(tempdir) + return err + } + importer, err := newTree.Import(version) + if err != nil { + _ = os.RemoveAll(tempdir) + return err + } + defer 
importer.Close() + for _, node := range exports[name] { + err = importer.Add(node) + if err != nil { + _ = os.RemoveAll(tempdir) + return err + } + stats.AddNode(node) + } + err = importer.Commit() + if err != nil { + _ = os.RemoveAll(tempdir) + return err + } + stats.AddDurationSince(start) + fmt.Printf("%-12v: %v\n", name, stats.String()) + totalStats.Add(stats) + _ = os.RemoveAll(tempdir) + } + + fmt.Printf("\nImported %v stores with %v\n", len(stores), totalStats.String()) + + return nil +} diff --git a/sei-iavl/benchmarks/hash_test.go b/sei-iavl/benchmarks/hash_test.go new file mode 100644 index 0000000000..62da2ca8d0 --- /dev/null +++ b/sei-iavl/benchmarks/hash_test.go @@ -0,0 +1,53 @@ +package benchmarks + +import ( + "crypto" + "fmt" + "hash" + "testing" + + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + "github.com/stretchr/testify/require" + + _ "crypto/sha256" + + _ "golang.org/x/crypto/ripemd160" // nolint: staticcheck // need to test ripemd160 + _ "golang.org/x/crypto/sha3" +) + +func BenchmarkHash(b *testing.B) { + fmt.Printf("%s\n", iavl.GetVersionInfo()) + hashers := []struct { + name string + size int + hash hash.Hash + }{ + {"ripemd160", 64, crypto.RIPEMD160.New()}, + {"ripemd160", 512, crypto.RIPEMD160.New()}, + {"sha2-256", 64, crypto.SHA256.New()}, + {"sha2-256", 512, crypto.SHA256.New()}, + {"sha3-256", 64, crypto.SHA3_256.New()}, + {"sha3-256", 512, crypto.SHA3_256.New()}, + } + + for _, h := range hashers { + prefix := fmt.Sprintf("%s-%d", h.name, h.size) + hasher := h + b.Run(prefix, func(sub *testing.B) { + benchHasher(sub, hasher.hash, hasher.size) + }) + } +} + +func benchHasher(b *testing.B, hash hash.Hash, size int) { + // create all random bytes before to avoid timing this + inputs := randBytes(b.N + size + 1) + + for i := 0; i < b.N; i++ { + hash.Reset() + // grab a slice of size bytes from random string + _, err := hash.Write(inputs[i : i+size]) + require.NoError(b, err) + hash.Sum(nil) + } +} diff --git 
a/sei-iavl/benchmarks/results/165-adityaversion/Adityas-MBP-0.12.3-4-ge3e5a91-results.txt b/sei-iavl/benchmarks/results/165-adityaversion/Adityas-MBP-0.12.3-4-ge3e5a91-results.txt new file mode 100644 index 0000000000..0878c9020f --- /dev/null +++ b/sei-iavl/benchmarks/results/165-adityaversion/Adityas-MBP-0.12.3-4-ge3e5a91-results.txt @@ -0,0 +1,62 @@ +cd benchmarks && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-4-ge3e5a91 -X github.com/tendermint/iavl.Commit=e3e5a91edda5a9d525c49749ad102d9cf3b602b4 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=RandomBytes . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-4-ge3e5a91 -X github.com/tendermint/iavl.Commit=e3e5a91edda5a9d525c49749ad102d9cf3b602b4 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=Small . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-4-ge3e5a91 -X github.com/tendermint/iavl.Commit=e3e5a91edda5a9d525c49749ad102d9cf3b602b4 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=Medium . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-4-ge3e5a91 -X github.com/tendermint/iavl.Commit=e3e5a91edda5a9d525c49749ad102d9cf3b602b4 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=BenchmarkMemKeySizes . 
+iavl: 0.12.3-4-ge3e5a91 +git commit: e3e5a91edda5a9d525c49749ad102d9cf3b602b4 +git branch: aditya/version +go version go1.12.1 darwin/amd64 + +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-8 30000000 51.1 ns/op +BenchmarkRandomBytes/random-16-8 20000000 77.0 ns/op +BenchmarkRandomBytes/random-32-8 20000000 111 ns/op +BenchmarkRandomBytes/random-100-8 5000000 255 ns/op +BenchmarkRandomBytes/random-1000-8 1000000 2115 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 9.235s +iavl: 0.12.3-4-ge3e5a91 +git commit: e3e5a91edda5a9d525c49749ad102d9cf3b602b4 +git branch: aditya/version +go version go1.12.1 darwin/amd64 + +Init Tree took 0.90 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-8 1000000 2432 ns/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-8 500000 2825 ns/op +BenchmarkSmall/memdb-1000-100-4-10/update-8 20000 116399 ns/op +BenchmarkSmall/memdb-1000-100-4-10/block-8 100 16906178 ns/op +Init Tree took 0.49 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-8 500000 3790 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-8 300000 4671 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-8 20000 100226 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-8 100 18355816 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 17.244s +iavl: 0.12.3-4-ge3e5a91 +git commit: e3e5a91edda5a9d525c49749ad102d9cf3b602b4 +git branch: aditya/version +go version go1.12.1 darwin/amd64 + +Init Tree took 85.05 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-8 200000 7335 ns/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-8 200000 8195 ns/op +BenchmarkMedium/memdb-100000-100-16-40/update-8 5000 1359239 ns/op +BenchmarkMedium/memdb-100000-100-16-40/block-8 10 169015024 ns/op +Init Tree took 47.53 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-8 
50000 28617 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-8 50000 37218 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-8 10000 471017 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-8 20 62199885 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 31.865s +PASS +ok github.com/tendermint/iavl/benchmarks 0.016s diff --git a/sei-iavl/benchmarks/results/165-adityaversion/DigitalOcean-4vcpu-8gb-160gb-0.12.3-7-gf5dfff0-results.txt b/sei-iavl/benchmarks/results/165-adityaversion/DigitalOcean-4vcpu-8gb-160gb-0.12.3-7-gf5dfff0-results.txt new file mode 100644 index 0000000000..d6123296fa --- /dev/null +++ b/sei-iavl/benchmarks/results/165-adityaversion/DigitalOcean-4vcpu-8gb-160gb-0.12.3-7-gf5dfff0-results.txt @@ -0,0 +1,62 @@ +cd benchmarks && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-7-gf5dfff0 -X github.com/tendermint/iavl.Commit=f5dfff0a2707c82dd8bededc8799fff16c663b20 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=RandomBytes . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-7-gf5dfff0 -X github.com/tendermint/iavl.Commit=f5dfff0a2707c82dd8bededc8799fff16c663b20 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=Small . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-7-gf5dfff0 -X github.com/tendermint/iavl.Commit=f5dfff0a2707c82dd8bededc8799fff16c663b20 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=Medium . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.3-7-gf5dfff0 -X github.com/tendermint/iavl.Commit=f5dfff0a2707c82dd8bededc8799fff16c663b20 -X github.com/tendermint/iavl.Branch=aditya/version" -bench=BenchmarkMemKeySizes . 
+iavl: 0.12.3-7-gf5dfff0 +git commit: f5dfff0a2707c82dd8bededc8799fff16c663b20 +git branch: aditya/version +go version go1.12.7 linux/amd64 + +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-4 20000000 74.3 ns/op +BenchmarkRandomBytes/random-16-4 20000000 115 ns/op +BenchmarkRandomBytes/random-32-4 10000000 170 ns/op +BenchmarkRandomBytes/random-100-4 3000000 415 ns/op +BenchmarkRandomBytes/random-1000-4 500000 3403 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 9.286s +iavl: 0.12.3-7-gf5dfff0 +git commit: f5dfff0a2707c82dd8bededc8799fff16c663b20 +git branch: aditya/version +go version go1.12.7 linux/amd64 + +Init Tree took 0.90 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-4 300000 4112 ns/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-4 300000 4950 ns/op +BenchmarkSmall/memdb-1000-100-4-10/update-4 10000 165429 ns/op +BenchmarkSmall/memdb-1000-100-4-10/block-4 100 25848533 ns/op +Init Tree took 0.49 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-4 200000 6100 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-4 200000 7678 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-4 20000 101781 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-4 100 16241078 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 14.681s +iavl: 0.12.3-7-gf5dfff0 +git commit: f5dfff0a2707c82dd8bededc8799fff16c663b20 +git branch: aditya/version +go version go1.12.7 linux/amd64 + +Init Tree took 85.07 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-4 100000 10310 ns/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-4 200000 12132 ns/op +BenchmarkMedium/memdb-100000-100-16-40/update-4 3000 1278477 ns/op +BenchmarkMedium/memdb-100000-100-16-40/block-4 10 197126616 ns/op +Init Tree took 47.61 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-4 50000 
27411 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-4 30000 35691 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-4 10000 295694 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-4 50 34368088 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 27.176s +PASS +ok github.com/tendermint/iavl/benchmarks 0.007s diff --git a/sei-iavl/benchmarks/results/168/Rickys-MBP-0.12.4-4-g494d60b-result.txt b/sei-iavl/benchmarks/results/168/Rickys-MBP-0.12.4-4-g494d60b-result.txt new file mode 100644 index 0000000000..f1a78a7ee1 --- /dev/null +++ b/sei-iavl/benchmarks/results/168/Rickys-MBP-0.12.4-4-g494d60b-result.txt @@ -0,0 +1,69 @@ +go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-4-g494d60b -X github.com/tendermint/iavl.Commit=494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 -X github.com/tendermint/iavl.Branch=master" -bench=MutableTree_Set . +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl +BenchmarkMutableTree_Set-12 300000 5936 ns/op 4454 B/op 25 allocs/op +PASS +ok github.com/tendermint/iavl 33.943s +cd benchmarks && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-4-g494d60b -X github.com/tendermint/iavl.Commit=494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 -X github.com/tendermint/iavl.Branch=master" -bench=RandomBytes . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-4-g494d60b -X github.com/tendermint/iavl.Commit=494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 -X github.com/tendermint/iavl.Branch=master" -bench=Small . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-4-g494d60b -X github.com/tendermint/iavl.Commit=494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 -X github.com/tendermint/iavl.Branch=master" -bench=Medium . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-4-g494d60b -X github.com/tendermint/iavl.Commit=494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 -X github.com/tendermint/iavl.Branch=master" -bench=BenchmarkMemKeySizes . 
+iavl: 0.12.4-4-g494d60b +git commit: 494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 +git branch: master +go version go1.12.5 darwin/amd64 + +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-12 30000000 37.5 ns/op +BenchmarkRandomBytes/random-16-12 30000000 55.1 ns/op +BenchmarkRandomBytes/random-32-12 20000000 76.4 ns/op +BenchmarkRandomBytes/random-100-12 10000000 171 ns/op +BenchmarkRandomBytes/random-1000-12 1000000 1338 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 7.739s +iavl: 0.12.4-4-g494d60b +git commit: 494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 +git branch: master +go version go1.12.5 darwin/amd64 + +Init Tree took 0.90 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-12 1000000 1814 ns/op 353 B/op 7 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-12 1000000 2087 ns/op 515 B/op 9 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/update-12 20000 76006 ns/op 47671 B/op 843 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/block-12 100 11481861 ns/op 6581560 B/op 118259 allocs/op +Init Tree took 0.49 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-12 500000 2903 ns/op 575 B/op 12 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-12 500000 3533 ns/op 788 B/op 15 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-12 30000 59931 ns/op 23571 B/op 247 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-12 200 13931032 ns/op 4455199 B/op 52266 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 16.641s +iavl: 0.12.4-4-g494d60b +git commit: 494d60be6833a7bde8e4b8ddf3ad8d2a9df662c6 +git branch: master +go version go1.12.5 darwin/amd64 + +Init Tree took 85.03 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-12 200000 6072 ns/op 426 B/op 8 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-12 200000 6660 ns/op 557 B/op 9 
allocs/op +BenchmarkMedium/memdb-100000-100-16-40/update-12 5000 891292 ns/op 308110 B/op 6016 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/block-12 10 119351442 ns/op 39926731 B/op 791429 allocs/op +Init Tree took 47.50 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-12 100000 16961 ns/op 1592 B/op 28 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-12 100000 21351 ns/op 2209 B/op 38 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-12 10000 193156 ns/op 48430 B/op 616 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-12 50 28373250 ns/op 5519076 B/op 73151 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 21.117s +PASS +ok github.com/tendermint/iavl/benchmarks 0.009s diff --git a/sei-iavl/benchmarks/results/169/Rickys-MBP-0.12.4-3-ge247ad9-results.txt b/sei-iavl/benchmarks/results/169/Rickys-MBP-0.12.4-3-ge247ad9-results.txt new file mode 100644 index 0000000000..79d9fafb23 --- /dev/null +++ b/sei-iavl/benchmarks/results/169/Rickys-MBP-0.12.4-3-ge247ad9-results.txt @@ -0,0 +1,72 @@ +go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-3-ge247ad9 -X github.com/tendermint/iavl.Commit=e247ad97f779f28abbe31b05d8cbc8b62aa6994b -X github.com/tendermint/iavl.Branch=node_value" -bench=Node* . +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl +BenchmarkNode_aminoSize-12 200000000 8.72 ns/op 0 B/op 0 allocs/op +BenchmarkNode_WriteBytes/NoPreAllocate-12 5000000 373 ns/op 368 B/op 9 allocs/op +BenchmarkNode_WriteBytes/PreAllocate-12 5000000 328 ns/op 224 B/op 8 allocs/op +BenchmarkNodeKey-12 30000000 47.1 ns/op +PASS +ok github.com/tendermint/iavl 21.170s +cd benchmarks && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-3-ge247ad9 -X github.com/tendermint/iavl.Commit=e247ad97f779f28abbe31b05d8cbc8b62aa6994b -X github.com/tendermint/iavl.Branch=node_value" -bench=RandomBytes . 
&& \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-3-ge247ad9 -X github.com/tendermint/iavl.Commit=e247ad97f779f28abbe31b05d8cbc8b62aa6994b -X github.com/tendermint/iavl.Branch=node_value" -bench=Small . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-3-ge247ad9 -X github.com/tendermint/iavl.Commit=e247ad97f779f28abbe31b05d8cbc8b62aa6994b -X github.com/tendermint/iavl.Branch=node_value" -bench=Medium . && \ + go test -ldflags "-X github.com/tendermint/iavl.Version=0.12.4-3-ge247ad9 -X github.com/tendermint/iavl.Commit=e247ad97f779f28abbe31b05d8cbc8b62aa6994b -X github.com/tendermint/iavl.Branch=node_value" -bench=BenchmarkMemKeySizes . +iavl: 0.12.4-3-ge247ad9 +git commit: e247ad97f779f28abbe31b05d8cbc8b62aa6994b +git branch: node_value +go version go1.12.5 darwin/amd64 + +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-12 30000000 36.5 ns/op +BenchmarkRandomBytes/random-16-12 30000000 53.5 ns/op +BenchmarkRandomBytes/random-32-12 20000000 76.7 ns/op +BenchmarkRandomBytes/random-100-12 10000000 167 ns/op +BenchmarkRandomBytes/random-1000-12 1000000 1345 ns/op +PASS +ok github.com/tendermint/iavl/benchmarks 7.627s +iavl: 0.12.4-3-ge247ad9 +git commit: e247ad97f779f28abbe31b05d8cbc8b62aa6994b +git branch: node_value +go version go1.12.5 darwin/amd64 + +Init Tree took 0.82 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-12 1000000 1787 ns/op 353 B/op 7 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-12 1000000 2138 ns/op 514 B/op 9 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/update-12 20000 74001 ns/op 47666 B/op 860 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/block-12 100 11359815 ns/op 6558684 B/op 119840 allocs/op +Init Tree took 0.48 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-12 500000 2800 ns/op 556 B/op 11 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-12 
500000 3477 ns/op 787 B/op 15 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-12 30000 61296 ns/op 23580 B/op 263 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-12 200 14048236 ns/op 4472449 B/op 54353 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 17.569s +iavl: 0.12.4-3-ge247ad9 +git commit: e247ad97f779f28abbe31b05d8cbc8b62aa6994b +git branch: node_value +go version go1.12.5 darwin/amd64 + +Init Tree took 78.64 MB +goos: darwin +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-12 200000 5764 ns/op 426 B/op 8 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-12 200000 6485 ns/op 557 B/op 9 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/update-12 5000 892383 ns/op 308163 B/op 6040 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/block-12 10 120660641 ns/op 39925867 B/op 793685 allocs/op +Init Tree took 46.84 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-12 100000 17000 ns/op 1592 B/op 28 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-12 100000 20677 ns/op 2209 B/op 38 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-12 10000 189551 ns/op 48464 B/op 639 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-12 50 25864961 ns/op 5432798 B/op 74492 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 21.234s +PASS +ok github.com/tendermint/iavl/benchmarks 0.008s diff --git a/sei-iavl/benchmarks/results/legacy/Ethans-MBP-2717167.txt b/sei-iavl/benchmarks/results/legacy/Ethans-MBP-2717167.txt new file mode 100644 index 0000000000..0342129b2e --- /dev/null +++ b/sei-iavl/benchmarks/results/legacy/Ethans-MBP-2717167.txt @@ -0,0 +1,116 @@ +cd benchmarks && \ + go test -bench=RandomBytes . && \ + go test -bench=Small . && \ + go test -bench=Medium . && \ + go test -bench=BenchmarkMemKeySizes . 
+BenchmarkRandomBytes/random-4-4 20000000 63.1 ns/op +BenchmarkRandomBytes/random-16-4 20000000 88.9 ns/op +BenchmarkRandomBytes/random-32-4 10000000 123 ns/op +BenchmarkRandomBytes/random-100-4 5000000 275 ns/op +BenchmarkRandomBytes/random-1000-4 1000000 2131 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 8.390s +Init Tree took 0.42 MB +BenchmarkSmall/nodb-1000-100-4-10/query-miss-4 5000000 345 ns/op +BenchmarkSmall/nodb-1000-100-4-10/query-hits-4 5000000 320 ns/op +BenchmarkSmall/nodb-1000-100-4-10/update-4 200000 7959 ns/op +BenchmarkSmall/nodb-1000-100-4-10/tmsp-4 100000 25878 ns/op +BenchmarkSmall/nodb-1000-100-4-10/insert-4 100000 28215 ns/op +BenchmarkSmall/nodb-1000-100-4-10/delete-4 100000 15757 ns/op +Init Tree took 0.84 MB +BenchmarkSmall/memdb-1000-100-4-10/query-miss-4 500000 3296 ns/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-4 300000 3664 ns/op +BenchmarkSmall/memdb-1000-100-4-10/update-4 50000 25869 ns/op +BenchmarkSmall/memdb-1000-100-4-10/tmsp-4 30000 87781 ns/op +BenchmarkSmall/memdb-1000-100-4-10/insert-4 20000 70119 ns/op +BenchmarkSmall/memdb-1000-100-4-10/delete-4 50000 49289 ns/op +Init Tree took 0.47 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-4 300000 5219 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-4 300000 6329 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-4 30000 65849 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/tmsp-4 10000 157153 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/insert-4 10000 223123 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/delete-4 20000 105517 ns/op +Init Tree took 0.48 MB +BenchmarkSmall/leveldb-1000-100-4-10/query-miss-4 200000 6629 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/query-hits-4 200000 7247 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/update-4 20000 60328 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/tmsp-4 10000 133760 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/insert-4 10000 128193 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/delete-4 20000 87809 
ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 57.747s +Init Tree took 47.20 MB +BenchmarkMedium/nodb-100000-100-16-40/query-miss-4 1000000 1372 ns/op +BenchmarkMedium/nodb-100000-100-16-40/query-hits-4 1000000 1630 ns/op +BenchmarkMedium/nodb-100000-100-16-40/update-4 50000 27774 ns/op +BenchmarkMedium/nodb-100000-100-16-40/tmsp-4 50000 35904 ns/op +BenchmarkMedium/nodb-100000-100-16-40/insert-4 50000 29889 ns/op +BenchmarkMedium/nodb-100000-100-16-40/delete-4 100000 21200 ns/op +Init Tree took 85.08 MB +BenchmarkMedium/memdb-100000-100-16-40/query-miss-4 200000 8863 ns/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-4 200000 9878 ns/op +BenchmarkMedium/memdb-100000-100-16-40/update-4 20000 75840 ns/op +BenchmarkMedium/memdb-100000-100-16-40/tmsp-4 10000 112251 ns/op +BenchmarkMedium/memdb-100000-100-16-40/insert-4 20000 103080 ns/op +BenchmarkMedium/memdb-100000-100-16-40/delete-4 20000 66677 ns/op +Init Tree took 45.17 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-4 50000 21601 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-4 50000 27372 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-4 10000 148820 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/tmsp-4 5000 319488 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/insert-4 2000 530568 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/delete-4 2000 569455 ns/op +Init Tree took 36.12 MB +BenchmarkMedium/leveldb-100000-100-16-40/query-miss-4 50000 23190 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/query-hits-4 50000 27447 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/update-4 10000 147662 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/tmsp-4 5000 310984 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/insert-4 2000 549814 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/delete-4 2000 716094 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 202.957s +Init Tree took 49.20 MB +BenchmarkMemKeySizes/nodb-100000-100-4-80/query-miss-4 1000000 
1079 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/query-hits-4 1000000 1122 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/update-4 100000 25405 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/tmsp-4 50000 35486 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/insert-4 50000 29162 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/delete-4 100000 19531 ns/op +Init Tree took 50.40 MB +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-miss-4 1000000 1224 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-hits-4 1000000 1262 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/update-4 50000 25344 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/tmsp-4 50000 33859 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/insert-4 50000 28705 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/delete-4 100000 19679 ns/op +Init Tree took 52.00 MB +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-miss-4 1000000 1333 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-hits-4 1000000 1348 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/update-4 50000 25319 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/tmsp-4 50000 33329 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/insert-4 50000 29624 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/delete-4 100000 20300 ns/op +Init Tree took 55.20 MB +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-miss-4 1000000 1503 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-hits-4 1000000 1412 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/update-4 50000 26542 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/tmsp-4 50000 33529 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/insert-4 50000 32928 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/delete-4 100000 22184 ns/op +Init Tree took 61.60 MB +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-miss-4 500000 2234 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-hits-4 1000000 2012 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/update-4 50000 28088 ns/op 
+BenchmarkMemKeySizes/nodb-100000-100-128-80/tmsp-4 50000 49556 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/insert-4 50000 31738 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/delete-4 100000 20232 ns/op +Init Tree took 74.40 MB +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-miss-4 500000 2222 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-hits-4 1000000 1592 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/update-4 50000 29363 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/tmsp-4 50000 37905 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/insert-4 50000 33531 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/delete-4 100000 19828 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 147.856s diff --git a/sei-iavl/benchmarks/results/legacy/aws-c4-large-f6f41ca.txt b/sei-iavl/benchmarks/results/legacy/aws-c4-large-f6f41ca.txt new file mode 100644 index 0000000000..e5db99f2ef --- /dev/null +++ b/sei-iavl/benchmarks/results/legacy/aws-c4-large-f6f41ca.txt @@ -0,0 +1,118 @@ +make[1]: Entering directory '/home/ubuntu/go/src/github.com/tendermint/merkleeyes/iavl' +cd benchmarks && \ + go test -bench=RandomBytes . && \ + go test -bench=Small . && \ + go test -bench=Medium . && \ + go test -bench=BenchmarkMemKeySizes . 
+BenchmarkRandomBytes/random-4-2 20000000 58.1 ns/op +BenchmarkRandomBytes/random-16-2 20000000 85.5 ns/op +BenchmarkRandomBytes/random-32-2 20000000 118 ns/op +BenchmarkRandomBytes/random-100-2 5000000 267 ns/op +BenchmarkRandomBytes/random-1000-2 1000000 2129 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 9.276s +Init Tree took 0.42 MB +BenchmarkSmall/nodb-1000-100-4-10/query-miss-2 5000000 316 ns/op +BenchmarkSmall/nodb-1000-100-4-10/query-hits-2 5000000 297 ns/op +BenchmarkSmall/nodb-1000-100-4-10/update-2 200000 8469 ns/op +BenchmarkSmall/nodb-1000-100-4-10/tmsp-2 100000 31893 ns/op +BenchmarkSmall/nodb-1000-100-4-10/insert-2 100000 30303 ns/op +BenchmarkSmall/nodb-1000-100-4-10/delete-2 200000 20423 ns/op +Init Tree took 0.84 MB +BenchmarkSmall/memdb-1000-100-4-10/query-miss-2 500000 3156 ns/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-2 500000 3527 ns/op +BenchmarkSmall/memdb-1000-100-4-10/update-2 100000 22616 ns/op +BenchmarkSmall/memdb-1000-100-4-10/tmsp-2 30000 76474 ns/op +BenchmarkSmall/memdb-1000-100-4-10/insert-2 30000 69403 ns/op +BenchmarkSmall/memdb-1000-100-4-10/delete-2 50000 45710 ns/op +Init Tree took 0.47 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-2 300000 5451 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-2 200000 6857 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-2 20000 83362 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/tmsp-2 10000 165200 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/insert-2 10000 158884 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/delete-2 20000 116426 ns/op +Init Tree took -7.19 MB +BenchmarkSmall/leveldb-1000-100-4-10/query-miss-2 300000 5168 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/query-hits-2 200000 6381 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/update-2 20000 83882 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/tmsp-2 10000 154254 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/insert-2 10000 152926 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/delete-2 20000 
115319 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 63.175s +Init Tree took 47.20 MB +BenchmarkMedium/nodb-100000-100-16-40/query-miss-2 2000000 971 ns/op +BenchmarkMedium/nodb-100000-100-16-40/query-hits-2 2000000 981 ns/op +BenchmarkMedium/nodb-100000-100-16-40/update-2 50000 30686 ns/op +BenchmarkMedium/nodb-100000-100-16-40/tmsp-2 30000 44455 ns/op +BenchmarkMedium/nodb-100000-100-16-40/insert-2 50000 35721 ns/op +BenchmarkMedium/nodb-100000-100-16-40/delete-2 50000 25793 ns/op +Init Tree took 85.13 MB +BenchmarkMedium/memdb-100000-100-16-40/query-miss-2 200000 7887 ns/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-2 200000 8736 ns/op +BenchmarkMedium/memdb-100000-100-16-40/update-2 20000 86067 ns/op +BenchmarkMedium/memdb-100000-100-16-40/tmsp-2 10000 122596 ns/op +BenchmarkMedium/memdb-100000-100-16-40/insert-2 20000 92261 ns/op +BenchmarkMedium/memdb-100000-100-16-40/delete-2 20000 66919 ns/op +Init Tree took 45.41 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-2 100000 17993 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-2 100000 22781 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-2 10000 210399 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/tmsp-2 3000 371019 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/insert-2 2000 597672 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/delete-2 2000 660506 ns/op +Init Tree took 36.39 MB +BenchmarkMedium/leveldb-100000-100-16-40/query-miss-2 100000 17543 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/query-hits-2 100000 22734 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/update-2 10000 208948 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/tmsp-2 3000 392419 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/insert-2 2000 622588 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/delete-2 2000 580446 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 185.401s +Init Tree took 49.20 MB +BenchmarkMemKeySizes/nodb-100000-100-4-80/query-miss-2 
2000000 875 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/query-hits-2 2000000 928 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/update-2 50000 30808 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/tmsp-2 30000 41638 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/insert-2 50000 35774 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/delete-2 50000 23646 ns/op +Init Tree took 50.40 MB +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-miss-2 1000000 1005 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-hits-2 1000000 1002 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/update-2 50000 31509 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/tmsp-2 30000 41108 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/insert-2 50000 36185 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/delete-2 50000 23535 ns/op +Init Tree took 52.00 MB +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-miss-2 1000000 1023 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-hits-2 1000000 1010 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/update-2 50000 31243 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/tmsp-2 50000 43901 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/insert-2 50000 35957 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/delete-2 50000 23259 ns/op +Init Tree took 55.20 MB +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-miss-2 1000000 1104 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-hits-2 2000000 937 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/update-2 50000 31014 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/tmsp-2 50000 40717 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/insert-2 50000 35122 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/delete-2 50000 22827 ns/op +Init Tree took 61.60 MB +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-miss-2 1000000 1343 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-hits-2 2000000 978 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/update-2 50000 31088 ns/op 
+BenchmarkMemKeySizes/nodb-100000-100-128-80/tmsp-2 50000 42017 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/insert-2 50000 35814 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/delete-2 100000 21684 ns/op +Init Tree took 74.40 MB +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-miss-2 1000000 1686 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-hits-2 2000000 937 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/update-2 50000 31305 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/tmsp-2 50000 41948 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/insert-2 50000 35809 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/delete-2 100000 20561 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 160.512s +make[1]: Leaving directory '/home/ubuntu/go/src/github.com/tendermint/merkleeyes/iavl' diff --git a/sei-iavl/benchmarks/results/legacy/digital-ocean-2gb-2717167.txt b/sei-iavl/benchmarks/results/legacy/digital-ocean-2gb-2717167.txt new file mode 100644 index 0000000000..c926dc3006 --- /dev/null +++ b/sei-iavl/benchmarks/results/legacy/digital-ocean-2gb-2717167.txt @@ -0,0 +1,118 @@ +make[1]: Entering directory '/root/go/src/github.com/tendermint/merkleeyes/iavl' +cd benchmarks && \ + go test -bench=RandomBytes . && \ + go test -bench=Small . && \ + go test -bench=Medium . && \ + go test -bench=BenchmarkMemKeySizes . 
+BenchmarkRandomBytes/random-4-2 10000000 109 ns/op +BenchmarkRandomBytes/random-16-2 10000000 138 ns/op +BenchmarkRandomBytes/random-32-2 10000000 293 ns/op +BenchmarkRandomBytes/random-100-2 2000000 929 ns/op +BenchmarkRandomBytes/random-1000-2 300000 7309 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 11.873s +Init Tree took 0.42 MB +BenchmarkSmall/nodb-1000-100-4-10/query-miss-2 2000000 699 ns/op +BenchmarkSmall/nodb-1000-100-4-10/query-hits-2 2000000 709 ns/op +BenchmarkSmall/nodb-1000-100-4-10/update-2 100000 25067 ns/op +BenchmarkSmall/nodb-1000-100-4-10/tmsp-2 30000 79326 ns/op +BenchmarkSmall/nodb-1000-100-4-10/insert-2 30000 71205 ns/op +BenchmarkSmall/nodb-1000-100-4-10/delete-2 50000 37782 ns/op +Init Tree took 0.84 MB +BenchmarkSmall/memdb-1000-100-4-10/query-miss-2 100000 11325 ns/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-2 300000 9585 ns/op +BenchmarkSmall/memdb-1000-100-4-10/update-2 20000 88439 ns/op +BenchmarkSmall/memdb-1000-100-4-10/tmsp-2 10000 235413 ns/op +BenchmarkSmall/memdb-1000-100-4-10/insert-2 10000 231453 ns/op +BenchmarkSmall/memdb-1000-100-4-10/delete-2 10000 121835 ns/op +Init Tree took 0.47 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-2 100000 10942 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-2 100000 13878 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-2 10000 125941 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/tmsp-2 5000 228558 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/insert-2 10000 439718 ns/op +BenchmarkSmall/goleveldb-1000-100-4-10/delete-2 10000 254810 ns/op +Init Tree took 0.47 MB +BenchmarkSmall/leveldb-1000-100-4-10/query-miss-2 200000 8161 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/query-hits-2 200000 10279 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/update-2 10000 140223 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/tmsp-2 5000 266231 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/insert-2 10000 440206 ns/op +BenchmarkSmall/leveldb-1000-100-4-10/delete-2 10000 
262984 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 61.918s +Init Tree took 47.20 MB +BenchmarkMedium/nodb-100000-100-16-40/query-miss-2 500000 3026 ns/op +BenchmarkMedium/nodb-100000-100-16-40/query-hits-2 500000 2226 ns/op +BenchmarkMedium/nodb-100000-100-16-40/update-2 30000 47671 ns/op +BenchmarkMedium/nodb-100000-100-16-40/tmsp-2 20000 91781 ns/op +BenchmarkMedium/nodb-100000-100-16-40/insert-2 20000 70797 ns/op +BenchmarkMedium/nodb-100000-100-16-40/delete-2 30000 47361 ns/op +Init Tree took 85.08 MB +BenchmarkMedium/memdb-100000-100-16-40/query-miss-2 50000 22383 ns/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-2 100000 15686 ns/op +BenchmarkMedium/memdb-100000-100-16-40/update-2 10000 146801 ns/op +BenchmarkMedium/memdb-100000-100-16-40/tmsp-2 5000 301393 ns/op +BenchmarkMedium/memdb-100000-100-16-40/insert-2 10000 123555 ns/op +BenchmarkMedium/memdb-100000-100-16-40/delete-2 10000 130045 ns/op +Init Tree took 45.29 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-2 10000 331146 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-2 20000 72753 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-2 5000 655807 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/tmsp-2 1000 1174947 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/insert-2 1000 1570246 ns/op +BenchmarkMedium/goleveldb-100000-100-16-40/delete-2 1000 1500384 ns/op +Init Tree took 36.45 MB +BenchmarkMedium/leveldb-100000-100-16-40/query-miss-2 3000 382304 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/query-hits-2 30000 68459 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/update-2 5000 407120 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/tmsp-2 3000 962354 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/insert-2 1000 1806111 ns/op +BenchmarkMedium/leveldb-100000-100-16-40/delete-2 1000 1043286 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 321.607s +Init Tree took 49.20 MB 
+BenchmarkMemKeySizes/nodb-100000-100-4-80/query-miss-2 500000 3863 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/query-hits-2 500000 2577 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/update-2 30000 54838 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/tmsp-2 10000 110951 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/insert-2 30000 82763 ns/op +BenchmarkMemKeySizes/nodb-100000-100-4-80/delete-2 50000 36173 ns/op +Init Tree took 50.40 MB +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-miss-2 500000 2017 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/query-hits-2 1000000 1852 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/update-2 50000 63354 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/tmsp-2 20000 104027 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/insert-2 30000 48495 ns/op +BenchmarkMemKeySizes/nodb-100000-100-16-80/delete-2 50000 37681 ns/op +Init Tree took 52.00 MB +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-miss-2 1000000 2594 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/query-hits-2 500000 3050 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/update-2 30000 45242 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/tmsp-2 20000 80852 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/insert-2 30000 48661 ns/op +BenchmarkMemKeySizes/nodb-100000-100-32-80/delete-2 50000 42489 ns/op +Init Tree took 55.20 MB +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-miss-2 1000000 2733 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/query-hits-2 1000000 2477 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/update-2 30000 71995 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/tmsp-2 30000 60682 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/insert-2 30000 65346 ns/op +BenchmarkMemKeySizes/nodb-100000-100-64-80/delete-2 50000 32863 ns/op +Init Tree took 61.60 MB +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-miss-2 500000 3102 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/query-hits-2 500000 3365 ns/op 
+BenchmarkMemKeySizes/nodb-100000-100-128-80/update-2 30000 68945 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/tmsp-2 20000 62037 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/insert-2 20000 79507 ns/op +BenchmarkMemKeySizes/nodb-100000-100-128-80/delete-2 50000 48408 ns/op +Init Tree took 74.40 MB +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-miss-2 300000 4619 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/query-hits-2 1000000 1975 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/update-2 30000 53841 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/tmsp-2 20000 87730 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/insert-2 30000 49158 ns/op +BenchmarkMemKeySizes/nodb-100000-100-256-80/delete-2 50000 31043 ns/op +PASS +ok github.com/tendermint/merkleeyes/iavl/benchmarks 247.827s +make[1]: Leaving directory '/root/go/src/github.com/tendermint/merkleeyes/iavl' diff --git a/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-8f19f23.txt b/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-8f19f23.txt new file mode 100644 index 0000000000..ca6a7c618f --- /dev/null +++ b/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-8f19f23.txt @@ -0,0 +1,149 @@ +cd benchmarks && \ + go test -bench=RandomBytes . -benchmem && \ + go test -bench=Small . -benchmem && \ + go test -bench=Medium . -benchmem && \ + go test -timeout=30m -bench=Large . -benchmem && \ + go test -bench=Mem . -benchmem && \ + go test -timeout=60m -bench=LevelDB . 
-benchmem +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-24 20000000 72.0 ns/op 4 B/op 1 allocs/op +BenchmarkRandomBytes/random-16-24 20000000 118 ns/op 16 B/op 1 allocs/op +BenchmarkRandomBytes/random-32-24 10000000 156 ns/op 32 B/op 1 allocs/op +BenchmarkRandomBytes/random-100-24 5000000 370 ns/op 112 B/op 1 allocs/op +BenchmarkRandomBytes/random-1000-24 500000 2958 ns/op 1024 B/op 1 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 9.506s +Init Tree took 0.91 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-24 500000 4420 ns/op 435 B/op 9 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-24 200000 5272 ns/op 634 B/op 12 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/update-24 10000 159192 ns/op 42771 B/op 764 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/block-24 100 24824001 ns/op 6620344 B/op 120912 allocs/op +Init Tree took 0.49 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-24 300000 6368 ns/op 649 B/op 13 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-24 200000 8741 ns/op 918 B/op 18 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-24 10000 109113 ns/op 22326 B/op 254 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-24 100 17755475 ns/op 3487423 B/op 39158 allocs/op +Init Tree took 0.49 MB +BenchmarkSmall/leveldb-1000-100-4-10/query-miss-24 300000 6584 ns/op 651 B/op 14 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/query-hits-24 200000 7898 ns/op 918 B/op 18 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/update-24 10000 102305 ns/op 22352 B/op 254 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/block-24 100 16187510 ns/op 3461634 B/op 39097 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 20.641s +Init Tree took 85.10 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-24 200000 9595 ns/op 513 
B/op 10 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-24 200000 10834 ns/op 676 B/op 12 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/update-24 3000 1261394 ns/op 246830 B/op 4746 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/block-24 10 198937291 ns/op 40016809 B/op 795945 allocs/op +Init Tree took 47.42 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-24 50000 22673 ns/op 1594 B/op 27 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-24 50000 28135 ns/op 2144 B/op 35 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-24 10000 294852 ns/op 53115 B/op 592 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-24 50 34807659 ns/op 5968256 B/op 67622 allocs/op +Init Tree took 47.80 MB +BenchmarkMedium/leveldb-100000-100-16-40/query-miss-24 50000 22686 ns/op 1532 B/op 25 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/query-hits-24 50000 27668 ns/op 2159 B/op 35 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/update-24 10000 287108 ns/op 53026 B/op 593 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/block-24 30 35596044 ns/op 6206475 B/op 67118 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 38.966s +Init Tree took 917.91 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkLarge/memdb-1000000-100-16-40/query-miss-24 100000 15911 ns/op 1061 B/op 20 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/query-hits-24 100000 15748 ns/op 829 B/op 15 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/update-24 300 5161409 ns/op 994056 B/op 20570 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/block-24 2 514923704 ns/op 100084344 B/op 2069282 allocs/op +Init Tree took 416.94 MB +BenchmarkLarge/goleveldb-1000000-100-16-40/query-miss-24 20000 60866 ns/op 4902 B/op 82 allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/query-hits-24 30000 49123 ns/op 3745 B/op 62 allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/update-24 10000 478900 ns/op 81376 B/op 836 
allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/block-24 20 53327657 ns/op 10397199 B/op 98883 allocs/op +Init Tree took 404.64 MB +BenchmarkLarge/leveldb-1000000-100-16-40/query-miss-24 20000 57290 ns/op 4590 B/op 76 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/query-hits-24 30000 48691 ns/op 3640 B/op 60 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/update-24 5000 381530 ns/op 69914 B/op 736 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/block-24 30 56220875 ns/op 10656291 B/op 101281 allocs/op +Init Tree took 24.87 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-miss-24 50000 23263 ns/op 1745 B/op 27 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-hits-24 50000 25201 ns/op 2238 B/op 34 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/update-24 10000 283769 ns/op 52149 B/op 563 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/block-24 50 33455540 ns/op 5749534 B/op 64612 allocs/op +Init Tree took 39.73 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-miss-24 50000 23683 ns/op 2716 B/op 28 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-hits-24 50000 32174 ns/op 3716 B/op 37 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/update-24 10000 317281 ns/op 64047 B/op 616 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/block-24 50 44231625 ns/op 7399545 B/op 73233 allocs/op +Init Tree took 264.19 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-miss-24 30000 35564 ns/op 11875 B/op 30 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-hits-24 30000 45093 ns/op 16763 B/op 39 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/update-24 10000 574978 ns/op 207200 B/op 752 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/block-24 20 76693083 ns/op 28530852 B/op 92942 allocs/op +Init Tree took 2676.96 MB 
+BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-miss-24 10000 147724 ns/op 256119 B/op 64 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-hits-24 10000 169279 ns/op 307666 B/op 70 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/update-24 1000 3882880 ns/op 2601694 B/op 639 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/block-24 10 520453137 ns/op 431258147 B/op 102744 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 448.959s +PASS +ok github.com/tendermint/iavl/benchmarks 0.008s +Init Tree took 47.64 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/query-miss-24 50000 22097 ns/op 1601 B/op 27 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/query-hits-24 50000 27734 ns/op 2157 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/update-24 10000 511317 ns/op 78187 B/op 842 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/block-24 500 2681891 ns/op 398000 B/op 4350 allocs/op +Init Tree took 47.60 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/query-miss-24 50000 23397 ns/op 1536 B/op 26 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/query-hits-24 50000 28137 ns/op 2163 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/update-24 10000 349234 ns/op 64083 B/op 704 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/block-24 200 9405218 ns/op 1702630 B/op 18575 allocs/op +Init Tree took 35.87 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/query-miss-24 50000 22684 ns/op 1609 B/op 27 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/query-hits-24 50000 28836 ns/op 2156 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/update-24 10000 279935 ns/op 52609 B/op 595 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/block-24 50 34924252 ns/op 
5994318 B/op 67455 allocs/op +Init Tree took 40.94 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/query-miss-24 50000 21974 ns/op 1568 B/op 26 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/query-hits-24 50000 29079 ns/op 2212 B/op 36 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/update-24 10000 200319 ns/op 40108 B/op 454 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/block-24 10 143250792 ns/op 23631405 B/op 273493 allocs/op +Init Tree took 38.19 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/query-miss-24 50000 22070 ns/op 1543 B/op 26 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/query-hits-24 50000 28305 ns/op 2150 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/update-24 10000 150990 ns/op 31752 B/op 326 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/block-24 3 416917935 ns/op 78008320 B/op 824207 allocs/op +Init Tree took 23.60 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-miss-24 100000 19841 ns/op 1591 B/op 24 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-hits-24 50000 25320 ns/op 2237 B/op 34 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/update-24 10000 310418 ns/op 51051 B/op 564 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/block-24 50 33174907 ns/op 5796150 B/op 64856 allocs/op +Init Tree took 39.77 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-miss-24 100000 23820 ns/op 2638 B/op 27 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-hits-24 50000 28973 ns/op 3720 B/op 37 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/update-24 10000 315650 ns/op 64815 B/op 618 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/block-24 50 41392373 ns/op 7424452 B/op 74022 allocs/op +Init Tree took 266.07 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-miss-24 30000 
33961 ns/op 11884 B/op 30 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-hits-24 30000 45861 ns/op 16535 B/op 39 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/update-24 10000 594813 ns/op 213379 B/op 760 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/block-24 20 77458855 ns/op 28589214 B/op 92146 allocs/op +Init Tree took 2677.35 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-miss-24 10000 148305 ns/op 252620 B/op 63 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-hits-24 10000 169529 ns/op 310811 B/op 71 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/update-24 1000 3540659 ns/op 2632158 B/op 640 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/block-24 10 515252342 ns/op 440797695 B/op 102385 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 243.566s diff --git a/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-c1f6d4e.txt b/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-c1f6d4e.txt new file mode 100644 index 0000000000..5c43e9624e --- /dev/null +++ b/sei-iavl/benchmarks/results/legacy/digital-ocean-64gb-fullbench-memory-c1f6d4e.txt @@ -0,0 +1,149 @@ +cd benchmarks && \ + go test -bench=RandomBytes . -benchmem && \ + go test -bench=Small . -benchmem && \ + go test -bench=Medium . -benchmem && \ + go test -timeout=30m -bench=Large . -benchmem && \ + go test -bench=Mem . -benchmem && \ + go test -timeout=60m -bench=LevelDB . 
-benchmem +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkRandomBytes/random-4-24 20000000 74.8 ns/op 4 B/op 1 allocs/op +BenchmarkRandomBytes/random-16-24 20000000 121 ns/op 16 B/op 1 allocs/op +BenchmarkRandomBytes/random-32-24 10000000 166 ns/op 32 B/op 1 allocs/op +BenchmarkRandomBytes/random-100-24 5000000 376 ns/op 112 B/op 1 allocs/op +BenchmarkRandomBytes/random-1000-24 500000 2943 ns/op 1024 B/op 1 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 9.769s +Init Tree took 0.91 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkSmall/memdb-1000-100-4-10/query-miss-24 300000 4081 ns/op 434 B/op 9 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/query-hits-24 300000 5112 ns/op 633 B/op 12 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/update-24 10000 157089 ns/op 42683 B/op 763 allocs/op +BenchmarkSmall/memdb-1000-100-4-10/block-24 100 25654741 ns/op 6619026 B/op 120940 allocs/op +Init Tree took 0.49 MB +BenchmarkSmall/goleveldb-1000-100-4-10/query-miss-24 200000 7127 ns/op 637 B/op 13 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/query-hits-24 200000 8713 ns/op 918 B/op 18 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/update-24 10000 103509 ns/op 22232 B/op 253 allocs/op +BenchmarkSmall/goleveldb-1000-100-4-10/block-24 100 17394312 ns/op 3483478 B/op 39338 allocs/op +Init Tree took 0.49 MB +BenchmarkSmall/leveldb-1000-100-4-10/query-miss-24 300000 6214 ns/op 646 B/op 13 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/query-hits-24 200000 8254 ns/op 919 B/op 18 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/update-24 10000 107058 ns/op 22312 B/op 254 allocs/op +BenchmarkSmall/leveldb-1000-100-4-10/block-24 100 17031744 ns/op 3482495 B/op 39144 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 19.874s +Init Tree took 85.10 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkMedium/memdb-100000-100-16-40/query-miss-24 200000 10064 ns/op 513 
B/op 10 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/query-hits-24 200000 11143 ns/op 676 B/op 12 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/update-24 3000 1303374 ns/op 246834 B/op 4746 allocs/op +BenchmarkMedium/memdb-100000-100-16-40/block-24 10 190258294 ns/op 40016520 B/op 795943 allocs/op +Init Tree took 47.63 MB +BenchmarkMedium/goleveldb-100000-100-16-40/query-miss-24 50000 22452 ns/op 1539 B/op 26 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/query-hits-24 50000 28301 ns/op 2148 B/op 35 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/update-24 10000 296013 ns/op 52887 B/op 594 allocs/op +BenchmarkMedium/goleveldb-100000-100-16-40/block-24 30 35855483 ns/op 6213133 B/op 67658 allocs/op +Init Tree took 42.26 MB +BenchmarkMedium/leveldb-100000-100-16-40/query-miss-24 50000 22802 ns/op 1595 B/op 27 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/query-hits-24 50000 31757 ns/op 2147 B/op 35 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/update-24 10000 297615 ns/op 52713 B/op 594 allocs/op +BenchmarkMedium/leveldb-100000-100-16-40/block-24 30 36791150 ns/op 6289507 B/op 67963 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 38.736s +Init Tree took 917.92 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkLarge/memdb-1000000-100-16-40/query-miss-24 100000 15781 ns/op 1061 B/op 20 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/query-hits-24 100000 15750 ns/op 829 B/op 15 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/update-24 300 5256647 ns/op 994044 B/op 20570 allocs/op +BenchmarkLarge/memdb-1000000-100-16-40/block-24 2 534785952 ns/op 100083320 B/op 2069277 allocs/op +Init Tree took 416.96 MB +BenchmarkLarge/goleveldb-1000000-100-16-40/query-miss-24 20000 59997 ns/op 4900 B/op 82 allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/query-hits-24 30000 51637 ns/op 3748 B/op 62 allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/update-24 10000 476117 ns/op 81887 B/op 838 
allocs/op +BenchmarkLarge/goleveldb-1000000-100-16-40/block-24 30 56340657 ns/op 10034120 B/op 95529 allocs/op +Init Tree took 404.27 MB +BenchmarkLarge/leveldb-1000000-100-16-40/query-miss-24 20000 62528 ns/op 5003 B/op 81 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/query-hits-24 30000 50966 ns/op 3701 B/op 61 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/update-24 10000 456299 ns/op 86644 B/op 841 allocs/op +BenchmarkLarge/leveldb-1000000-100-16-40/block-24 30 58929008 ns/op 12193146 B/op 100887 allocs/op +Init Tree took 25.20 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-miss-24 50000 20237 ns/op 1659 B/op 26 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-hits-24 50000 26440 ns/op 2248 B/op 34 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/update-24 10000 276849 ns/op 52649 B/op 565 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/block-24 50 33371134 ns/op 5881967 B/op 65264 allocs/op +Init Tree took 39.72 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-miss-24 50000 23127 ns/op 2732 B/op 28 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-hits-24 50000 30518 ns/op 3739 B/op 37 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/update-24 10000 317968 ns/op 63822 B/op 616 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/block-24 50 40372117 ns/op 7424951 B/op 73717 allocs/op +Init Tree took 264.06 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-miss-24 30000 34792 ns/op 11953 B/op 30 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-hits-24 30000 45317 ns/op 16693 B/op 39 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/update-24 5000 521571 ns/op 189390 B/op 695 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/block-24 20 78074233 ns/op 26628664 B/op 93850 allocs/op +Init Tree took 2676.68 MB 
+BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-miss-24 10000 160833 ns/op 257161 B/op 65 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-hits-24 10000 172494 ns/op 312459 B/op 71 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/update-24 1000 3285298 ns/op 2404346 B/op 621 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/block-24 5 482757364 ns/op 364325902 B/op 84213 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 441.311s +PASS +ok github.com/tendermint/iavl/benchmarks 0.008s +Init Tree took 47.04 MB +goos: linux +goarch: amd64 +pkg: github.com/tendermint/iavl/benchmarks +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/query-miss-24 50000 22448 ns/op 1530 B/op 25 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/query-hits-24 50000 32738 ns/op 2172 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/update-24 10000 515236 ns/op 78273 B/op 843 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-5-16-40/block-24 500 2702588 ns/op 402931 B/op 4385 allocs/op +Init Tree took 47.13 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/query-miss-24 50000 23648 ns/op 1598 B/op 27 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/query-hits-24 50000 28103 ns/op 2160 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/update-24 10000 351358 ns/op 65065 B/op 703 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-25-16-40/block-24 200 9918965 ns/op 1711946 B/op 18689 allocs/op +Init Tree took 42.02 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/query-miss-24 50000 24165 ns/op 1620 B/op 27 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/query-hits-24 50000 29367 ns/op 2154 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/update-24 10000 291198 ns/op 53061 B/op 594 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-100-16-40/block-24 30 35941442 ns/op 
6034383 B/op 67049 allocs/op +Init Tree took 45.32 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/query-miss-24 50000 22196 ns/op 1579 B/op 26 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/query-hits-24 50000 28725 ns/op 2154 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/update-24 10000 200024 ns/op 40108 B/op 448 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-400-16-40/block-24 10 152324790 ns/op 23616529 B/op 273242 allocs/op +Init Tree took 38.47 MB +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/query-miss-24 50000 22555 ns/op 1538 B/op 26 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/query-hits-24 50000 28427 ns/op 2159 B/op 35 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/update-24 10000 143810 ns/op 30211 B/op 320 allocs/op +BenchmarkLevelDBBatchSizes/goleveldb-100000-2000-16-40/block-24 3 408154340 ns/op 78403509 B/op 815927 allocs/op +Init Tree took 27.54 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-miss-24 50000 20109 ns/op 1668 B/op 26 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/query-hits-24 50000 25620 ns/op 2243 B/op 34 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/update-24 10000 276745 ns/op 52750 B/op 566 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100/block-24 50 33831408 ns/op 5826982 B/op 64847 allocs/op +Init Tree took 39.80 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-miss-24 50000 23057 ns/op 2722 B/op 28 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/query-hits-24 50000 28850 ns/op 3742 B/op 37 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/update-24 10000 310652 ns/op 63335 B/op 617 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-1000/block-24 30 40865126 ns/op 7547879 B/op 74541 allocs/op +Init Tree took 266.72 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-miss-24 30000 
33848 ns/op 12084 B/op 30 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/query-hits-24 30000 43692 ns/op 16592 B/op 39 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/update-24 10000 588636 ns/op 211307 B/op 754 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-10000/block-24 20 82922740 ns/op 29007420 B/op 93883 allocs/op +Init Tree took 2675.64 MB +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-miss-24 10000 147862 ns/op 250440 B/op 63 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/query-hits-24 10000 178420 ns/op 313624 B/op 72 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/update-24 1000 3606703 ns/op 2569932 B/op 640 allocs/op +BenchmarkLevelDBLargeData/goleveldb-50000-100-32-100000/block-24 5 459105626 ns/op 401755606 B/op 89339 allocs/op +PASS +ok github.com/tendermint/iavl/benchmarks 241.593s diff --git a/sei-iavl/benchmarks/sdk_results/README.md b/sei-iavl/benchmarks/sdk_results/README.md new file mode 100644 index 0000000000..1fef4d4e08 --- /dev/null +++ b/sei-iavl/benchmarks/sdk_results/README.md @@ -0,0 +1,21 @@ +results.csv holds the results of running the SDK benchmark with IAVL pruning enabled, testing different pruning parameters. + +## Schema +every - keepEvery configuration +recent - keepRecent configuration +sim_time - SDK benchmark simulation run time +max_cpu - maximum CPU during simulation +max_mem - maximum memory usage during simulation + +## Steps to reproduce the results + +1. SSH into the machine you want to run the simulation and clone the Cosmos SDK +`git clone https://github.com/cosmos/cosmos-sdk` + +2. Checkout the tim/pruning-test branch +`git checkout tim/pruning-test` + +3. Run the grid_search.py script +`python grid_search.py` + +If you want to try a different range of pruning params, you can modify the script to change the min and max ranges.
diff --git a/sei-iavl/benchmarks/sdk_results/results.csv b/sei-iavl/benchmarks/sdk_results/results.csv new file mode 100644 index 0000000000..f1024d0eb3 --- /dev/null +++ b/sei-iavl/benchmarks/sdk_results/results.csv @@ -0,0 +1,98 @@ +every,recent,sim_time,max_cpu,max_mem +1,151,1514.017,156.4,1312.793 +1,201,1532.657,159.4,1381.539 +1,251,1540.935,153.5,1431.125 +1,301,1538.684,165.0,1473.312 +1,351,1535.435,163.4,1510.496 +1,401,1541.614,167.5,1555.281 +1,451,1508.293,162.8,1568.688 +451,1,1416.687,129.2,312.594 +451,51,1438.157,131.7,662.992 +451,101,1470.564,132.6,926.523 +451,151,1492.237,134.3,1149.09 +451,201,1513.664,133.7,1250.734 +451,251,1515.880,134.9,1258.551 +451,301,1520.870,134.7,1313.852 +451,351,1520.953,134.4,1349.668 +451,401,1498.549,135.5,1396.91 +451,451,1486.171,136.4,1413.352 +101,1,1473.871,136.3,1477.281 +101,51,1451.413,136.3,1473.172 +101,101,1439.962,135.8,1454.371 +101,151,1434.421,135.9,1473.379 +101,201,1434.665,135.3,1474.691 +101,251,1437.749,135.5,1468.234 +101,301,1437.466,135.9,1451.621 +101,351,1438.126,135.9,1453.57 +101,401,1448.480,136.7,1472.062 +101,451,1450.976,135.7,1472.637 +201,1,1423.411,135.9,1469.059 +201,51,1422.885,136.0,1471.879 +201,101,1445.515,135.1,1474.887 +201,151,1426.950,135.0,1447.227 +201,201,1423.495,136.1,1452.746 +201,251,1430.685,136.4,1474.234 +201,301,1427.875,135.0,1445.262 +201,351,1442.899,135.8,1449.84 +201,401,1439.263,135.2,1454.102 +201,451,1445.961,135.9,1474.453 +301,1,1436.947,136.0,1473.016 +301,51,1429.552,135.8,1474.066 +301,101,1436.137,134.6,1472.137 +301,151,1440.573,135.5,1454.711 +301,201,1436.428,135.9,1476.141 +301,251,1432.788,135.9,1475.758 +301,301,1429.419,135.9,1471.59 +301,351,1427.612,135.5,1455.324 +301,401,1467.895,135.6,1374.039 +301,451,1447.902,134.4,1430.773 +401,1,1410.233,129.3,260.105 +401,51,1436.970,131.5,669.082 +401,101,1449.739,132.9,893.953 +401,151,1487.265,133.9,1149.93 +401,201,1485.108,134.2,1244.66 +401,251,1486.935,134.7,1297.875 
+401,301,1500.033,136.1,1307.594 +401,351,1500.460,135.5,1333.438 +401,401,1488.885,136.1,1395.02 +401,451,1467.255,135.3,1428.539 +51,1,1402.913,129.2,300.777 +51,51,1439.816,135.7,1457.918 +51,101,1435.350,135.7,1472.102 +51,151,1448.041,136.1,1470.078 +51,201,1456.272,135.2,1451.703 +51,251,1437.499,135.7,1469.277 +51,301,1464.756,136.6,1474.988 +51,351,1465.644,137.2,1478.207 +51,401,1473.698,136.1,1482.172 +51,451,1466.381,136.0,1455.27 +151,1,1441.365,135.3,1452.402 +151,51,1447.196,136.5,1473.066 +151,101,1458.460,136.0,1473.312 +151,151,1443.220,134.6,1453.773 +151,201,1417.892,136.1,1470.895 +151,251,1430.177,135.7,1478.102 +151,301,1425.994,134.8,1452.426 +151,351,1430.587,135.8,1470.641 +151,401,1414.692,135.4,1465.562 +151,451,1423.519,135.0,1452.387 +251,1,1426.660,135.9,1451.523 +251,51,1439.819,135.4,1459.688 +251,101,1440.728,135.4,1450.691 +251,151,1435.941,136.0,1473.227 +251,201,1447.975,136.4,1472.16 +251,251,1423.207,135.3,1466.766 +251,301,1429.464,135.4,1474.418 +251,351,1427.153,135.9,1452.832 +251,401,1433.500,135.7,1476.754 +251,451,1431.558,135.9,1470.02 +351,1,1401.018,128.9,274.988 +351,51,1417.596,131.1,663.898 +351,101,1434.701,132.9,917.777 +351,151,1470.807,133.8,1169.207 +351,201,1469.728,135.1,1246.5 +351,251,1499.359,134.1,1270.023 +351,301,1478.456,135.3,1313.504 +351,351,1486.347,135.9,1337.0 +351,401,1477.687,135.6,1388.016 +351,451,1460.510,135.5,1422.824 \ No newline at end of file diff --git a/sei-iavl/benchmarks/setup/INSTALL_ROOT.sh b/sei-iavl/benchmarks/setup/INSTALL_ROOT.sh new file mode 100755 index 0000000000..bcd719c01f --- /dev/null +++ b/sei-iavl/benchmarks/setup/INSTALL_ROOT.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +export DEBIAN_FRONTEND=noninteractive + +apt-get update +apt-get -y upgrade +apt-get -y install screen wget git build-essential libsnappy-dev libgflags-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev + +# Installing leveldb from source +cd ~/ +git clone https://github.com/google/leveldb +cd leveldb +git 
checkout v1.7 +make -j2 +cp --preserve=links libleveldb.* /usr/local/lib +cp -r include/leveldb /usr/local/include/ +ldconfig + +# installing rocksdb from source +cd ~/ +git clone https://github.com/facebook/rocksdb +cd rocksdb +git checkout v6.15.5 +make -j4 install-shared +ldconfig + +# install go +cd ~/ +mkdir go +wget https://go.dev/dl/go1.17.6.linux-amd64.tar.gz +tar -C /usr/local -xzf go1.17.6.linux-amd64.tar.gz + diff --git a/sei-iavl/benchmarks/setup/RUN_BENCHMARKS.sh b/sei-iavl/benchmarks/setup/RUN_BENCHMARKS.sh new file mode 100755 index 0000000000..3c9dd4c8d0 --- /dev/null +++ b/sei-iavl/benchmarks/setup/RUN_BENCHMARKS.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +# This runs benchmarks, by default from master branch of +# github.com/cosmos/iavl +# You can customize this by optional command line args +# +# INSTALL_USER.sh [branch] [repouser] +# +# set repouser as your username to time your fork + +BRANCH=${1:-master} +REPOUSER=${2:-tendermint} + +export PATH=$PATH:/usr/local/go/bin +export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin +export GOROOT=/usr/local/go +export GOPATH=$HOME/go + +export CGO_CFLAGS="-I/usr/local/include" +export CGO_LDFLAGS="-L/usr/local/lib -lleveldb -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy -llz4 -lzstd" + +cd ~/ +git clone https://github.com/${REPOUSER}/iavl +cd iavl +git checkout ${BRANCH} + +make bench > results.txt + diff --git a/sei-iavl/cache/cache.go b/sei-iavl/cache/cache.go new file mode 100644 index 0000000000..a5d34332aa --- /dev/null +++ b/sei-iavl/cache/cache.go @@ -0,0 +1,114 @@ +package cache + +import ( + "container/list" + + ibytes "github.com/sei-protocol/sei-chain/sei-iavl/internal/bytes" +) + +// Node represents a node eligible for caching. +type Node interface { + GetCacheKey() []byte +} + +// Cache is an in-memory structure to persist nodes for quick access. +// Please see lruCache for more details about why we need a custom +// cache implementation. +type Cache interface { + // Adds node to cache. 
If full and had to remove the oldest element, + // returns the oldest, otherwise nil. + // CONTRACT: node can never be nil. Otherwise, cache panics. + Add(node Node) Node + + // Returns Node for the key, if exists. nil otherwise. + Get(key []byte) Node + + // Has returns true if node with key exists in cache, false otherwise. + Has(key []byte) bool + + // Remove removes node with key from cache. The removed node is returned. + // if not in cache, return nil. + Remove(key []byte) Node + + // Len returns the cache length. + Len() int +} + +// lruCache is an LRU cache implementation. +// The motivation for using a custom cache implementation is to +// allow for a custom max policy. +// +// Currently, the cache maximum is implemented in terms of the +// number of nodes which is not intuitive to configure. +// Instead, we are planning to add a byte maximum. +// The alternative implementations do not allow for +// customization and the ability to estimate the byte +// size of the cache. +type lruCache struct { + dict map[string]*list.Element // FastNode cache. + maxElementCount int // FastNode the maximum number of nodes in the cache. + ll *list.List // LRU queue of cache elements. Used for deletion. 
+} + +var _ Cache = (*lruCache)(nil) + +func New(maxElementCount int) Cache { + return &lruCache{ + dict: make(map[string]*list.Element), + maxElementCount: maxElementCount, + ll: list.New(), + } +} + +func (c *lruCache) Add(node Node) Node { + keyStr := ibytes.UnsafeBytesToStr(node.GetCacheKey()) + if e, exists := c.dict[keyStr]; exists { + c.ll.MoveToFront(e) + old := e.Value + e.Value = node + return old.(Node) + } + + elem := c.ll.PushFront(node) + c.dict[keyStr] = elem + + if c.ll.Len() > c.maxElementCount { + oldest := c.ll.Back() + return c.remove(oldest) + } + return nil +} + +func (nc *lruCache) Get(key []byte) Node { + if ele, hit := nc.dict[ibytes.UnsafeBytesToStr(key)]; hit { + nc.ll.MoveToFront(ele) + return ele.Value.(Node) + } + return nil +} + +func (c *lruCache) Has(key []byte) bool { + _, exists := c.dict[ibytes.UnsafeBytesToStr(key)] + return exists +} + +func (nc *lruCache) Len() int { + return nc.ll.Len() +} + +func (c *lruCache) Remove(key []byte) Node { + if elem, exists := c.dict[ibytes.UnsafeBytesToStr(key)]; exists { + return c.remove(elem) + } + return nil +} + +func (c *lruCache) remove(e *list.Element) Node { + element := c.ll.Remove(e) + if element != nil { + node := element.(Node) + delete(c.dict, ibytes.UnsafeBytesToStr(node.GetCacheKey())) + return node + } + return nil +} diff --git a/sei-iavl/cache/cache_bench_test.go b/sei-iavl/cache/cache_bench_test.go new file mode 100644 index 0000000000..815e97bffe --- /dev/null +++ b/sei-iavl/cache/cache_bench_test.go @@ -0,0 +1,69 @@ +package cache_test + +import ( + "math/rand" + "testing" + + "github.com/sei-protocol/sei-chain/sei-iavl/cache" +) + +func BenchmarkAdd(b *testing.B) { + b.ReportAllocs() + testcases := map[string]struct { + cacheMax int + keySize int + }{ + "small - max: 10K, key size - 10b": { + cacheMax: 10000, + keySize: 10, + }, + "med - max: 100K, key size 20b": { + cacheMax: 100000, + keySize: 20, + }, + "large - max: 1M, key size 30b": { + cacheMax: 1000000, + keySize: 
30, + }, + } + + for name, tc := range testcases { + cache := cache.New(tc.cacheMax) + b.Run(name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + key := randBytes(tc.keySize) + b.StartTimer() + + _ = cache.Add(&testNode{ + key: key, + }) + } + }) + } +} + +func BenchmarkRemove(b *testing.B) { + b.ReportAllocs() + + cache := cache.New(1000) + existentKeyMirror := [][]byte{} + // Populate cache + for i := 0; i < 50; i++ { + key := randBytes(1000) + + existentKeyMirror = append(existentKeyMirror, key) + + cache.Add(&testNode{ + key: key, + }) + } + + randSeed := 498727689 // For deterministic tests + r := rand.New(rand.NewSource(int64(randSeed))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + key := existentKeyMirror[r.Intn(len(existentKeyMirror))] + _ = cache.Remove(key) + } +} diff --git a/sei-iavl/cache/cache_test.go b/sei-iavl/cache/cache_test.go new file mode 100644 index 0000000000..4dbdd7e0de --- /dev/null +++ b/sei-iavl/cache/cache_test.go @@ -0,0 +1,311 @@ +package cache_test + +import ( + "crypto/rand" + "fmt" + "testing" + + "github.com/sei-protocol/sei-chain/sei-iavl/cache" + "github.com/stretchr/testify/require" +) + +// expectedResult represents the expected result of each add/remove operation. 
+// It can be noneRemoved or the index of the removed node in testNodes +type expectedResult int + +const ( + noneRemoved expectedResult = -1 + // The rest represent the index of the removed node +) + +// testNode is the node used for testing cache implementation +type testNode struct { + key []byte +} + +type cacheOp struct { + testNodexIdx int + expectedResult expectedResult +} + +type testcase struct { + setup func(cache.Cache) + cacheMax int + cacheOps []cacheOp + expectedNodeIndexes []int // contents of the cache once test case completes represent by indexes in testNodes +} + +func (tn *testNode) GetCacheKey() []byte { + return tn.key +} + +const ( + testKey = "key" +) + +var _ cache.Node = (*testNode)(nil) + +var ( + testNodes = []cache.Node{ + &testNode{ + key: []byte(fmt.Sprintf("%s%d", testKey, 1)), + }, + &testNode{ + key: []byte(fmt.Sprintf("%s%d", testKey, 2)), + }, + &testNode{ + key: []byte(fmt.Sprintf("%s%d", testKey, 3)), + }, + } +) + +func Test_Cache_Add(t *testing.T) { + testcases := map[string]testcase{ + "add 1 node with 1 max - added": { + cacheMax: 1, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + }, + expectedNodeIndexes: []int{0}, + }, + "add 1 node twice, cache max 2 - only one added": { + cacheMax: 2, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 0, + expectedResult: 0, + }, + }, + expectedNodeIndexes: []int{0}, + }, + "add 1 node with 0 max - not added and return itself": { + cacheMax: 0, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: 0, + }, + }, + }, + "add 3 nodes with 1 max - first 2 removed": { + cacheMax: 1, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 1, + expectedResult: 0, + }, + { + testNodexIdx: 2, + expectedResult: 1, + }, + }, + expectedNodeIndexes: []int{2}, + }, + "add 3 nodes with 2 max - first removed": { + cacheMax: 2, + cacheOps: []cacheOp{ + { + 
testNodexIdx: 0, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 1, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 2, + expectedResult: 0, + }, + }, + expectedNodeIndexes: []int{1, 2}, + }, + "add 3 nodes with 10 max - non removed": { + cacheMax: 10, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 1, + expectedResult: noneRemoved, + }, + { + testNodexIdx: 2, + expectedResult: noneRemoved, + }, + }, + expectedNodeIndexes: []int{0, 1, 2}, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + cache := cache.New(tc.cacheMax) + + expectedCurSize := 0 + + for _, op := range tc.cacheOps { + + actualResult := cache.Add(testNodes[op.testNodexIdx]) + + expectedResult := op.expectedResult + + if expectedResult == noneRemoved { + require.Nil(t, actualResult) + expectedCurSize++ + } else { + require.NotNil(t, actualResult) + + // Here, op.expectedResult represents the index of the removed node in tc.cacheOps + require.Equal(t, testNodes[int(op.expectedResult)], actualResult) + } + require.Equal(t, expectedCurSize, cache.Len()) + } + + validateCacheContentsAfterTest(t, tc, cache) + }) + } +} + +func Test_Cache_Remove(t *testing.T) { + testcases := map[string]testcase{ + "remove non-existent key, cache max 0 - nil returned": { + cacheMax: 0, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + }, + }, + "remove non-existent key, cache max 1 - nil returned": { + setup: func(c cache.Cache) { + require.Nil(t, c.Add(testNodes[1])) + require.Equal(t, 1, c.Len()) + }, + cacheMax: 1, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + }, + expectedNodeIndexes: []int{1}, + }, + "remove existent key, cache max 1 - removed": { + setup: func(c cache.Cache) { + require.Nil(t, c.Add(testNodes[0])) + require.Equal(t, 1, c.Len()) + }, + cacheMax: 1, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: 0, + }, + }, + }, + 
"remove twice, cache max 1 - removed first time, then nil": { + setup: func(c cache.Cache) { + require.Nil(t, c.Add(testNodes[0])) + require.Equal(t, 1, c.Len()) + }, + cacheMax: 1, + cacheOps: []cacheOp{ + { + testNodexIdx: 0, + expectedResult: 0, + }, + { + testNodexIdx: 0, + expectedResult: noneRemoved, + }, + }, + }, + "remove all, cache max 3": { + setup: func(c cache.Cache) { + require.Nil(t, c.Add(testNodes[0])) + require.Nil(t, c.Add(testNodes[1])) + require.Nil(t, c.Add(testNodes[2])) + require.Equal(t, 3, c.Len()) + }, + cacheMax: 3, + cacheOps: []cacheOp{ + { + testNodexIdx: 2, + expectedResult: 2, + }, + { + testNodexIdx: 0, + expectedResult: 0, + }, + { + testNodexIdx: 1, + expectedResult: 1, + }, + }, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + cache := cache.New(tc.cacheMax) + + if tc.setup != nil { + tc.setup(cache) + } + + expectedCurSize := cache.Len() + + for _, op := range tc.cacheOps { + + actualResult := cache.Remove(testNodes[op.testNodexIdx].GetCacheKey()) + + expectedResult := op.expectedResult + + if expectedResult == noneRemoved { + require.Nil(t, actualResult) + } else { + expectedCurSize-- + require.NotNil(t, actualResult) + + // Here, op.expectedResult represents the index of the removed node in tc.cacheOps + require.Equal(t, testNodes[int(op.expectedResult)], actualResult) + } + require.Equal(t, expectedCurSize, cache.Len()) + } + + validateCacheContentsAfterTest(t, tc, cache) + }) + } +} + +func validateCacheContentsAfterTest(t *testing.T, tc testcase, cache cache.Cache) { + require.Equal(t, len(tc.expectedNodeIndexes), cache.Len()) + for _, idx := range tc.expectedNodeIndexes { + expectedNode := testNodes[idx] + require.True(t, cache.Has(expectedNode.GetCacheKey())) + require.Equal(t, expectedNode, cache.Get(expectedNode.GetCacheKey())) + } +} + +func randBytes(length int) []byte { + key := make([]byte, length) + // math.rand.Read always returns err=nil + // we do not need cryptographic 
randomness for this test: + //nolint:gosec + rand.Read(key) + return key +} diff --git a/sei-iavl/cmd/iaviewer/README.md b/sei-iavl/cmd/iaviewer/README.md new file mode 100644 index 0000000000..ef21f1b983 --- /dev/null +++ b/sei-iavl/cmd/iaviewer/README.md @@ -0,0 +1,118 @@ +# IaViewer + +`iaviewer` is a utility to inspect the contents of a persisted iavl tree, given (a copy of) the leveldb store. +This can be quite useful for debugging, especially when you find odd errors, or non-deterministic behavior. +Below is a brief introduction to the tool. + +## Installation + +Once this is merged into the offical repo, master, you should be able to do: + +```shell +go get github.com/cosmos/iavl +cd ${GOPATH}/src/github.com/cosmos/iavl +make install +``` + +## Using the tool + +First make sure it is properly installed and you have `${GOPATH}/bin` in your `PATH`. +Typing `iaviewer` should run and print out a usage message. + +### Sample databases + +Once you understand the tool, you will most likely want to run it on captures from your +own abci app (built on cosmos-sdk or weave), but as a tutorial, you can try to use some +captures from an actual bug I found in my code... Same data, different hash. + +```shell +mkdir ./testdata +cd ./testdata +curl -L https://github.com/iov-one/iavl/files/2860877/bns-a.db.zip > bns-a.db.zip +unzip bns-a.db.zip +curl -L https://github.com/iov-one/iavl/files/2860878/bns-b.db.zip > bns-b.db.zip +unzip bns-b.db.zip +``` + +Now, if you run `ls -l`, you should see two directories... `bns-a.db` and `bns-b.db` + +### Inspecting available versions + +```shell +iaviewer versions ./bns-a.db "" +``` + +This should print out a list of 20 versions of the code. Note the the iavl tree will persist multiple +historical versions, which is a great aid in forensic queries (thanks Tendermint team!). For the rest +of the cases, we will consider only the last two versions, 190257 (last one where they match) and 190258 +(where they are different). 
+ +### Checking keys and app hash + +First run these two and take a quick a look at the output: + +```shell +iaviewer data ./bns-a.db "" +iaviewer data ./bns-a.db "" 190257 +``` + +Notice you see the different heights and there is a change in size and app hash. +That's what happens when we process a transaction. Let's go further and use +the handy tool `diff` to compare two states. + +```shell +iaviewer data ./bns-a.db "" 190257 > a-last.data +iaviewer data ./bns-b.db "" 190257 > b-last.data + +diff a-last.data b-last.data +``` + +Same, same :) +But if we take the current version... + +```shell +iaviewer data ./bns-a.db "" 190258 > a-cur.data +iaviewer data ./bns-b.db "" 190258 > b-cur.data + +diff a-cur.data b-cur.data +``` + +Hmmm... everything is the same, except the hash. Odd... +So odd that I [wrote an article about it](https://medium.com/@ethan.frey/tracking-down-a-tendermint-consensus-failure-77f6ff414406) + +And finally, if we want to inspect which keys were modified in the last block: + +```shell +diff a-cur.data a-last.data +``` + +You should see 6 writes.. the `_i.usernft_*` are the secondary indexes on the username nft. +`sigs.*` is setting the nonce (if this were an update, you would see a previous value). +And `usrnft:*` is creating the actual username nft. + +### Checking the tree shape + +So, remember above, when we found that the current state of a and b have the same data +but different hashes. This must be due to the shape of the iavl tree. +To confirm that, and possibly get more insights, there is another command. + +```shell +iaviewer shape ./bns-a.db "" 190258 > a-cur.shape +iaviewer shape ./bns-b.db "" 190258 > b-cur.shape + +diff a-cur.shape b-cur.shape +``` + +Yup, that is quite some difference. You can also look at the tree as a whole. +So, stretch your terminal nice and wide, and... + +```shell +less a-cur.shape +``` + +It has `-5 ` for an inner node of depth 5, and `*6 ` for a leaf node (data) of depth 6. 
+Indentation also suggests the shape of the tree. + +Note, if anyone wants to improve the visualization, that would be awesome. +I have no idea how to do this well, but at least text output makes some +sense and is diff-able. \ No newline at end of file diff --git a/sei-iavl/cmd/iaviewer/main.go b/sei-iavl/cmd/iaviewer/main.go new file mode 100644 index 0000000000..526cf20942 --- /dev/null +++ b/sei-iavl/cmd/iaviewer/main.go @@ -0,0 +1,226 @@ +package main + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + + dbm "github.com/tendermint/tm-db" + + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + ibytes "github.com/sei-protocol/sei-chain/sei-iavl/internal/bytes" +) + +// TODO: make this configurable? +const ( + DefaultCacheSize int = 10000 +) + +func main() { + args := os.Args[1:] + if len(args) < 3 || (args[0] != "data" && args[0] != "keys" && args[0] != "shape" && args[0] != "versions" && args[0] != "size") { + fmt.Fprintln(os.Stderr, "Usage: iaviewer [version number]") + fmt.Fprintln(os.Stderr, " is the prefix of db, and the iavl tree of different modules in cosmos-sdk uses ") + fmt.Fprintln(os.Stderr, "different to identify, just like \"s/k:gov/\" represents the prefix of gov module") + os.Exit(1) + } + + version := 0 + if len(args) == 4 { + var err error + version, err = strconv.Atoi(args[3]) + if err != nil { + fmt.Fprintf(os.Stderr, "Invalid version number: %s\n", err) + os.Exit(1) + } + } + + tree, err := ReadTree(args[1], version, []byte(args[2])) + if err != nil { + fmt.Fprintf(os.Stderr, "Error reading data: %s\n", err) + os.Exit(1) + } + treeHash, err := tree.Hash() + if err != nil { + fmt.Fprintf(os.Stderr, "Error hashing tree: %s\n", err) + os.Exit(1) + } + fmt.Printf("Tree hash is %X, tree size is %d\n", treeHash, tree.ImmutableTree().Size()) + + switch args[0] { + case "data": + PrintTreeData(tree, false) + case "keys": + PrintTreeData(tree, true) + case "shape": + PrintShape(tree) + case "versions": + 
PrintVersions(tree) + case "size": + PrintSize(tree) + } +} + +func OpenDB(dir string) (dbm.DB, error) { + switch { + case strings.HasSuffix(dir, ".db"): + dir = dir[:len(dir)-3] + case strings.HasSuffix(dir, ".db/"): + dir = dir[:len(dir)-4] + default: + return nil, fmt.Errorf("database directory must end with .db") + } + // TODO: doesn't work on windows! + cut := strings.LastIndex(dir, "/") + if cut == -1 { + return nil, fmt.Errorf("cannot cut paths on %s", dir) + } + name := dir[cut+1:] + db, err := dbm.NewGoLevelDB(name, dir[:cut]) + if err != nil { + return nil, err + } + return db, nil +} + +// nolint: deadcode +func PrintDBStats(db dbm.DB) { + count := 0 + prefix := map[string]int{} + itr, err := db.Iterator(nil, nil) + if err != nil { + panic(err) + } + + defer func() { _ = itr.Close() }() + for ; itr.Valid(); itr.Next() { + key := ibytes.UnsafeBytesToStr(itr.Key()[:1]) + prefix[key]++ + count++ + } + if err := itr.Error(); err != nil { + panic(err) + } + fmt.Printf("DB contains %d entries\n", count) + for k, v := range prefix { + fmt.Printf(" %s: %d\n", k, v) + } +} + +// ReadTree loads an iavl tree from the directory +// If version is 0, load latest, otherwise, load named version +// The prefix represents which iavl tree you want to read. The iaviwer will always set a prefix. 
+func ReadTree(dir string, version int, prefix []byte) (*iavl.MutableTree, error) { + db, err := OpenDB(dir) + if err != nil { + return nil, err + } + if len(prefix) != 0 { + db = dbm.NewPrefixDB(db, prefix) + } + + tree, err := iavl.NewMutableTree(db, DefaultCacheSize, true) + if err != nil { + return nil, err + } + ver, err := tree.LoadVersion(int64(version)) + fmt.Printf("Got version: %d\n", ver) + return tree, err +} + +func PrintTreeData(tree *iavl.MutableTree, keysOnly bool) { + fmt.Println("Printing all keys with hashed values (to detect diff)") + totalKeySize := 0 + totalValSize := 0 + totalNumKeys := 0 + keyPrefixMap := map[string]int{} + _, err := tree.Iterate(func(key []byte, value []byte) bool { + printKey := parseWeaveKey(key) + if keysOnly { + fmt.Printf("%s\n", printKey) + } else { + digest := sha256.Sum256(value) + fmt.Printf("%s\n %X\n", printKey, digest) + } + totalKeySize += len(key) + totalValSize += len(value) + totalNumKeys++ + keyPrefixMap[fmt.Sprintf("%x", key[0])]++ + return false + }) + if err != nil { + fmt.Printf("Failed to iterate the tree fully: %v\n", err) + } else { + fmt.Printf("Total key count %d, total key bytes %d, total value bytes %d, prefix map %v\n", totalNumKeys, totalKeySize, totalValSize, keyPrefixMap) + } +} + +// parseWeaveKey assumes a separating : where all in front should be ascii, +// and all afterward may be ascii or binary +func parseWeaveKey(key []byte) string { + return encodeID(key) +} + +// casts to a string if it is printable ascii, hex-encodes otherwise +func encodeID(id []byte) string { + for _, b := range id { + if b < 0x20 || b >= 0x80 { + return strings.ToUpper(hex.EncodeToString(id)) + } + } + return string(id) +} + +func PrintShape(tree *iavl.MutableTree) { + // shape := tree.RenderShape(" ", nil) + //TODO: handle this error + shape, _ := tree.ImmutableTree().RenderShape(" ", nodeEncoder) + fmt.Println(strings.Join(shape, "\n")) +} + +func nodeEncoder(id []byte, depth int, isLeaf bool) string { + prefix 
:= fmt.Sprintf("-%d ", depth) + if isLeaf { + prefix = fmt.Sprintf("*%d ", depth) + } + if len(id) == 0 { + return fmt.Sprintf("%s", prefix) + } + return fmt.Sprintf("%s%s", prefix, parseWeaveKey(id)) +} + +func PrintVersions(tree *iavl.MutableTree) { + versions := tree.AvailableVersions() + fmt.Println("Available versions:") + for _, v := range versions { + fmt.Printf(" %d\n", v) + } +} + +func PrintSize(tree *iavl.MutableTree) { + count, totalKeySize, totalValueSize := 0, 0, 0 + keySizeByPrefix, valSizeByPrefix := map[byte]int{}, map[byte]int{} + _, err := tree.Iterate(func(key []byte, value []byte) bool { + count += 1 + totalKeySize += len(key) + totalValueSize += len(value) + if _, ok := keySizeByPrefix[key[0]]; !ok { + keySizeByPrefix[key[0]] = 0 + valSizeByPrefix[key[0]] = 0 + } + keySizeByPrefix[key[0]] += len(key) + valSizeByPrefix[key[0]] += len(value) + return false + }) + if err != nil { + fmt.Printf("Failed to iterate the tree fully: %v\n", err) + return + } + fmt.Printf("Total entry count: %d. Total key bytes: %d. Total value bytes: %d\n", count, totalKeySize, totalValueSize) + for p := range keySizeByPrefix { + fmt.Printf("prefix %d has key bytes %d and value bytes %d\n", p, keySizeByPrefix[p], valSizeByPrefix[p]) + } +} diff --git a/sei-iavl/diff.go b/sei-iavl/diff.go new file mode 100644 index 0000000000..9d3ec3edd2 --- /dev/null +++ b/sei-iavl/diff.go @@ -0,0 +1,13 @@ +package iavl + +import ( + "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +type ( + KVPair = proto.KVPair + ChangeSet = proto.ChangeSet +) + +// KVPairReceiver is callback parameter of method `extractStateChanges` to receive stream of `KVPair`s. +type KVPairReceiver func(pair *KVPair) error diff --git a/sei-iavl/doc.go b/sei-iavl/doc.go new file mode 100644 index 0000000000..493f5bf286 --- /dev/null +++ b/sei-iavl/doc.go @@ -0,0 +1,52 @@ +// Package iavl implements a versioned, snapshottable (immutable) AVL+ tree +// for persisting key-value pairs. 
+// +// The tree is not safe for concurrent use, and must be guarded by a Mutex +// or RWLock as appropriate - the exception is immutable trees returned by +// MutableTree.GetImmutable() which are safe for concurrent use as long as +// the version is not deleted via DeleteVersion(). +// +// Basic usage of MutableTree: +// +// import "github.com/sei-protocol/sei-chain/sei-iavl" +// import "github.com/tendermint/tm-db" +// ... +// +// tree := iavl.NewMutableTree(db.NewMemDB(), 128) +// +// tree.IsEmpty() // true +// +// tree.Set([]byte("alice"), []byte("abc")) +// tree.SaveVersion(1) +// +// tree.Set([]byte("alice"), []byte("xyz")) +// tree.Set([]byte("bob"), []byte("xyz")) +// tree.SaveVersion(2) +// +// tree.LatestVersion() // 2 +// +// tree.GetVersioned([]byte("alice"), 1) // "abc" +// tree.GetVersioned([]byte("alice"), 2) // "xyz" +// +// Proof of existence: +// +// root := tree.Hash() +// val, proof, err := tree.GetVersionedWithProof([]byte("bob"), 2) // "xyz", RangeProof, nil +// proof.Verify([]byte("bob"), val, root) // nil +// +// Proof of absence: +// +// _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 2) // nil, RangeProof, nil +// proof.Verify([]byte("tom"), nil, root) // nil +// +// Now we delete an old version: +// +// tree.DeleteVersion(1) +// tree.VersionExists(1) // false +// tree.Get([]byte("alice")) // "xyz" +// tree.GetVersioned([]byte("alice"), 1) // nil +// +// Can't create a proof of absence for a version we no longer have: +// +// _, proof, err = tree.GetVersionedWithProof([]byte("tom"), 1) // nil, nil, error +package iavl diff --git a/sei-iavl/export.go b/sei-iavl/export.go new file mode 100644 index 0000000000..387dc9121a --- /dev/null +++ b/sei-iavl/export.go @@ -0,0 +1,89 @@ +package iavl + +import ( + "context" + + "github.com/pkg/errors" +) + +// exportBufferSize is the number of nodes to buffer in the exporter. 
It improves throughput by +// processing multiple nodes per context switch, but take care to avoid excessive memory usage, +// especially since callers may export several IAVL stores in parallel (e.g. the Cosmos SDK). +const exportBufferSize = 32 + +// ExportDone is returned by Exporter.Next() when all items have been exported. +// nolint:revive +var ExportDone = errors.New("export is complete") // nolint:golint + +// ExportNode contains exported node data. +type ExportNode struct { + Key []byte + Value []byte + Version int64 + Height int8 +} + +// Exporter exports nodes from an ImmutableTree. It is created by ImmutableTree.Export(). +// +// Exported nodes can be imported into an empty tree with MutableTree.Import(). Nodes are exported +// depth-first post-order (LRN), this order must be preserved when importing in order to recreate +// the same tree structure. +type Exporter struct { + tree *ImmutableTree + ch chan *ExportNode + cancel context.CancelFunc +} + +// NewExporter creates a new Exporter. Callers must call Close() when done. +func newExporter(tree *ImmutableTree) *Exporter { + ctx, cancel := context.WithCancel(context.Background()) + exporter := &Exporter{ + tree: tree, + ch: make(chan *ExportNode, exportBufferSize), + cancel: cancel, + } + + tree.ndb.incrVersionReaders(tree.version) + go exporter.export(ctx) + + return exporter +} + +// export exports nodes +func (e *Exporter) export(ctx context.Context) { + e.tree.root.traversePost(e.tree, true, func(node *Node) bool { + exportNode := &ExportNode{ + Key: node.GetNodeKey(), + Value: node.GetValue(), + Version: node.GetVersion(), + Height: node.GetHeight(), + } + + select { + case e.ch <- exportNode: + return false + case <-ctx.Done(): + return true + } + }) + close(e.ch) +} + +// Next fetches the next exported node, or returns ExportDone when done. 
+func (e *Exporter) Next() (*ExportNode, error) { + if exportNode, ok := <-e.ch; ok { + return exportNode, nil + } + return nil, ExportDone +} + +// Close closes the exporter. It is safe to call multiple times. +func (e *Exporter) Close() { + e.cancel() + for range e.ch { // drain channel + } + if e.tree != nil { + e.tree.ndb.decrVersionReaders(e.tree.version) + } + e.tree = nil +} diff --git a/sei-iavl/export_test.go b/sei-iavl/export_test.go new file mode 100644 index 0000000000..f75effc51e --- /dev/null +++ b/sei-iavl/export_test.go @@ -0,0 +1,305 @@ +package iavl + +import ( + "math" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + db "github.com/tendermint/tm-db" +) + +// setupExportTreeBasic sets up a basic tree with a handful of +// create/update/delete operations over a few versions. +func setupExportTreeBasic(t require.TestingT) *ImmutableTree { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + tree.Set([]byte("x"), []byte{255}) + tree.Set([]byte("z"), []byte{255}) + tree.Set([]byte("a"), []byte{1}) + tree.Set([]byte("b"), []byte{2}) + tree.Set([]byte("c"), []byte{3}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + tree.Remove([]byte("x")) + tree.Remove([]byte("b")) + tree.Set([]byte("c"), []byte{255}) + tree.Set([]byte("d"), []byte{4}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + tree.Set([]byte("b"), []byte{2}) + tree.Set([]byte("c"), []byte{3}) + tree.Set([]byte("e"), []byte{5}) + tree.Remove([]byte("z")) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + + itree, err := tree.GetImmutable(version) + require.NoError(t, err) + return itree +} + +// setupExportTreeRandom sets up a randomly generated tree. 
+// nolint: dupl +func setupExportTreeRandom(t *testing.T) *ImmutableTree { + const ( + randSeed = 49872768940 // For deterministic tests + keySize = 16 + valueSize = 16 + + versions = 8 // number of versions to generate + versionOps = 1024 // number of operations (create/update/delete) per version + updateRatio = 0.4 // ratio of updates out of all operations + deleteRatio = 0.2 // ratio of deletes out of all operations + ) + + r := rand.New(rand.NewSource(randSeed)) + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + var version int64 + keys := make([][]byte, 0, versionOps) + for i := 0; i < versions; i++ { + for j := 0; j < versionOps; j++ { + key := make([]byte, keySize) + value := make([]byte, valueSize) + + // The performance of this is likely to be terrible, but that's fine for small tests + switch { + case len(keys) > 0 && r.Float64() <= deleteRatio: + index := r.Intn(len(keys)) + key = keys[index] + keys = append(keys[:index], keys[index+1:]...) + _, removed, err := tree.Remove(key) + require.NoError(t, err) + require.True(t, removed) + + case len(keys) > 0 && r.Float64() <= updateRatio: + key = keys[r.Intn(len(keys))] + r.Read(value) + updated, err := tree.Set(key, value) + require.NoError(t, err) + require.True(t, updated) + + default: + r.Read(key) + r.Read(value) + // If we get an update, set again + for updated, err := tree.Set(key, value); updated && err == nil; { + key = make([]byte, keySize) + r.Read(key) + } + keys = append(keys, key) + } + } + _, version, err = tree.SaveVersion() + require.NoError(t, err) + } + + require.EqualValues(t, versions, tree.Version()) + require.GreaterOrEqual(t, tree.ImmutableTree().Size(), int64(math.Trunc(versions*versionOps*(1-updateRatio-deleteRatio))/2)) + + itree, err := tree.GetImmutable(version) + require.NoError(t, err) + return itree +} + +// setupExportTreeSized sets up a single-version tree with a given number +// of randomly generated key/value pairs, useful for benchmarking. 
+func setupExportTreeSized(t require.TestingT, treeSize int) *ImmutableTree { + const ( + randSeed = 49872768940 // For deterministic tests + keySize = 16 + valueSize = 16 + ) + + r := rand.New(rand.NewSource(randSeed)) + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + for i := 0; i < treeSize; i++ { + key := make([]byte, keySize) + value := make([]byte, valueSize) + r.Read(key) + r.Read(value) + updated, err := tree.Set(key, value) + require.NoError(t, err) + + if updated { + i-- + } + } + + _, version, err := tree.SaveVersion() + require.NoError(t, err) + + itree, err := tree.GetImmutable(version) + require.NoError(t, err) + + return itree +} + +func TestExporter(t *testing.T) { + tree := setupExportTreeBasic(t) + + expect := []*ExportNode{ + {Key: []byte("a"), Value: []byte{1}, Version: 1, Height: 0}, + {Key: []byte("b"), Value: []byte{2}, Version: 3, Height: 0}, + {Key: []byte("b"), Value: nil, Version: 3, Height: 1}, + {Key: []byte("c"), Value: []byte{3}, Version: 3, Height: 0}, + {Key: []byte("c"), Value: nil, Version: 3, Height: 2}, + {Key: []byte("d"), Value: []byte{4}, Version: 2, Height: 0}, + {Key: []byte("e"), Value: []byte{5}, Version: 3, Height: 0}, + {Key: []byte("e"), Value: nil, Version: 3, Height: 1}, + {Key: []byte("d"), Value: nil, Version: 3, Height: 3}, + } + + actual := make([]*ExportNode, 0, len(expect)) + exporter := tree.Export() + defer exporter.Close() + for { + node, err := exporter.Next() + if err == ExportDone { + break + } + require.NoError(t, err) + actual = append(actual, node) + } + + assert.Equal(t, expect, actual) +} + +func TestExporter_Import(t *testing.T) { + testcases := map[string]*ImmutableTree{ + "empty tree": NewImmutableTree(db.NewMemDB(), 0, false), + "basic tree": setupExportTreeBasic(t), + } + if !testing.Short() { + testcases["sized tree"] = setupExportTreeSized(t, 4096) + testcases["random tree"] = setupExportTreeRandom(t) + } + + for desc, tree := range testcases { + tree := tree 
+ t.Run(desc, func(t *testing.T) { + t.Parallel() + + exporter := tree.Export() + defer exporter.Close() + + newTree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := newTree.Import(tree.Version()) + require.NoError(t, err) + defer importer.Close() + + for { + item, err := exporter.Next() + if err == ExportDone { + err = importer.Commit() + require.NoError(t, err) + break + } + require.NoError(t, err) + err = importer.Add(item) + require.NoError(t, err) + } + + treeHash, err := tree.Hash() + require.NoError(t, err) + newTreeHash, err := newTree.Hash() + require.NoError(t, err) + + require.Equal(t, treeHash, newTreeHash, "Tree hash mismatch") + require.Equal(t, tree.Size(), newTree.ImmutableTree().Size(), "Tree size mismatch") + require.Equal(t, tree.Version(), newTree.Version(), "Tree version mismatch") + + tree.Iterate(func(key, value []byte) bool { + index, _, err := tree.GetWithIndex(key) + require.NoError(t, err) + newIndex, newValue, err := newTree.ImmutableTree().GetWithIndex(key) + require.NoError(t, err) + require.Equal(t, index, newIndex, "Index mismatch for key %v", key) + require.Equal(t, value, newValue, "Value mismatch for key %v", key) + return false + }) + }) + } +} + +func TestExporter_Close(t *testing.T) { + tree := setupExportTreeSized(t, 4096) + exporter := tree.Export() + + node, err := exporter.Next() + require.NoError(t, err) + require.NotNil(t, node) + + exporter.Close() + node, err = exporter.Next() + require.Error(t, err) + require.Equal(t, ExportDone, err) + require.Nil(t, node) + + node, err = exporter.Next() + require.Error(t, err) + require.Equal(t, ExportDone, err) + require.Nil(t, node) + + exporter.Close() + exporter.Close() +} + +func TestExporter_DeleteVersionErrors(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + tree.Set([]byte("a"), []byte{1}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + tree.Set([]byte("b"), 
[]byte{2}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + tree.Set([]byte("c"), []byte{3}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + itree, err := tree.GetImmutable(2) + require.NoError(t, err) + exporter := itree.Export() + defer exporter.Close() + + err = tree.DeleteVersion(2) + require.Error(t, err) + err = tree.DeleteVersion(1) + require.NoError(t, err) + + exporter.Close() + err = tree.DeleteVersion(2) + require.NoError(t, err) +} + +func BenchmarkExport(b *testing.B) { + b.StopTimer() + tree := setupExportTreeSized(b, 4096) + b.StartTimer() + for n := 0; n < b.N; n++ { + exporter := tree.Export() + for { + _, err := exporter.Next() + if err == ExportDone { + break + } else if err != nil { + b.Error(err) + } + } + exporter.Close() + } +} diff --git a/sei-iavl/fast_iterator.go b/sei-iavl/fast_iterator.go new file mode 100644 index 0000000000..aa423cae1d --- /dev/null +++ b/sei-iavl/fast_iterator.go @@ -0,0 +1,133 @@ +package iavl + +import ( + "errors" + + dbm "github.com/tendermint/tm-db" +) + +var errFastIteratorNilNdbGiven = errors.New("fast iterator must be created with a nodedb but it was nil") + +// FastIterator is a dbm.Iterator for ImmutableTree +// it iterates over the latest state via fast nodes, +// taking advantage of keys being located in sequence in the underlying database. +type FastIterator struct { + start, end []byte + + valid bool + + ascending bool + + err error + + ndb *nodeDB + + nextFastNode *FastNode + + fastIterator dbm.Iterator +} + +var _ dbm.Iterator = (*FastIterator)(nil) + +func NewFastIterator(start, end []byte, ascending bool, ndb *nodeDB) *FastIterator { + iter := &FastIterator{ + start: start, + end: end, + err: nil, + ascending: ascending, + ndb: ndb, + nextFastNode: nil, + fastIterator: nil, + } + // Move iterator before the first element + iter.Next() + return iter +} + +// Domain implements dbm.Iterator. +// Maps the underlying nodedb iterator domain, to the 'logical' keys involved. 
+func (iter *FastIterator) Domain() ([]byte, []byte) { + if iter.fastIterator == nil { + return iter.start, iter.end + } + + start, end := iter.fastIterator.Domain() + + if start != nil { + start = start[1:] + if len(start) == 0 { + start = nil + } + } + + if end != nil { + end = end[1:] + if len(end) == 0 { + end = nil + } + } + + return start, end +} + +// Valid implements dbm.Iterator. +func (iter *FastIterator) Valid() bool { + return iter.fastIterator != nil && iter.fastIterator.Valid() && iter.valid +} + +// Key implements dbm.Iterator +func (iter *FastIterator) Key() []byte { + if iter.valid { + return iter.nextFastNode.key + } + return nil +} + +// Value implements dbm.Iterator +func (iter *FastIterator) Value() []byte { + if iter.valid { + return iter.nextFastNode.value + } + return nil +} + +// Next implements dbm.Iterator +func (iter *FastIterator) Next() { + if iter.ndb == nil { + iter.err = errFastIteratorNilNdbGiven + iter.valid = false + return + } + + if iter.fastIterator == nil { + iter.fastIterator, iter.err = iter.ndb.getFastIterator(iter.start, iter.end, iter.ascending) + iter.valid = true + } else { + iter.fastIterator.Next() + } + + if iter.err == nil { + iter.err = iter.fastIterator.Error() + } + + iter.valid = iter.valid && iter.fastIterator.Valid() + if iter.valid { + iter.nextFastNode, iter.err = DeserializeFastNode(iter.fastIterator.Key()[1:], iter.fastIterator.Value()) + iter.valid = iter.err == nil + } +} + +// Close implements dbm.Iterator +func (iter *FastIterator) Close() error { + if iter.fastIterator != nil { + iter.err = iter.fastIterator.Close() + } + iter.valid = false + iter.fastIterator = nil + return iter.err +} + +// Error implements dbm.Iterator +func (iter *FastIterator) Error() error { + return iter.err +} diff --git a/sei-iavl/fast_node.go b/sei-iavl/fast_node.go new file mode 100644 index 0000000000..8fb93ba39e --- /dev/null +++ b/sei-iavl/fast_node.go @@ -0,0 +1,76 @@ +package iavl + +import ( + "io" + + 
"github.com/pkg/errors" + "github.com/sei-protocol/sei-chain/sei-iavl/cache" + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" +) + +// NOTE: This file favors int64 as opposed to int for size/counts. +// The Tree on the other hand favors int. This is intentional. + +type FastNode struct { + key []byte + versionLastUpdatedAt int64 + value []byte +} + +var _ cache.Node = (*FastNode)(nil) + +// NewFastNode returns a new fast node from a value and version. +func NewFastNode(key []byte, value []byte, version int64) *FastNode { + return &FastNode{ + key: key, + versionLastUpdatedAt: version, + value: value, + } +} + +// DeserializeFastNode constructs an *FastNode from an encoded byte slice. +func DeserializeFastNode(key []byte, buf []byte) (*FastNode, error) { + ver, n, cause := encoding.DecodeVarint(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding fastnode.version") + } + buf = buf[n:] + + val, _, cause := encoding.DecodeBytes(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding fastnode.value") + } + + fastNode := &FastNode{ + key: key, + versionLastUpdatedAt: ver, + value: val, + } + + return fastNode, nil +} + +func (fn *FastNode) GetCacheKey() []byte { + return fn.key +} + +func (node *FastNode) encodedSize() int { + n := encoding.EncodeVarintSize(node.versionLastUpdatedAt) + encoding.EncodeBytesSize(node.value) + return n +} + +// writeBytes writes the FastNode as a serialized byte slice to the supplied io.Writer. 
+func (node *FastNode) writeBytes(w io.Writer) error { + if node == nil { + return errors.New("cannot write nil node") + } + cause := encoding.EncodeVarint(w, node.versionLastUpdatedAt) + if cause != nil { + return errors.Wrap(cause, "writing version last updated at") + } + cause = encoding.EncodeBytes(w, node.value) + if cause != nil { + return errors.Wrap(cause, "writing value") + } + return nil +} diff --git a/sei-iavl/fast_node_test.go b/sei-iavl/fast_node_test.go new file mode 100644 index 0000000000..b6e1ffd984 --- /dev/null +++ b/sei-iavl/fast_node_test.go @@ -0,0 +1,58 @@ +package iavl + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFastNode_encodedSize(t *testing.T) { + fastNode := &FastNode{ + key: randBytes(10), + versionLastUpdatedAt: 1, + value: randBytes(20), + } + + expectedSize := 1 + len(fastNode.value) + 1 + + require.Equal(t, expectedSize, fastNode.encodedSize()) +} + +func TestFastNode_encode_decode(t *testing.T) { + testcases := map[string]struct { + node *FastNode + expectHex string + expectError bool + }{ + "nil": {nil, "", true}, + "empty": {&FastNode{}, "0000", false}, + "inner": {&FastNode{ + key: []byte{0x4}, + versionLastUpdatedAt: 1, + value: []byte{0x2}, + }, "020102", false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + var buf bytes.Buffer + err := tc.node.writeBytes(&buf) + if tc.expectError { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectHex, hex.EncodeToString(buf.Bytes())) + + node, err := DeserializeFastNode(tc.node.key, buf.Bytes()) + require.NoError(t, err) + // since value and leafHash are always decoded to []byte{} we augment the expected struct here + if tc.node.value == nil { + tc.node.value = []byte{} + } + require.Equal(t, tc.node, node) + }) + } +} diff --git a/sei-iavl/immutable_tree.go b/sei-iavl/immutable_tree.go new file mode 100644 index 0000000000..c9962e657a --- 
/dev/null +++ b/sei-iavl/immutable_tree.go @@ -0,0 +1,334 @@ +package iavl + +import ( + "fmt" + "strings" + + dbm "github.com/tendermint/tm-db" +) + +// ImmutableTree contains the immutable tree at a given version. It is typically created by calling +// MutableTree.GetImmutable(), in which case the returned tree is safe for concurrent access as +// long as the version is not deleted via DeleteVersion() or the tree's pruning settings. +// +// Returned key/value byte slices must not be modified, since they may point to data located inside +// IAVL which would also be modified. +type ImmutableTree struct { + root *Node + ndb *nodeDB + version int64 + skipFastStorageUpgrade bool +} + +// NewImmutableTree creates both in-memory and persistent instances +func NewImmutableTree(db dbm.DB, cacheSize int, skipFastStorageUpgrade bool) *ImmutableTree { + if db == nil { + // In-memory Tree. + return &ImmutableTree{} + } + return &ImmutableTree{ + // NodeDB-backed Tree. + ndb: newNodeDB(db, cacheSize, nil), + skipFastStorageUpgrade: skipFastStorageUpgrade, + } +} + +// NewImmutableTreeWithOpts creates an ImmutableTree with the given options. +func NewImmutableTreeWithOpts(db dbm.DB, cacheSize int, opts *Options, skipFastStorageUpgrade bool) *ImmutableTree { + return &ImmutableTree{ + // NodeDB-backed Tree. + ndb: newNodeDB(db, cacheSize, opts), + skipFastStorageUpgrade: skipFastStorageUpgrade, + } +} + +// String returns a string representation of Tree. 
+func (t *ImmutableTree) String() string { + leaves := []string{} + _, _ = t.Iterate(func(key []byte, val []byte) (stop bool) { + leaves = append(leaves, fmt.Sprintf("%x: %x", key, val)) + return false + }) + return "Tree{" + strings.Join(leaves, ", ") + "}" +} + +// RenderShape provides a nested tree shape, ident is prepended in each level +// Returns an array of strings, one per line, to join with "\n" or display otherwise +func (t *ImmutableTree) RenderShape(indent string, encoder NodeEncoder) ([]string, error) { + if encoder == nil { + encoder = defaultNodeEncoder + } + return t.renderNode(t.root, indent, 0, encoder) +} + +// NodeEncoder will take an id (hash, or key for leaf nodes), the depth of the node, +// and whether or not this is a leaf node. +// It returns the string we wish to print, for iaviwer +type NodeEncoder func(id []byte, depth int, isLeaf bool) string + +// defaultNodeEncoder can encode any node unless the client overrides it +func defaultNodeEncoder(id []byte, depth int, isLeaf bool) string { + prefix := "- " + if isLeaf { + prefix = "* " + } + if len(id) == 0 { + return fmt.Sprintf("%s", prefix) + } + return fmt.Sprintf("%s%X", prefix, id) +} + +func (t *ImmutableTree) renderNode(node *Node, indent string, depth int, encoder func([]byte, int, bool) string) ([]string, error) { + prefix := strings.Repeat(indent, depth) + // handle nil + if node == nil { + return []string{fmt.Sprintf("%s", prefix)}, nil + } + // handle leaf + if node.isLeaf() { + here := fmt.Sprintf("%s%s", prefix, encoder(node.GetNodeKey(), depth, true)) + return []string{here}, nil + } + + // recurse on inner node + here := fmt.Sprintf("%s%s", prefix, encoder(node.GetHash(), depth, false)) + + rightNode, err := node.getRightNode(t) + if err != nil { + return nil, err + } + + leftNode, err := node.getLeftNode(t) + if err != nil { + return nil, err + } + + right, err := t.renderNode(rightNode, indent, depth+1, encoder) + if err != nil { + return nil, err + } + + result, err := 
t.renderNode(leftNode, indent, depth+1, encoder) // left + if err != nil { + return nil, err + } + + result = append(result, here) + result = append(result, right...) + return result, nil +} + +// Size returns the number of leaf nodes in the tree. +func (t *ImmutableTree) Size() int64 { + if t.root == nil { + return 0 + } + return t.root.GetSize() +} + +// Version returns the version of the tree. +func (t *ImmutableTree) Version() int64 { + return t.version +} + +// Height returns the height of the tree. +func (t *ImmutableTree) Height() int8 { + if t.root == nil { + return 0 + } + return t.root.GetHeight() +} + +// Has returns whether or not a key exists. +func (t *ImmutableTree) Has(key []byte) (bool, error) { + if t.root == nil { + return false, nil + } + return t.root.has(t, key) +} + +// Hash returns the root hash. +func (t *ImmutableTree) Hash() ([]byte, error) { + hash, _, err := t.root.hashWithCount() + return hash, err +} + +// Export returns an iterator that exports tree nodes as ExportNodes. These nodes can be +// imported with MutableTree.Import() to recreate an identical tree. +func (t *ImmutableTree) Export() *Exporter { + return newExporter(t) +} + +// GetWithIndex returns the index and value of the specified key if it exists, or nil and the next index +// otherwise. The returned value must not be modified, since it may point to data stored within +// IAVL. +// +// The index is the index in the list of leaf nodes sorted lexicographically by key. The leftmost leaf has index 0. +// It's neighbor has index 1 and so on. +func (t *ImmutableTree) GetWithIndex(key []byte) (int64, []byte, error) { + if t.root == nil { + return 0, nil, nil + } + return t.root.get(t, key) +} + +// Get returns the value of the specified key if it exists, or nil. +// The returned value must not be modified, since it may point to data stored within IAVL. +// Get potentially employs a more performant strategy than GetWithIndex for retrieving the value. 
+// If tree.skipFastStorageUpgrade is true, this will work almost the same as GetWithIndex. +func (t *ImmutableTree) Get(key []byte) ([]byte, error) { + if t.root == nil { + return nil, nil + } + + if !t.skipFastStorageUpgrade { + // attempt to get a FastNode directly from db/cache. + // if call fails, fall back to the original IAVL logic in place. + fastNode, err := t.ndb.GetFastNode(key) + if err != nil { + _, result, err := t.root.get(t, key) + return result, err + } + + if fastNode == nil { + // If the tree is of the latest version and fast node is not in the tree + // then the regular node is not in the tree either because fast node + // represents live state. + if t.version == t.ndb.latestVersion { + return nil, nil + } + + _, result, err := t.root.get(t, key) + return result, err + } + + if fastNode.versionLastUpdatedAt <= t.version { + return fastNode.value, nil + } + } + + // otherwise skipFastStorageUpgrade is true or + // the cached node was updated later than the current tree. In this case, + // we need to use the regular stategy for reading from the current tree to avoid staleness. + _, result, err := t.root.get(t, key) + return result, err +} + +// GetByIndex gets the key and value at the specified index. +func (t *ImmutableTree) GetByIndex(index int64) (key []byte, value []byte, err error) { + if t.root == nil { + return nil, nil, nil + } + + return t.root.getByIndex(t, index) +} + +// Iterate iterates over all keys of the tree. The keys and values must not be modified, +// since they may point to data stored within IAVL. 
Returns true if stopped by callback, false otherwise +func (t *ImmutableTree) Iterate(fn func(key []byte, value []byte) bool) (bool, error) { + if t.root == nil { + return false, nil + } + + itr, err := t.Iterator(nil, nil, true) + defer func() { _ = itr.Close() }() + if err != nil { + return false, err + } + for ; itr.Valid(); itr.Next() { + if fn(itr.Key(), itr.Value()) { + return true, nil + } + + } + return false, nil +} + +// Iterator returns an iterator over the immutable tree. +func (t *ImmutableTree) Iterator(start, end []byte, ascending bool) (dbm.Iterator, error) { + if !t.skipFastStorageUpgrade { + isFastCacheEnabled, err := t.IsFastCacheEnabled() + if err != nil { + return nil, err + } + + if isFastCacheEnabled { + return NewFastIterator(start, end, ascending, t.ndb), nil + } + } + + return NewIterator(start, end, ascending, t), nil +} + +// IterateRange makes a callback for all nodes with key between start and end non-inclusive. +// If either are nil, then it is open on that side (nil, nil is the same as Iterate). The keys and +// values must not be modified, since they may point to data stored within IAVL. +func (t *ImmutableTree) IterateRange(start, end []byte, ascending bool, fn func(key []byte, value []byte) bool) (stopped bool) { + if t.root == nil { + return false + } + return t.root.traverseInRange(t, start, end, ascending, false, false, func(node *Node) bool { + if node.GetHeight() == 0 { + return fn(node.GetNodeKey(), node.GetValue()) + } + return false + }) +} + +// IterateRangeInclusive makes a callback for all nodes with key between start and end inclusive. +// If either are nil, then it is open on that side (nil, nil is the same as Iterate). The keys and +// values must not be modified, since they may point to data stored within IAVL. 
+func (t *ImmutableTree) IterateRangeInclusive(start, end []byte, ascending bool, fn func(key, value []byte, version int64) bool) (stopped bool) { + if t.root == nil { + return false + } + return t.root.traverseInRange(t, start, end, ascending, true, false, func(node *Node) bool { + if node.GetHeight() == 0 { + return fn(node.GetNodeKey(), node.GetValue(), node.GetVersion()) + } + return false + }) +} + +// IsFastCacheEnabled returns true if fast cache is enabled, false otherwise. +// For fast cache to be enabled, the following 2 conditions must be met: +// 1. The tree is of the latest version. +// 2. The underlying storage has been upgraded to fast cache +func (t *ImmutableTree) IsFastCacheEnabled() (bool, error) { + isLatestTreeVersion, err := t.isLatestTreeVersion() + if err != nil { + return false, err + } + return isLatestTreeVersion && t.ndb.hasUpgradedToFastStorage(), nil +} + +func (t *ImmutableTree) isLatestTreeVersion() (bool, error) { + latestVersion, err := t.ndb.getLatestVersion() + if err != nil { + return false, err + } + return t.version == latestVersion, nil +} + +// Clone creates a clone of the tree. +// Used internally by MutableTree. +func (t *ImmutableTree) clone() *ImmutableTree { + return &ImmutableTree{ + root: t.root, + ndb: t.ndb, + version: t.version, + skipFastStorageUpgrade: t.skipFastStorageUpgrade, + } +} + +// nodeSize is like Size, but includes inner nodes too. +// +//nolint:unused +func (t *ImmutableTree) nodeSize() int { + size := 0 + t.root.traverse(t, true, func(n *Node) bool { + size++ + return false + }) + return size +} diff --git a/sei-iavl/import.go b/sei-iavl/import.go new file mode 100644 index 0000000000..45bf8ae976 --- /dev/null +++ b/sei-iavl/import.go @@ -0,0 +1,323 @@ +package iavl + +import ( + "bytes" + "fmt" + "sync" + + "github.com/pkg/errors" + db "github.com/tendermint/tm-db" +) + +// desiredBatchSize is the desired batch write size of the import batch before flushing it to the database. 
+// The actual batch write size could exceed this value when the previous batch is still flushing. +const defaultDesiredBatchSize = 20000 + +// If there's an ongoing pending batch write, we will keep batching more writes +// until the ongoing batch write completes or we reach maxBatchSize +const defaultMaxBatchSize = 400000 + +// ErrNoImport is returned when calling methods on a closed importer +var ErrNoImport = errors.New("no import in progress") + +// Importer imports data into an empty MutableTree. It is created by MutableTree.Import(). Users +// must call Close() when done. +// +// ExportNodes must be imported in the order returned by Exporter, i.e. depth-first post-order (LRN). +// +// Importer is not concurrency-safe, it is the caller's responsibility to ensure the tree is not +// modified while performing an import. +type Importer struct { + tree *MutableTree + version int64 + batch db.Batch + batchSize uint32 + stack []*Node + desiredBatchSize uint32 + maxBatchSize uint32 + batchMtx sync.RWMutex + chNodeData chan NodeData + chNodeDataWg sync.WaitGroup + chBatch chan db.Batch + chBatchWg sync.WaitGroup + chError chan error + allChannelClosed bool +} + +type NodeData struct { + node *Node + data []byte +} + +// newImporter creates a new Importer for an empty MutableTree. +// Underneath it spawns three goroutines to process the data import flow. +// +// version should correspond to the version that was initially exported. It must be greater than +// or equal to the highest ExportNode version number given. 
+func newImporter(tree *MutableTree, version int64) (*Importer, error) { + if version < 0 { + return nil, errors.New("imported version cannot be negative") + } + if tree.ndb.latestVersion > 0 { + return nil, errors.Errorf("found database at version %d, must be 0", tree.ndb.latestVersion) + } + if !tree.IsEmpty() { + return nil, errors.New("tree must be empty") + } + + importer := &Importer{ + tree: tree, + version: version, + batch: tree.ndb.db.NewBatch(), + stack: make([]*Node, 0, 8), + batchMtx: sync.RWMutex{}, + desiredBatchSize: defaultDesiredBatchSize, + maxBatchSize: defaultMaxBatchSize, + chNodeData: make(chan NodeData, 2*defaultDesiredBatchSize), + chNodeDataWg: sync.WaitGroup{}, + chBatch: make(chan db.Batch, 1), + chBatchWg: sync.WaitGroup{}, + chError: make(chan error, 1), + allChannelClosed: false, + } + + importer.chNodeDataWg.Add(1) + go setBatchData(importer) + + importer.chBatchWg.Add(1) + go batchWrite(importer) + + return importer, nil + +} + +// WithDesiredBatchSize set the desired batch size for write +func (i *Importer) WithDesiredBatchSize(batchSize uint32) *Importer { + i.desiredBatchSize = batchSize + return i +} + +// WithMaxBatchSize set the maximum allowed batch size for write, should be greater than desired batch size. +// Consider increase max batch size to reduce overall import time. 
+func (i *Importer) WithMaxBatchSize(batchSize uint32) *Importer { + i.maxBatchSize = batchSize + return i +} + +// setBatchData get the next serialized node data from channel, and write the data to the current batch +func setBatchData(i *Importer) { + for i.batch != nil { + if nodeData, open := <-i.chNodeData; open { + i.batchMtx.RLock() + if i.batch != nil { + err := i.batch.Set(i.tree.ndb.nodeKey(nodeData.node.GetHash()), nodeData.data) + if err != nil { + i.batchMtx.RUnlock() + i.chError <- err + break + } + } + i.batchMtx.RUnlock() + i.batchSize++ + // Only commit a new batch if size meet desiredBatchSize and there's no pending batch write + if (i.batchSize >= i.desiredBatchSize && len(i.chBatch) < 1) || i.batchSize >= i.maxBatchSize { + i.chBatch <- i.batch + i.batch = i.tree.ndb.db.NewBatch() + i.batchSize = 0 + } + } else { + break + } + } + i.chNodeDataWg.Done() +} + +// batchWrite get a new batch from the channel and execute the batch write to the underline DB. +func batchWrite(i *Importer) { + for i.batch != nil { + if nextBatch, open := <-i.chBatch; open { + err := nextBatch.Write() + if err != nil { + i.chError <- err + break + } + i.batchMtx.Lock() + _ = nextBatch.Close() + i.batchMtx.Unlock() + } else { + break + } + } + i.chBatchWg.Done() +} + +// Close frees all resources. It is safe to call multiple times. Uncommitted nodes may already have +// been flushed to the database, but will not be visible. Errors are ignored in the close functions. +func (i *Importer) Close() { + _ = i.waitAndCloseChannels() + if i.batch != nil { + _ = i.batch.Close() + } + i.batch = nil + i.tree = nil +} + +// Add adds an ExportNode to the import. ExportNodes must be added in the order returned by +// Exporter, i.e. depth-first post-order (LRN). Nodes are periodically flushed to the database, +// but the imported version is not visible until Commit() is called. 
+func (i *Importer) Add(exportNode *ExportNode) error { + if i.tree == nil { + return ErrNoImport + } + if exportNode == nil { + return errors.New("node cannot be nil") + } + if exportNode.Version > i.version { + return errors.Errorf("node version %v can't be greater than import version %v", + exportNode.Version, i.version) + } + + node := &Node{ + key: exportNode.Key, + value: exportNode.Value, + version: exportNode.Version, + height: exportNode.Height, + } + + // We build the tree from the bottom-left up. The stack is used to store unresolved left + // children while constructing right children. When all children are built, the parent can + // be constructed and the resolved children can be discarded from the stack. Using a stack + // ensures that we can handle additional unresolved left children while building a right branch. + // + // We don't modify the stack until we've verified the built node, to avoid leaving the + // importer in an inconsistent state when we return an error. + stackSize := len(i.stack) + switch { + case stackSize >= 2 && i.stack[stackSize-1].GetHeight() < node.GetHeight() && i.stack[stackSize-2].GetHeight() < node.GetHeight(): + node.SetLeftNode(i.stack[stackSize-2]) + node.SetLeftHash(node.GetLeftNode().GetHash()) + node.SetRightNode(i.stack[stackSize-1]) + node.SetRightHash(node.GetRightNode().GetHash()) + case stackSize >= 1 && i.stack[stackSize-1].GetHeight() < node.GetHeight(): + node.SetLeftNode(i.stack[stackSize-1]) + node.SetLeftHash(node.GetLeftNode().GetHash()) + } + + if node.GetHeight() == 0 { + node.SetSize(1) + } + if node.GetLeftNode() != nil { + node.SetSize(node.GetSize() + node.GetLeftNode().GetSize()) + } + if node.GetRightNode() != nil { + node.SetSize(node.GetSize() + node.GetRightNode().GetSize()) + } + + _, err := node._hash() + if err != nil { + return err + } + + err = node.validate() + if err != nil { + return err + } + + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + if err := node.writeBytes(buf); err != 
nil { + panic(err) + } + + bytesCopy := make([]byte, buf.Len()) + copy(bytesCopy, buf.Bytes()) + bufPool.Put(buf) + + // Check errors + select { + case err := <-i.chError: + return err + default: + } + + // Handle the remaining steps in a separate goroutine + i.chNodeData <- NodeData{ + node: node, + data: bytesCopy, + } + + // Update the stack now that we know there were no errors + switch { + case node.GetLeftHash() != nil && node.GetRightHash() != nil: + i.stack = i.stack[:stackSize-2] + case node.GetLeftHash() != nil || node.GetRightHash() != nil: + i.stack = i.stack[:stackSize-1] + } + // Only hash\height\size of the node will be used after it be pushed into the stack. + i.stack = append(i.stack, &Node{hash: node.GetHash(), height: node.GetHeight(), size: node.GetSize()}) + + return nil +} + +// Commit finalizes the import by flushing any outstanding nodes to the database, making the +// version visible, and updating the tree metadata. It can only be called once, and calls Close() +// internally. +func (i *Importer) Commit() error { + if i.tree == nil { + return ErrNoImport + } + + err := i.waitAndCloseChannels() + if err != nil { + return err + } + + switch len(i.stack) { + case 0: + if err := i.batch.Set(i.tree.ndb.rootKey(i.version), []byte{}); err != nil { + return err + } + case 1: + if err := i.batch.Set(i.tree.ndb.rootKey(i.version), i.stack[0].GetHash()); err != nil { + return err + } + default: + return fmt.Errorf("invalid node structure, found stack size %v when committing", + len(i.stack)) + } + + err = i.batch.WriteSync() + if err != nil { + return err + } + i.tree.ndb.resetLatestVersion(i.version) + + _, err = i.tree.LoadVersion(i.version) + if err != nil { + return err + } + + i.Close() + return nil +} + +// waitAndCloseChannels will try to close all the channels for importer and wait for remaining work to be done. +// This function should only be called in the Commit or Close action. 
If any error happens when draining the remaining data in the channel, +// The error will be popped out and returned. +func (i *Importer) waitAndCloseChannels() error { + // Make sure all pending works are drained and close the channels in order + if !i.allChannelClosed { + i.allChannelClosed = true + close(i.chNodeData) + i.chNodeDataWg.Wait() + close(i.chBatch) + i.chBatchWg.Wait() + // Check errors + select { + case err := <-i.chError: + return err + default: + } + } + return nil +} diff --git a/sei-iavl/import_test.go b/sei-iavl/import_test.go new file mode 100644 index 0000000000..5fbf7d733e --- /dev/null +++ b/sei-iavl/import_test.go @@ -0,0 +1,248 @@ +package iavl + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + db "github.com/tendermint/tm-db" +) + +func ExampleImporter() { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + if err != nil { + // handle err + } + + tree.Set([]byte("a"), []byte{1}) + tree.Set([]byte("b"), []byte{2}) + tree.Set([]byte("c"), []byte{3}) + _, version, err := tree.SaveVersion() + if err != nil { + // handle err + } + + itree, err := tree.GetImmutable(version) + if err != nil { + // handle err + } + exporter := itree.Export() + defer exporter.Close() + exported := []*ExportNode{} + for { + var node *ExportNode + node, err = exporter.Next() + if err == ExportDone { + break + } else if err != nil { + // handle err + } + exported = append(exported, node) + } + + newTree, err := NewMutableTree(db.NewMemDB(), 0, false) + if err != nil { + // handle err + } + importer, err := newTree.Import(version) + if err != nil { + // handle err + } + defer importer.Close() + for _, node := range exported { + err = importer.Add(node) + if err != nil { + // handle err + } + } + err = importer.Commit() + if err != nil { + // handle err + } +} + +func TestImporter_NegativeVersion(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + _, err = 
tree.Import(-1) + require.Error(t, err) +} + +func TestImporter_NotEmpty(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + tree.Set([]byte("a"), []byte{1}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + _, err = tree.Import(1) + require.Error(t, err) +} + +func TestImporter_NotEmptyDatabase(t *testing.T) { + db := db.NewMemDB() + + tree, err := NewMutableTree(db, 0, false) + require.NoError(t, err) + tree.Set([]byte("a"), []byte{1}) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + tree, err = NewMutableTree(db, 0, false) + require.NoError(t, err) + _, err = tree.Load() + require.NoError(t, err) + + _, err = tree.Import(1) + require.Error(t, err) +} + +func TestImporter_NotEmptyUnsaved(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + tree.Set([]byte("a"), []byte{1}) + + _, err = tree.Import(1) + require.Error(t, err) +} + +func TestImporter_Add(t *testing.T) { + k := []byte("key") + v := []byte("value") + + testcases := map[string]struct { + node *ExportNode + valid bool + }{ + "nil node": {nil, false}, + "valid": {&ExportNode{Key: k, Value: v, Version: 1, Height: 0}, true}, + "no key": {&ExportNode{Key: nil, Value: v, Version: 1, Height: 0}, false}, + "no value": {&ExportNode{Key: k, Value: nil, Version: 1, Height: 0}, false}, + "version too large": {&ExportNode{Key: k, Value: v, Version: 2, Height: 0}, false}, + "no version": {&ExportNode{Key: k, Value: v, Version: 0, Height: 0}, false}, + // further cases will be handled by Node.validate() + } + for desc, tc := range testcases { + tc := tc // appease scopelint + t.Run(desc, func(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(1) + require.NoError(t, err) + defer importer.Close() + + err = importer.Add(tc.node) + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + 
+func TestImporter_Add_Closed(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(1) + require.NoError(t, err) + + importer.Close() + err = importer.Add(&ExportNode{Key: []byte("key"), Value: []byte("value"), Version: 1, Height: 0}) + require.Error(t, err) + require.Equal(t, ErrNoImport, err) +} + +func TestImporter_Close(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(1) + require.NoError(t, err) + + err = importer.Add(&ExportNode{Key: []byte("key"), Value: []byte("value"), Version: 1, Height: 0}) + require.NoError(t, err) + + importer.Close() + has, err := tree.Has([]byte("key")) + require.NoError(t, err) + require.False(t, has) + + importer.Close() +} + +func TestImporter_Commit(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(1) + require.NoError(t, err) + + err = importer.Add(&ExportNode{Key: []byte("key"), Value: []byte("value"), Version: 1, Height: 0}) + require.NoError(t, err) + + err = importer.Commit() + require.NoError(t, err) + has, err := tree.Has([]byte("key")) + require.NoError(t, err) + require.True(t, has) +} + +func TestImporter_Commit_Closed(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(1) + require.NoError(t, err) + + err = importer.Add(&ExportNode{Key: []byte("key"), Value: []byte("value"), Version: 1, Height: 0}) + require.NoError(t, err) + + importer.Close() + err = importer.Commit() + require.Error(t, err) + require.Equal(t, ErrNoImport, err) +} + +func TestImporter_Commit_Empty(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + importer, err := tree.Import(3) + require.NoError(t, err) + defer importer.Close() + + err = importer.Commit() + require.NoError(t, err) + 
assert.EqualValues(t, 3, tree.Version()) +} + +func BenchmarkImport(b *testing.B) { + b.StopTimer() + tree := setupExportTreeSized(b, 4096) + exported := make([]*ExportNode, 0, 4096) + exporter := tree.Export() + for { + item, err := exporter.Next() + if err == ExportDone { + break + } else if err != nil { + b.Error(err) + } + exported = append(exported, item) + } + exporter.Close() + b.StartTimer() + + for n := 0; n < b.N; n++ { + newTree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(b, err) + importer, err := newTree.Import(tree.Version()) + require.NoError(b, err) + for _, item := range exported { + err = importer.Add(item) + if err != nil { + b.Error(err) + } + } + err = importer.Commit() + require.NoError(b, err) + } +} diff --git a/sei-iavl/internal/bytes/bytes.go b/sei-iavl/internal/bytes/bytes.go new file mode 100644 index 0000000000..f99a584397 --- /dev/null +++ b/sei-iavl/internal/bytes/bytes.go @@ -0,0 +1,94 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. +type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. +func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], s) + jbz[len(jbz)-1] = '"' + return jbz, nil +} + +// This is the point of Bytes. 
+func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("invalid hex string: %s", data) + } + data = data[1 : len(data)-1] + dest := make([]byte, hex.DecodedLen(len(data))) + _, err := hex.Decode(dest, data) + if err != nil { + return err + } + *bz = dest + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} + +func (bz HexBytes) Format(s fmt.State, verb rune) { + switch verb { + case 'p': + _, _ = fmt.Fprintf(s, "%p", bz) + default: + _, _ = fmt.Fprintf(s, "%X", []byte(bz)) + } +} + +// Returns a copy of the given byte slice. +func Cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret +} + +// Returns a slice of the same length (big endian) +// except incremented by one. +// Returns nil on overflow (e.g. if bz bytes are all 0xFF) +// CONTRACT: len(bz) > 0 +func CpIncr(bz []byte) (ret []byte) { + if len(bz) == 0 { + panic("cpIncr expects non-zero bz length") + } + ret = Cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i]++ + return + } + ret[i] = byte(0x00) + if i == 0 { + // Overflow + return nil + } + } + return nil +} diff --git a/sei-iavl/internal/bytes/bytes_test.go b/sei-iavl/internal/bytes/bytes_test.go new file mode 100644 index 0000000000..40ef393a4f --- /dev/null +++ b/sei-iavl/internal/bytes/bytes_test.go @@ -0,0 +1,66 @@ +// nolint: scopelint +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. 
+func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} diff --git a/sei-iavl/internal/bytes/string.go b/sei-iavl/internal/bytes/string.go new file mode 100644 index 0000000000..0d882488a8 --- /dev/null +++ b/sei-iavl/internal/bytes/string.go @@ -0,0 +1,24 @@ +package common + +import ( + "unsafe" +) + +// UnsafeStrToBytes uses unsafe to convert string into byte array. Returned bytes +// must not be altered after this function is called as it will cause a segmentation fault. 
+// #nosec G103 -- unsafe usage is intentional for zero-copy string to byte conversion +func UnsafeStrToBytes(s string) []byte { + if s == "" { + return nil + } + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// UnsafeBytesToStr is meant to make a zero allocation conversion +// from []byte -> string to speed up operations, it is not meant +// to be used generally, but for a specific pattern to delete keys +// from a map. +// #nosec G103 -- unsafe usage is intentional for zero-copy byte slice to string conversion +func UnsafeBytesToStr(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/sei-iavl/internal/bytes/string_test.go b/sei-iavl/internal/bytes/string_test.go new file mode 100644 index 0000000000..f704e2f852 --- /dev/null +++ b/sei-iavl/internal/bytes/string_test.go @@ -0,0 +1,54 @@ +package common + +import ( + "runtime" + "strconv" + "testing" + "time" + + "github.com/stretchr/testify/suite" +) + +func TestStringSuite(t *testing.T) { + suite.Run(t, new(StringSuite)) +} + +type StringSuite struct{ suite.Suite } + +func unsafeConvertStr() []byte { + return UnsafeStrToBytes("abc") +} + +func (s *StringSuite) TestUnsafeStrToBytes() { + // we convert in other function to trigger GC. We want to check that + // the underlying array in []bytes is accessible after GC will finish swapping. + for i := 0; i < 5; i++ { + b := unsafeConvertStr() + runtime.GC() + <-time.NewTimer(2 * time.Millisecond).C + b2 := append(b, 'd') + s.Equal("abc", string(b)) + s.Equal("abcd", string(b2)) + } +} + +func unsafeConvertBytes() string { + return UnsafeBytesToStr([]byte("abc")) +} + +func (s *StringSuite) TestUnsafeBytesToStr() { + // we convert in other function to trigger GC. We want to check that + // the underlying array in []bytes is accessible after GC will finish swapping. 
+ for i := 0; i < 5; i++ { + str := unsafeConvertBytes() + runtime.GC() + <-time.NewTimer(2 * time.Millisecond).C + s.Equal("abc", str) + } +} + +func BenchmarkUnsafeStrToBytes(b *testing.B) { + for i := 0; i < b.N; i++ { + UnsafeStrToBytes(strconv.Itoa(i)) + } +} diff --git a/sei-iavl/internal/encoding/encoding.go b/sei-iavl/internal/encoding/encoding.go new file mode 100644 index 0000000000..788a6fff77 --- /dev/null +++ b/sei-iavl/internal/encoding/encoding.go @@ -0,0 +1,164 @@ +package encoding + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" + "sync" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +var varintPool = &sync.Pool{ + New: func() interface{} { + return &[binary.MaxVarintLen64]byte{} + }, +} + +var uvarintPool = &sync.Pool{ + New: func() interface{} { + return &[binary.MaxVarintLen64]byte{} + }, +} + +// decodeBytes decodes a varint length-prefixed byte slice, returning it along with the number +// of input bytes read. +func DecodeBytes(bz []byte) ([]byte, int, error) { + s, n, err := DecodeUvarint(bz) + if err != nil { + return nil, n, err + } + // Make sure size doesn't overflow. ^uint(0) >> 1 will help determine the + // max int value variably on 32-bit and 64-bit machines. We also doublecheck + // that size is positive. + size := int(s) + if s >= uint64(^uint(0)>>1) || size < 0 { + return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", s) + } + // Make sure end index doesn't overflow. We know n>0 from decodeUvarint(). + end := n + size + if end < n { + return nil, n, fmt.Errorf("invalid out of range length %v decoding []byte", size) + } + // Make sure the end index is within bounds. 
+ if len(bz) < end { + return nil, n, fmt.Errorf("insufficient bytes decoding []byte of length %v", size) + } + bz2 := make([]byte, size) + copy(bz2, bz[n:end]) + return bz2, end, nil +} + +// decodeUvarint decodes a varint-encoded unsigned integer from a byte slice, returning it and the +// number of bytes decoded. +func DecodeUvarint(bz []byte) (uint64, int, error) { + u, n := binary.Uvarint(bz) + if n == 0 { + // buf too small + return u, n, errors.New("buffer too small") + } else if n < 0 { + // value larger than 64 bits (overflow) + // and -n is the number of bytes read + n = -n + return u, n, errors.New("EOF decoding uvarint") + } + return u, n, nil +} + +// decodeVarint decodes a varint-encoded integer from a byte slice, returning it and the number of +// bytes decoded. +func DecodeVarint(bz []byte) (int64, int, error) { + i, n := binary.Varint(bz) + if n == 0 { + return i, n, errors.New("buffer too small") + } else if n < 0 { + // value larger than 64 bits (overflow) + // and -n is the number of bytes read + n = -n + return i, n, errors.New("EOF decoding varint") + } + return i, n, nil +} + +// EncodeBytes writes a varint length-prefixed byte slice to the writer. +func EncodeBytes(w io.Writer, bz []byte) error { + err := EncodeUvarint(w, uint64(len(bz))) + if err != nil { + return err + } + _, err = w.Write(bz) + return err +} + +// encodeBytesSlice length-prefixes the byte slice and returns it. +func EncodeBytesSlice(bz []byte) ([]byte, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + err := EncodeBytes(buf, bz) + + bytesCopy := make([]byte, buf.Len()) + copy(bytesCopy, buf.Bytes()) + + return bytesCopy, err +} + +// encodeBytesSize returns the byte size of the given slice including length-prefixing. +func EncodeBytesSize(bz []byte) int { + return EncodeUvarintSize(uint64(len(bz))) + len(bz) +} + +// EncodeUvarint writes a varint-encoded unsigned integer to an io.Writer. 
+func EncodeUvarint(w io.Writer, u uint64) error { + // See comment in encodeVarint + buf := uvarintPool.Get().(*[binary.MaxVarintLen64]byte) + + n := binary.PutUvarint(buf[:], u) + _, err := w.Write(buf[0:n]) + + uvarintPool.Put(buf) + + return err +} + +// EncodeUvarintSize returns the byte size of the given integer as a varint. +func EncodeUvarintSize(u uint64) int { + if u == 0 { + return 1 + } + return (bits.Len64(u) + 6) / 7 +} + +// EncodeVarint writes a varint-encoded integer to an io.Writer. +func EncodeVarint(w io.Writer, i int64) error { + // Use a pool here to reduce allocations. + // + // Though this allocates just 10 bytes on the stack, doing allocation for every calls + // cost us a huge memory. The profiling show that using pool save us ~30% memory. + // + // Since when we don't have concurrent access to the pool, the speed will nearly identical. + // If we need to support concurrent access, we can accept a *[binary.MaxVarintLen64]byte as + // input, so the caller can allocate just one and pass the same array pointer to each call. + buf := varintPool.Get().(*[binary.MaxVarintLen64]byte) + + n := binary.PutVarint(buf[:], i) + _, err := w.Write(buf[0:n]) + + varintPool.Put(buf) + + return err +} + +// EncodeVarintSize returns the byte size of the given integer as a varint. 
+func EncodeVarintSize(i int64) int { + var buf [binary.MaxVarintLen64]byte + return binary.PutVarint(buf[:], i) +} diff --git a/sei-iavl/internal/encoding/encoding_test.go b/sei-iavl/internal/encoding/encoding_test.go new file mode 100644 index 0000000000..36c402f3e2 --- /dev/null +++ b/sei-iavl/internal/encoding/encoding_test.go @@ -0,0 +1,88 @@ +package encoding + +import ( + "encoding/binary" + "math" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDecodeBytes(t *testing.T) { + bz := []byte{0, 1, 2, 3, 4, 5, 6, 7} + testcases := map[string]struct { + bz []byte + lengthPrefix uint64 + expect []byte + expectErr bool + }{ + "full": {bz, 8, bz, false}, + "empty": {bz, 0, []byte{}, false}, + "partial": {bz, 3, []byte{0, 1, 2}, false}, + "out of bounds": {bz, 9, nil, true}, + "empty input": {[]byte{}, 0, []byte{}, false}, + "empty input out of bounds": {[]byte{}, 1, nil, true}, + + // The following will always fail, since the byte slice is only 8 bytes, + // but we're making sure they don't panic due to overflow issues. 
See: + // https://github.com/cosmos/iavl/issues/339 + "max int32": {bz, uint64(math.MaxInt32), nil, true}, + "max int32 -1": {bz, uint64(math.MaxInt32) - 1, nil, true}, + "max int32 -10": {bz, uint64(math.MaxInt32) - 10, nil, true}, + "max int32 +1": {bz, uint64(math.MaxInt32) + 1, nil, true}, + "max int32 +10": {bz, uint64(math.MaxInt32) + 10, nil, true}, + + "max int32*2": {bz, uint64(math.MaxInt32) * 2, nil, true}, + "max int32*2 -1": {bz, uint64(math.MaxInt32)*2 - 1, nil, true}, + "max int32*2 -10": {bz, uint64(math.MaxInt32)*2 - 10, nil, true}, + "max int32*2 +1": {bz, uint64(math.MaxInt32)*2 + 1, nil, true}, + "max int32*2 +10": {bz, uint64(math.MaxInt32)*2 + 10, nil, true}, + + "max uint32": {bz, uint64(math.MaxUint32), nil, true}, + "max uint32 -1": {bz, uint64(math.MaxUint32) - 1, nil, true}, + "max uint32 -10": {bz, uint64(math.MaxUint32) - 10, nil, true}, + "max uint32 +1": {bz, uint64(math.MaxUint32) + 1, nil, true}, + "max uint32 +10": {bz, uint64(math.MaxUint32) + 10, nil, true}, + + "max uint32*2": {bz, uint64(math.MaxUint32) * 2, nil, true}, + "max uint32*2 -1": {bz, uint64(math.MaxUint32)*2 - 1, nil, true}, + "max uint32*2 -10": {bz, uint64(math.MaxUint32)*2 - 10, nil, true}, + "max uint32*2 +1": {bz, uint64(math.MaxUint32)*2 + 1, nil, true}, + "max uint32*2 +10": {bz, uint64(math.MaxUint32)*2 + 10, nil, true}, + + "max int64": {bz, uint64(math.MaxInt64), nil, true}, + "max int64 -1": {bz, uint64(math.MaxInt64) - 1, nil, true}, + "max int64 -10": {bz, uint64(math.MaxInt64) - 10, nil, true}, + "max int64 +1": {bz, uint64(math.MaxInt64) + 1, nil, true}, + "max int64 +10": {bz, uint64(math.MaxInt64) + 10, nil, true}, + + "max uint64": {bz, uint64(math.MaxUint64), nil, true}, + "max uint64 -1": {bz, uint64(math.MaxUint64) - 1, nil, true}, + "max uint64 -10": {bz, uint64(math.MaxUint64) - 10, nil, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Generate an input slice. 
+ buf := make([]byte, binary.MaxVarintLen64) + varintBytes := binary.PutUvarint(buf, tc.lengthPrefix) + buf = append(buf[:varintBytes], tc.bz...) + + // Attempt to decode it. + b, n, err := DecodeBytes(buf) + if tc.expectErr { + require.Error(t, err) + require.Equal(t, varintBytes, n) + } else { + require.NoError(t, err) + require.Equal(t, uint64(n), uint64(varintBytes)+tc.lengthPrefix) + require.Equal(t, tc.bz[:tc.lengthPrefix], b) + } + }) + } +} + +func TestDecodeBytes_invalidVarint(t *testing.T) { + _, _, err := DecodeBytes([]byte{0xff}) + require.Error(t, err) +} diff --git a/sei-iavl/internal/logger/logger.go b/sei-iavl/internal/logger/logger.go new file mode 100644 index 0000000000..093576bd1a --- /dev/null +++ b/sei-iavl/internal/logger/logger.go @@ -0,0 +1,15 @@ +package logger + +import ( + "fmt" +) + +var ( + debugging = false +) + +func Debug(format string, args ...interface{}) { + if debugging { + fmt.Printf(format, args...) + } +} diff --git a/sei-iavl/internal/rand/random.go b/sei-iavl/internal/rand/random.go new file mode 100644 index 0000000000..1bbb8609aa --- /dev/null +++ b/sei-iavl/internal/rand/random.go @@ -0,0 +1,253 @@ +package common + +import ( + crand "crypto/rand" + "math" + mrand "math/rand" + "sync" + "time" +) + +const ( + strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters +) + +// Rand is a prng, that is seeded with OS randomness. +// The OS randomness is obtained from crypto/rand, however none of the provided +// methods are suitable for cryptographic usage. +// They all utilize math/rand's prng internally. +// +// All of the methods here are suitable for concurrent use. +// This is achieved by using a mutex lock on all of the provided methods. 
+type Rand struct { + sync.Mutex + rand *mrand.Rand +} + +var grand *Rand + +func init() { + grand = NewRand() + grand.init() +} + +func NewRand() *Rand { + rand := &Rand{} + rand.init() + return rand +} + +func (r *Rand) init() { + bz := cRandBytes(8) + var seed uint64 + for i := 0; i < 8; i++ { + seed |= uint64(bz[i]) + seed <<= 8 + } + // #nosec G115 -- seed is intentionally truncated for PRNG seeding + r.reset(int64(seed)) +} + +func (r *Rand) reset(seed int64) { + r.rand = mrand.New(mrand.NewSource(seed)) +} + +//---------------------------------------- +// Global functions + +func Seed(seed int64) { + grand.Seed(seed) +} + +func RandStr(length int) string { + return grand.Str(length) +} + +func RandInt() int { + return grand.Int() +} + +func RandInt31() int32 { + return grand.Int31() +} + +func RandBytes(n int) []byte { + return grand.Bytes(n) +} + +func RandPerm(n int) []int { + return grand.Perm(n) +} + +//---------------------------------------- +// Rand methods + +func (r *Rand) Seed(seed int64) { + r.Lock() + r.reset(seed) + r.Unlock() +} + +// Str constructs a random alphanumeric string of given length. 
+func (r *Rand) Str(length int) string { + chars := []byte{} +MAIN_LOOP: + for { + val := r.Int63() + for i := 0; i < 10; i++ { + v := int(val & 0x3f) // rightmost 6 bits + if v >= 62 { // only 62 characters in strChars + val >>= 6 + continue + } else { + chars = append(chars, strChars[v]) + if len(chars) == length { + break MAIN_LOOP + } + val >>= 6 + } + } + } + + return string(chars) +} + +func (r *Rand) Uint16() uint16 { + // #nosec G115 -- value is masked to 16 bits, always fits in uint16 + return uint16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Uint32() uint32 { + r.Lock() + u32 := r.rand.Uint32() + r.Unlock() + return u32 +} + +func (r *Rand) Uint64() uint64 { + return uint64(r.Uint32())<<32 + uint64(r.Uint32()) +} + +func (r *Rand) Uint() uint { + r.Lock() + i := r.rand.Int() + r.Unlock() + // #nosec G115 -- rand.Int() always returns non-negative values + return uint(i) +} + +func (r *Rand) Int16() int16 { + // #nosec G115 -- value is masked to 16 bits, always fits in int16 + return int16(r.Uint32() & (1<<16 - 1)) +} + +func (r *Rand) Int32() int32 { + // #nosec G115 -- intentional conversion for random number generation + return int32(r.Uint32()) +} + +func (r *Rand) Int64() int64 { + // #nosec G115 -- intentional conversion for random number generation + return int64(r.Uint64()) +} + +func (r *Rand) Int() int { + r.Lock() + i := r.rand.Int() + r.Unlock() + return i +} + +func (r *Rand) Int31() int32 { + r.Lock() + i31 := r.rand.Int31() + r.Unlock() + return i31 +} + +func (r *Rand) Int31n(n int32) int32 { + r.Lock() + i31n := r.rand.Int31n(n) + r.Unlock() + return i31n +} + +func (r *Rand) Int63() int64 { + r.Lock() + i63 := r.rand.Int63() + r.Unlock() + return i63 +} + +func (r *Rand) Int63n(n int64) int64 { + r.Lock() + i63n := r.rand.Int63n(n) + r.Unlock() + return i63n +} + +func (r *Rand) Float32() float32 { + r.Lock() + f32 := r.rand.Float32() + r.Unlock() + return f32 +} + +func (r *Rand) Float64() float64 { + r.Lock() + f64 := 
r.rand.Float64() + r.Unlock() + return f64 +} + +func (r *Rand) Time() time.Time { + // #nosec G115 -- limiting to MaxInt64 to ensure valid Unix timestamp + return time.Unix(int64(r.Uint64()&math.MaxInt64), 0) +} + +// Bytes returns n random bytes generated from the internal +// prng. +func (r *Rand) Bytes(n int) []byte { + // cRandBytes isn't guaranteed to be fast so instead + // use random bytes generated from the internal PRNG + bs := make([]byte, n) + for i := 0; i < len(bs); i++ { + bs[i] = byte(r.Int() & 0xFF) + } + return bs +} + +// Intn returns, as an int, a uniform pseudo-random number in the range [0, n). +// It panics if n <= 0. +func (r *Rand) Intn(n int) int { + r.Lock() + i := r.rand.Intn(n) + r.Unlock() + return i +} + +// Bool returns a uniformly random boolean +func (r *Rand) Bool() bool { + // See https://github.com/golang/go/issues/23804#issuecomment-365370418 + // for reasoning behind computing like this + return r.Int63()%2 == 0 +} + +// Perm returns a pseudo-random permutation of n integers in [0, n). +func (r *Rand) Perm(n int) []int { + r.Lock() + perm := r.rand.Perm(n) + r.Unlock() + return perm +} + +// NOTE: This relies on the os's random number generator. +// For real security, we should salt that with some seed. 
+func cRandBytes(numBytes int) []byte { + b := make([]byte, numBytes) + _, err := crand.Read(b) + if err != nil { + panic(err) + } + return b +} diff --git a/sei-iavl/internal/rand/random_test.go b/sei-iavl/internal/rand/random_test.go new file mode 100644 index 0000000000..1827419442 --- /dev/null +++ b/sei-iavl/internal/rand/random_test.go @@ -0,0 +1,84 @@ +package common + +import ( + "bytes" + "encoding/json" + "fmt" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRandStr(t *testing.T) { + l := 243 + s := RandStr(l) + assert.Equal(t, l, len(s)) +} + +func TestRandBytes(t *testing.T) { + l := 243 + b := RandBytes(l) + assert.Equal(t, l, len(b)) +} + +// Test to make sure that we never call math.rand(). +// We do this by ensuring that outputs are deterministic. +func TestDeterminism(t *testing.T) { + var firstOutput string + + // Set math/rand's seed for the sake of debugging this test. + // (It isn't strictly necessary). + mrand.Seed(1) + + for i := 0; i < 100; i++ { + output := testThemAll() + if i == 0 { + firstOutput = output + } else if firstOutput != output { + t.Errorf("Run #%d's output was different from first run.\nfirst: %v\nlast: %v", + i, firstOutput, output) + } + } +} + +func testThemAll() string { + + // Such determinism. + grand.reset(1) + + // Use it. 
+ out := new(bytes.Buffer) + perm := RandPerm(10) + blob, _ := json.Marshal(perm) + fmt.Fprintf(out, "perm: %s\n", blob) + fmt.Fprintf(out, "randInt: %d\n", RandInt()) + fmt.Fprintf(out, "randInt31: %d\n", RandInt31()) + return out.String() +} + +func BenchmarkRandBytes10B(b *testing.B) { + benchmarkRandBytes(b, 10) +} +func BenchmarkRandBytes100B(b *testing.B) { + benchmarkRandBytes(b, 100) +} +func BenchmarkRandBytes1KiB(b *testing.B) { + benchmarkRandBytes(b, 1024) +} +func BenchmarkRandBytes10KiB(b *testing.B) { + benchmarkRandBytes(b, 10*1024) +} +func BenchmarkRandBytes100KiB(b *testing.B) { + benchmarkRandBytes(b, 100*1024) +} +func BenchmarkRandBytes1MiB(b *testing.B) { + benchmarkRandBytes(b, 1024*1024) +} + +func benchmarkRandBytes(b *testing.B, n int) { + for i := 0; i < b.N; i++ { + _ = RandBytes(n) + } + b.ReportAllocs() +} diff --git a/sei-iavl/iterator.go b/sei-iavl/iterator.go new file mode 100644 index 0000000000..520d8f2167 --- /dev/null +++ b/sei-iavl/iterator.go @@ -0,0 +1,289 @@ +package iavl + +// NOTE: This file favors int64 as opposed to int for size/counts. +// The Tree on the other hand favors int. This is intentional. 
+ +import ( + "bytes" + "errors" + + dbm "github.com/tendermint/tm-db" +) + +type traversal struct { + tree *ImmutableTree + start, end []byte // iteration domain + ascending bool // ascending traversal + inclusive bool // end key inclusiveness + post bool // postorder traversal + delayedNodes *delayedNodes // delayed nodes to be traversed + unlocked bool // whether traversal should not lock tree's mutex +} + +var errIteratorNilTreeGiven = errors.New("iterator must be created with an immutable tree but the tree was nil") + +func (node *Node) newTraversal(tree *ImmutableTree, start, end []byte, ascending bool, inclusive bool, post bool, unlocked bool) *traversal { + return &traversal{ + tree: tree, + start: start, + end: end, + ascending: ascending, + inclusive: inclusive, + post: post, + delayedNodes: &delayedNodes{{node, true}}, // set initial traverse to the node + unlocked: unlocked, + } +} + +// delayedNode represents the delayed iteration on the nodes. +// When delayed is set to true, the delayedNode should be expanded, and their +// children should be traversed. When delayed is set to false, the delayedNode is +// already have expanded, and it could be immediately returned. +type delayedNode struct { + node *Node + delayed bool +} + +type delayedNodes []delayedNode + +func (nodes *delayedNodes) pop() (*Node, bool) { + node := (*nodes)[len(*nodes)-1] + *nodes = (*nodes)[:len(*nodes)-1] + return node.node, node.delayed +} + +func (nodes *delayedNodes) push(node *Node, delayed bool) { + *nodes = append(*nodes, delayedNode{node, delayed}) +} + +func (nodes *delayedNodes) length() int { + return len(*nodes) +} + +// `traversal` returns the delayed execution of recursive traversal on a tree. +// +// `traversal` will traverse the tree in a depth-first manner. To handle locating +// the next element, and to handle unwinding, the traversal maintains its future +// iteration under `delayedNodes`. 
At each call of `next()`, it will retrieve the +// next element from the `delayedNodes` and acts accordingly. The `next()` itself +// defines how to unwind the delayed nodes stack. The caller can either call the +// next traversal to proceed, or simply discard the `traversal` struct to stop iteration. +// +// At the each step of `next`, the `delayedNodes` can have one of the three states: +// 1. It has length of 0, meaning that their is no more traversable nodes. +// 2. It has length of 1, meaning that the traverse is being started from the initial node. +// 3. It has length of 2>=, meaning that there are delayed nodes to be traversed. +// +// When the `delayedNodes` are not empty, `next` retrieves the first `delayedNode` and initially check: +// 1. If it is not an delayed node (node.delayed == false) it immediately returns it. +// +// A. If the `node` is a branch node: +// 1. If the traversal is postorder, then append the current node to the t.delayedNodes, +// with `delayed` set to false. This makes the current node returned *after* all the children +// are traversed, without being expanded. +// 2. Append the traversable children nodes into the `delayedNodes`, with `delayed` set to true. This +// makes the children nodes to be traversed, and expanded with their respective children. +// 3. If the traversal is preorder, (with the children to be traversed already pushed to the +// `delayedNodes`), returns the current node. +// 4. Call `traversal.next()` to further traverse through the `delayedNodes`. +// +// B. If the `node` is a leaf node, it will be returned without expand, by the following process: +// 1. If the traversal is postorder, the current node will be append to the `delayedNodes` with `delayed` +// set to false, and immediately returned at the subsequent call of `traversal.next()` at the last line. +// 2. If the traversal is preorder, the current node will be returned. 
+func (t *traversal) next() (*Node, error) { + n, err, shouldReturn := t.doNext() + if shouldReturn { + return n, err + } + + // Keep traversing and expanding the remaning delayed nodes. A-4. + return t.next() +} + +func (t *traversal) doNext() (*Node, error, bool) { + // End of traversal. + if t.delayedNodes.length() == 0 { + return nil, nil, true + } + + node, delayed := t.delayedNodes.pop() + + // Already expanded, immediately return. + if !delayed || node == nil { + return node, nil, true + } + + afterStart := t.start == nil || bytes.Compare(t.start, node.GetNodeKey()) < 0 + startOrAfter := afterStart || bytes.Equal(t.start, node.GetNodeKey()) + beforeEnd := t.end == nil || bytes.Compare(node.GetNodeKey(), t.end) < 0 + if t.inclusive { + beforeEnd = beforeEnd || bytes.Equal(node.GetNodeKey(), t.end) + } + + // case of postorder. A-1 and B-1 + // Recursively process left sub-tree, then right-subtree, then node itself. + if t.post && (!node.isLeaf() || (startOrAfter && beforeEnd)) { + t.delayedNodes.push(node, false) + } + + // case of branch node, traversing children. A-2. + if !node.isLeaf() { + // if node is a branch node and the order is ascending, + // We traverse through the left subtree, then the right subtree. + if t.ascending { + if beforeEnd { + // push the delayed traversal for the right nodes, + rightNode, err := node.getRightNode(t.tree) + if err != nil { + return nil, err, true + } + t.delayedNodes.push(rightNode, true) + } + if afterStart { + // push the delayed traversal for the left nodes, + leftNode, err := node.getLeftNode(t.tree) + if err != nil { + return nil, err, true + } + t.delayedNodes.push(leftNode, true) + } + } else { + // if node is a branch node and the order is not ascending + // We traverse through the right subtree, then the left subtree. 
+ if afterStart { + // push the delayed traversal for the left nodes, + leftNode, err := node.getLeftNode(t.tree) + if err != nil { + return nil, err, true + } + t.delayedNodes.push(leftNode, true) + } + if beforeEnd { + // push the delayed traversal for the right nodes, + rightNode, err := node.getRightNode(t.tree) + if err != nil { + return nil, err, true + } + t.delayedNodes.push(rightNode, true) + } + } + } + + // case of preorder traversal. A-3 and B-2. + // Process root then (recursively) processing left child, then process right child + if !t.post && (!node.isLeaf() || (startOrAfter && beforeEnd)) { + return node, nil, true + } + + return nil, nil, false +} + +// Iterator is a dbm.Iterator for ImmutableTree +type Iterator struct { + start, end []byte + + key, value []byte + + valid bool + + err error + + t *traversal +} + +var _ dbm.Iterator = (*Iterator)(nil) + +// Returns a new iterator over the immutable tree. If the tree is nil, the iterator will be invalid. +func NewIterator(start, end []byte, ascending bool, tree *ImmutableTree) dbm.Iterator { + iter := &Iterator{ + start: start, + end: end, + } + + if tree == nil { + iter.err = errIteratorNilTreeGiven + } else { + iter.valid = true + iter.t = tree.root.newTraversal(tree, start, end, ascending, false, false, false) + // Move iterator before the first element + iter.Next() + } + return iter +} + +func NewIteratorUnlocked(start, end []byte, ascending bool, tree *ImmutableTree) dbm.Iterator { + iter := &Iterator{ + start: start, + end: end, + } + + if tree == nil { + iter.err = errIteratorNilTreeGiven + } else { + iter.valid = true + iter.t = tree.root.newTraversal(tree, start, end, ascending, false, false, true) + // Move iterator before the first element + iter.Next() + } + return iter +} + +// Domain implements dbm.Iterator. +func (iter *Iterator) Domain() ([]byte, []byte) { + return iter.start, iter.end +} + +// Valid implements dbm.Iterator. 
+func (iter *Iterator) Valid() bool {
+	return iter.valid
+}
+
+// Key implements dbm.Iterator
+func (iter *Iterator) Key() []byte {
+	return iter.key
+}
+
+// Value implements dbm.Iterator
+func (iter *Iterator) Value() []byte {
+	return iter.value
+}
+
+// Next implements dbm.Iterator
+func (iter *Iterator) Next() {
+	if iter.t == nil {
+		return
+	}
+
+	node, err := iter.t.next()
+	// TODO: double-check if this error is correctly handled.
+	if node == nil || err != nil {
+		iter.t = nil
+		iter.valid = false
+		return
+	}
+
+	if node.GetHeight() == 0 {
+		iter.key, iter.value = node.GetNodeKey(), node.GetValue()
+		return
+	}
+
+	iter.Next()
+}
+
+// Close implements dbm.Iterator
+func (iter *Iterator) Close() error {
+	iter.t = nil
+	iter.valid = false
+	return iter.err
+}
+
+// Error implements dbm.Iterator
+func (iter *Iterator) Error() error {
+	return iter.err
+}
+
+// IsFast returns true if iterator uses fast strategy
+func (iter *Iterator) IsFast() bool {
+	return false
+}
diff --git a/sei-iavl/iterator_test.go b/sei-iavl/iterator_test.go
new file mode 100644
index 0000000000..4164e055a8
--- /dev/null
+++ b/sei-iavl/iterator_test.go
@@ -0,0 +1,331 @@
+package iavl
+
+import (
+	"math/rand"
+	"sort"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"
+)
+
+func TestIterator_NewIterator_NilTree_Failure(t *testing.T) {
+	var start, end = []byte{'a'}, []byte{'c'}
+	ascending := true
+
+	performTest := func(t *testing.T, itr dbm.Iterator) {
+		require.NotNil(t, itr)
+		require.False(t, itr.Valid())
+		actualsStart, actualEnd := itr.Domain()
+		require.Equal(t, start, actualsStart)
+		require.Equal(t, end, actualEnd)
+		require.Error(t, itr.Error())
+	}
+
+	t.Run("Iterator", func(t *testing.T) {
+		itr := NewIterator(start, end, ascending, nil)
+		performTest(t, itr)
+		require.ErrorIs(t, errIteratorNilTreeGiven, itr.Error())
+	})
+
+	t.Run("Fast Iterator", func(t *testing.T) {
+		itr := NewFastIterator(start, end, ascending, nil)
+
performTest(t, itr) + require.ErrorIs(t, errFastIteratorNilNdbGiven, itr.Error()) + }) + + t.Run("Unsaved Fast Iterator", func(t *testing.T) { + itr := NewUnsavedFastIterator(start, end, ascending, nil, map[string]*FastNode{}, map[string]interface{}{}) + performTest(t, itr) + require.ErrorIs(t, errFastIteratorNilNdbGiven, itr.Error()) + }) +} + +func TestUnsavedFastIterator_NewIterator_NilAdditions_Failure(t *testing.T) { + var start, end = []byte{'a'}, []byte{'c'} + ascending := true + + performTest := func(t *testing.T, itr dbm.Iterator) { + require.NotNil(t, itr) + require.False(t, itr.Valid()) + actualsStart, actualEnd := itr.Domain() + require.Equal(t, start, actualsStart) + require.Equal(t, end, actualEnd) + require.Error(t, itr.Error()) + } + + t.Run("Nil additions given", func(t *testing.T) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + itr := NewUnsavedFastIterator(start, end, ascending, tree.ndb, nil, tree.unsavedFastNodeRemovals) + performTest(t, itr) + require.ErrorIs(t, errUnsavedFastIteratorNilAdditionsGiven, itr.Error()) + }) + + t.Run("Nil removals given", func(t *testing.T) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + itr := NewUnsavedFastIterator(start, end, ascending, tree.ndb, tree.unsavedFastNodeAdditions, nil) + performTest(t, itr) + require.ErrorIs(t, errUnsavedFastIteratorNilRemovalsGiven, itr.Error()) + }) + + t.Run("All nil", func(t *testing.T) { + itr := NewUnsavedFastIterator(start, end, ascending, nil, nil, nil) + performTest(t, itr) + require.ErrorIs(t, errFastIteratorNilNdbGiven, itr.Error()) + }) + + t.Run("Additions and removals are nil", func(t *testing.T) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + itr := NewUnsavedFastIterator(start, end, ascending, tree.ndb, nil, nil) + performTest(t, itr) + require.ErrorIs(t, errUnsavedFastIteratorNilAdditionsGiven, itr.Error()) + }) +} + +func TestIterator_Empty_Invalid(t 
*testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: []byte("a"), + endIterate: []byte("a"), + ascending: true, + } + + performTest := func(t *testing.T, itr dbm.Iterator, mirror [][]string) { + require.Equal(t, 0, len(mirror)) + require.False(t, itr.Valid()) + } + + t.Run("Iterator", func(t *testing.T) { + itr, mirror := setupIteratorAndMirror(t, config) + performTest(t, itr, mirror) + }) + + t.Run("Fast Iterator", func(t *testing.T) { + itr, mirror := setupFastIteratorAndMirror(t, config) + performTest(t, itr, mirror) + }) + + t.Run("Unsaved Fast Iterator", func(t *testing.T) { + itr, mirror := setupUnsavedFastIterator(t, config) + performTest(t, itr, mirror) + }) +} + +func TestIterator_Basic_Ranged_Ascending_Success(t *testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: []byte("e"), + endIterate: []byte("w"), + ascending: true, + } + iteratorSuccessTest(t, config) +} + +func TestIterator_Basic_Ranged_Descending_Success(t *testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: []byte("e"), + endIterate: []byte("w"), + ascending: false, + } + iteratorSuccessTest(t, config) +} + +func TestIterator_Basic_Full_Ascending_Success(t *testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: nil, + endIterate: nil, + ascending: true, + } + + iteratorSuccessTest(t, config) +} + +func TestIterator_Basic_Full_Descending_Success(t *testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: nil, + endIterate: nil, + ascending: false, + } + iteratorSuccessTest(t, config) +} + +func TestIterator_WithDelete_Full_Ascending_Success(t *testing.T) { + config := &iteratorTestConfig{ + startByteToSet: 'a', + endByteToSet: 'z', + startIterate: nil, + endIterate: nil, + ascending: false, + } + + tree, mirror := 
getRandomizedTreeAndMirror(t) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + randomizeTreeAndMirror(t, tree, mirror) + + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + err = tree.DeleteVersion(1) + require.NoError(t, err) + + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + immutableTree, err := tree.GetImmutable(latestVersion) + require.NoError(t, err) + + // sort mirror for assertion + sortedMirror := make([][]string, 0, len(mirror)) + for k, v := range mirror { + sortedMirror = append(sortedMirror, []string{k, v}) + } + + sort.Slice(sortedMirror, func(i, j int) bool { + return sortedMirror[i][0] > sortedMirror[j][0] + }) + + t.Run("Iterator", func(t *testing.T) { + itr := NewIterator(config.startIterate, config.endIterate, config.ascending, immutableTree) + require.True(t, itr.Valid()) + assertIterator(t, itr, sortedMirror, config.ascending) + }) + + t.Run("Fast Iterator", func(t *testing.T) { + itr := NewFastIterator(config.startIterate, config.endIterate, config.ascending, immutableTree.ndb) + require.True(t, itr.Valid()) + assertIterator(t, itr, sortedMirror, config.ascending) + }) + + t.Run("Unsaved Fast Iterator", func(t *testing.T) { + itr := NewUnsavedFastIterator(config.startIterate, config.endIterate, config.ascending, immutableTree.ndb, tree.unsavedFastNodeAdditions, tree.unsavedFastNodeRemovals) + require.True(t, itr.Valid()) + assertIterator(t, itr, sortedMirror, config.ascending) + }) +} + +func iteratorSuccessTest(t *testing.T, config *iteratorTestConfig) { + performTest := func(t *testing.T, itr dbm.Iterator, mirror [][]string) { + actualStart, actualEnd := itr.Domain() + require.Equal(t, config.startIterate, actualStart) + require.Equal(t, config.endIterate, actualEnd) + + require.NoError(t, itr.Error()) + + assertIterator(t, itr, mirror, config.ascending) + } + + t.Run("Iterator", func(t *testing.T) { + itr, mirror := setupIteratorAndMirror(t, config) + require.True(t, itr.Valid()) + 
performTest(t, itr, mirror) + }) + + t.Run("Fast Iterator", func(t *testing.T) { + itr, mirror := setupFastIteratorAndMirror(t, config) + require.True(t, itr.Valid()) + performTest(t, itr, mirror) + }) + + t.Run("Unsaved Fast Iterator", func(t *testing.T) { + itr, mirror := setupUnsavedFastIterator(t, config) + require.True(t, itr.Valid()) + performTest(t, itr, mirror) + }) +} + +func setupIteratorAndMirror(t *testing.T, config *iteratorTestConfig) (dbm.Iterator, [][]string) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + + mirror := setupMirrorForIterator(t, config, tree) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + immutableTree, err := tree.GetImmutable(latestVersion) + require.NoError(t, err) + + itr := NewIterator(config.startIterate, config.endIterate, config.ascending, immutableTree) + return itr, mirror +} + +func setupFastIteratorAndMirror(t *testing.T, config *iteratorTestConfig) (dbm.Iterator, [][]string) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + + mirror := setupMirrorForIterator(t, config, tree) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + itr := NewFastIterator(config.startIterate, config.endIterate, config.ascending, tree.ndb) + return itr, mirror +} + +func setupUnsavedFastIterator(t *testing.T, config *iteratorTestConfig) (dbm.Iterator, [][]string) { + tree, err := NewMutableTree(dbm.NewMemDB(), 0, false) + require.NoError(t, err) + + // For unsaved fast iterator, we would like to test the state where + // there are saved fast nodes as well as some unsaved additions and removals. + // So, we split the byte range in half where the first half is saved and the second half is unsaved. 
+ breakpointByte := (config.endByteToSet + config.startByteToSet) / 2 + + firstHalfConfig := *config + firstHalfConfig.endByteToSet = breakpointByte // exclusive + + secondHalfConfig := *config + secondHalfConfig.startByteToSet = breakpointByte + + // First half of the mirror + mirror := setupMirrorForIterator(t, &firstHalfConfig, tree) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + // No unsaved additions or removals should be present after saving + require.Equal(t, 0, len(tree.unsavedFastNodeAdditions)) + require.Equal(t, 0, len(tree.unsavedFastNodeRemovals)) + + // Ensure that there are unsaved additions and removals present + secondHalfMirror := setupMirrorForIterator(t, &secondHalfConfig, tree) + + require.True(t, len(tree.unsavedFastNodeAdditions) >= len(secondHalfMirror)) + require.Equal(t, 0, len(tree.unsavedFastNodeRemovals)) + + // Merge the two halves + if config.ascending { + mirror = append(mirror, secondHalfMirror...) + } else { + mirror = append(secondHalfMirror, mirror...) + } + + if len(mirror) > 0 { + // Remove random keys + for i := 0; i < len(mirror)/4; i++ { + randIndex := rand.Intn(len(mirror)) + keyToRemove := mirror[randIndex][0] + + _, removed, err := tree.Remove([]byte(keyToRemove)) + require.NoError(t, err) + require.True(t, removed) + + mirror = append(mirror[:randIndex], mirror[randIndex+1:]...) 
+		}
+	}
+
+	itr := NewUnsavedFastIterator(config.startIterate, config.endIterate, config.ascending, tree.ndb, tree.unsavedFastNodeAdditions, tree.unsavedFastNodeRemovals)
+	return itr, mirror
+}
diff --git a/sei-iavl/key_format.go b/sei-iavl/key_format.go
new file mode 100644
index 0000000000..3c1cbecc23
--- /dev/null
+++ b/sei-iavl/key_format.go
@@ -0,0 +1,185 @@
+package iavl
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// Provides a fixed-width lexicographically sortable []byte key format
+type KeyFormat struct {
+	layout    []int
+	length    int
+	prefix    byte
+	unbounded bool
+}
+
+// Create a []byte key format based on a single byte prefix and fixed width key segments each of whose length is
+// specified by the corresponding element of layout.
+//
+// For example, to store keys that could index some objects by a version number and their SHA256 hash using the form:
+// 'c<version uint64><hash []byte>' then you would define the KeyFormat with:
+//
+//	var keyFormat = NewKeyFormat('c', 8, 32)
+//
+// Then you can create a key with:
+//
+//	func ObjectKey(version uint64, objectBytes []byte) []byte {
+//		hasher := sha256.New()
+//		hasher.Sum(nil)
+//		return keyFormat.Key(version, hasher.Sum(nil))
+//	}
+//
+// The key format supports an unbounded-length final segment if the last term of the layout is 0.
+func NewKeyFormat(prefix byte, layout ...int) *KeyFormat {
+	// For prefix byte
+	length := 1
+	for i, l := range layout {
+		length += l
+		if l == 0 && i != len(layout)-1 {
+			panic("Only the last item in a key format can be 0")
+		}
+	}
+	return &KeyFormat{
+		prefix:    prefix,
+		layout:    layout,
+		length:    length,
+		unbounded: len(layout) > 0 && layout[len(layout)-1] == 0,
+	}
+}
+
+// Format the byte segments into the key format - will panic if the segment lengths do not match the layout.
+func (kf *KeyFormat) KeyBytes(segments ...[]byte) []byte {
+	keyLen := kf.length
+	// In case segments length is less than layouts length,
+	// we don't have to allocate the whole kf.length, just
+	// enough space to store the segments.
+ if len(segments) < len(kf.layout) { + keyLen = 1 + for i := range segments { + keyLen += kf.layout[i] + } + } + + if kf.unbounded { + if len(segments) > 0 { + keyLen += len(segments[len(segments)-1]) + } + } + key := make([]byte, keyLen) + key[0] = kf.prefix + n := 1 + for i, s := range segments { + l := kf.layout[i] + + switch l { + case 0: + // If the expected segment length is unbounded, increase it by `string length` + n += len(s) + default: + if len(s) > l { + panic(fmt.Errorf("length of segment %X provided to KeyFormat.KeyBytes() is longer than the %d bytes "+ + "required by layout for segment %d", s, l, i)) + } + // Otherwise increase n by the segment length + n += l + + } + // Big endian so pad on left if not given the full width for this segment + copy(key[n-len(s):n], s) + } + return key[:n] +} + +// Format the args passed into the key format - will panic if the arguments passed do not match the length +// of the segment to which they correspond. When called with no arguments returns the raw prefix (useful as a start +// element of the entire keys space when sorted lexicographically). +func (kf *KeyFormat) Key(args ...interface{}) []byte { + if len(args) > len(kf.layout) { + panic(fmt.Errorf("keyFormat.Key() is provided with %d args but format only has %d segments", + len(args), len(kf.layout))) + } + segments := make([][]byte, len(args)) + for i, a := range args { + segments[i] = format(a) + } + return kf.KeyBytes(segments...) +} + +// Reads out the bytes associated with each segment of the key format from key. +func (kf *KeyFormat) ScanBytes(key []byte) [][]byte { + segments := make([][]byte, len(kf.layout)) + n := 1 + for i, l := range kf.layout { + n += l + // if current section is longer than key, then there are no more subsequent segments. 
+ if n > len(key) { + return segments[:i] + } + // if unbounded, segment is rest of key + if l == 0 { + segments[i] = key[n:] + break + } else { + segments[i] = key[n-l : n] + } + } + return segments +} + +// Extracts the segments into the values pointed to by each of args. Each arg must be a pointer to int64, uint64, or +// []byte, and the width of the args must match layout. +func (kf *KeyFormat) Scan(key []byte, args ...interface{}) { + segments := kf.ScanBytes(key) + if len(args) > len(segments) { + panic(fmt.Errorf("keyFormat.Scan() is provided with %d args but format only has %d segments in key %X", + len(args), len(segments), key)) + } + for i, a := range args { + scan(a, segments[i]) + } +} + +// Return the prefix as a string. +func (kf *KeyFormat) Prefix() string { + return string([]byte{kf.prefix}) +} + +func scan(a interface{}, value []byte) { + switch v := a.(type) { + case *int64: + // #nosec G115 -- Negative values will be mapped correctly when read in as uint64 and then type converted + *v = int64(binary.BigEndian.Uint64(value)) + case *uint64: + *v = binary.BigEndian.Uint64(value) + case *[]byte: + *v = value + default: + panic(fmt.Errorf("keyFormat scan() does not support scanning value of type %T: %v", a, a)) + } +} + +func format(a interface{}) []byte { + switch v := a.(type) { + case uint64: + return formatUint64(v) + case int64: + // #nosec G115 -- TestNegativeKeys seems to expect negative keys? + return formatUint64(uint64(v)) + // Provide formatting from int,uint as a convenience to avoid casting arguments + case uint: + return formatUint64(uint64(v)) + case int: + // #nosec G115 -- TestNegativeKeys seems to expect negative keys? 
+ return formatUint64(uint64(v)) + case []byte: + return v + default: + panic(fmt.Errorf("keyFormat format() does not support formatting value of type %T: %v", a, a)) + } +} + +func formatUint64(v uint64) []byte { + bs := make([]byte, 8) + binary.BigEndian.PutUint64(bs, v) + return bs +} diff --git a/sei-iavl/key_format_test.go b/sei-iavl/key_format_test.go new file mode 100644 index 0000000000..cc3a2d4f71 --- /dev/null +++ b/sei-iavl/key_format_test.go @@ -0,0 +1,149 @@ +package iavl + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKeyFormatBytes(t *testing.T) { + type keyPairs struct { + key [][]byte + expected []byte + } + emptyTestVector := keyPairs{key: [][]byte{}, expected: []byte{'e'}} + threeByteTestVector := keyPairs{ + key: [][]byte{{1, 2, 3}}, + expected: []byte{'e', 0, 0, 0, 0, 0, 1, 2, 3}, + } + eightByteTestVector := keyPairs{ + key: [][]byte{{1, 2, 3, 4, 5, 6, 7, 8}}, + expected: []byte{'e', 1, 2, 3, 4, 5, 6, 7, 8}, + } + + tests := []struct { + name string + kf *KeyFormat + testVectors []keyPairs + }{{ + name: "simple 3 int key format", + kf: NewKeyFormat(byte('e'), 8, 8, 8), + testVectors: []keyPairs{ + emptyTestVector, + threeByteTestVector, + eightByteTestVector, + { + key: [][]byte{{1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 3, 4, 5, 6, 7, 8}, {1, 1, 2, 2, 3, 3}}, + expected: []byte{'e', 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 1, 1, 2, 2, 3, 3}, + }, + }, + }, { + name: "zero suffix key format", + kf: NewKeyFormat(byte('e'), 8, 0), + testVectors: []keyPairs{ + emptyTestVector, + threeByteTestVector, + eightByteTestVector, + { + key: [][]byte{{1, 2, 3, 4, 5, 6, 7, 8}, {1, 2, 3, 4, 5, 6, 7, 8, 9}}, + expected: []byte{'e', 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + }, + { + key: [][]byte{{1, 2, 3, 4, 5, 6, 7, 8}, []byte("hellohello")}, + expected: []byte{'e', 1, 2, 3, 4, 5, 6, 7, 8, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x68, 0x65, 0x6c, 0x6c, 0x6f}, + }, + }, + }} + for _, tc := range tests { + kf := tc.kf + 
for i, v := range tc.testVectors { + assert.Equal(t, v.expected, kf.KeyBytes(v.key...), "key format %s, test case %d", tc.name, i) + } + } +} + +func TestKeyFormat(t *testing.T) { + kf := NewKeyFormat(byte('e'), 8, 8, 8) + key := []byte{'e', 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 200, 0, 0, 0, 0, 0, 0, 1, 144} + var a, b, c int64 = 100, 200, 400 + assert.Equal(t, key, kf.Key(a, b, c)) + + var ao, bo, co = new(int64), new(int64), new(int64) + kf.Scan(key, ao, bo, co) + assert.Equal(t, a, *ao) + assert.Equal(t, b, *bo) + assert.Equal(t, c, *co) + + bs := new([]byte) + kf.Scan(key, ao, bo, bs) + assert.Equal(t, a, *ao) + assert.Equal(t, b, *bo) + assert.Equal(t, []byte{0, 0, 0, 0, 0, 0, 1, 144}, *bs) + + assert.Equal(t, []byte{'e', 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0, 0, 0, 0, 0, 200}, kf.Key(a, b)) +} + +func TestNegativeKeys(t *testing.T) { + kf := NewKeyFormat(byte('e'), 8, 8) + + var a, b int64 = -100, -200 + // One's complement plus one + key := []byte{'e', + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, byte(0xff + a + 1), + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, byte(0xff + b + 1)} + assert.Equal(t, key, kf.Key(a, b)) + + var ao, bo = new(int64), new(int64) + kf.Scan(key, ao, bo) + assert.Equal(t, a, *ao) + assert.Equal(t, b, *bo) +} + +func TestOverflow(t *testing.T) { + kf := NewKeyFormat(byte('o'), 8, 8) + + var a int64 = 1 << 62 + var b uint64 = 1 << 63 + key := []byte{'o', + 0x40, 0, 0, 0, 0, 0, 0, 0, + 0x80, 0, 0, 0, 0, 0, 0, 0, + } + assert.Equal(t, key, kf.Key(a, b)) + + var ao, bo = new(int64), new(int64) + kf.Scan(key, ao, bo) + assert.Equal(t, a, *ao) + assert.Equal(t, int64(b), *bo) +} + +func benchmarkKeyFormatBytes(b *testing.B, kf *KeyFormat, segments ...[]byte) { + for i := 0; i < b.N; i++ { + kf.KeyBytes(segments...) 
+ } +} + +func BenchmarkKeyFormat_KeyBytesOneSegment(b *testing.B) { + benchmarkKeyFormatBytes(b, NewKeyFormat('e', 8, 8, 8), nil) +} + +func BenchmarkKeyFormat_KeyBytesThreeSegment(b *testing.B) { + segments := [][]byte{ + {1, 2, 3, 4, 5, 6, 7, 8}, + {1, 2, 3, 4, 5, 6, 7, 8}, + {1, 1, 2, 2, 3, 3}, + } + benchmarkKeyFormatBytes(b, NewKeyFormat('e', 8, 8, 8), segments...) +} + +func BenchmarkKeyFormat_KeyBytesOneSegmentWithVariousLayouts(b *testing.B) { + benchmarkKeyFormatBytes(b, NewKeyFormat('e', 8, 16, 32), nil) +} + +func BenchmarkKeyFormat_KeyBytesThreeSegmentWithVariousLayouts(b *testing.B) { + segments := [][]byte{ + {1, 2, 3, 4, 5, 6, 7, 8}, + {1, 2, 3, 4, 5, 6, 7, 8}, + {1, 1, 2, 2, 3, 3}, + } + benchmarkKeyFormatBytes(b, NewKeyFormat('e', 8, 16, 32), segments...) +} diff --git a/sei-iavl/mock/db_mock.go b/sei-iavl/mock/db_mock.go new file mode 100644 index 0000000000..8120cc64f8 --- /dev/null +++ b/sei-iavl/mock/db_mock.go @@ -0,0 +1,420 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: /root/go/pkg/mod/github.com/tendermint/tm-db@v0.6.4/types.go + +// Package mock is a generated GoMock package. +package mock + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + db "github.com/tendermint/tm-db" +) + +// MockDB is a mock of DB interface. +type MockDB struct { + ctrl *gomock.Controller + recorder *MockDBMockRecorder +} + +// MockDBMockRecorder is the mock recorder for MockDB. +type MockDBMockRecorder struct { + mock *MockDB +} + +// NewMockDB creates a new mock instance. +func NewMockDB(ctrl *gomock.Controller) *MockDB { + mock := &MockDB{ctrl: ctrl} + mock.recorder = &MockDBMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDB) EXPECT() *MockDBMockRecorder { + return m.recorder +} + +// Close mocks base method. 
+func (m *MockDB) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockDBMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockDB) Delete(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0) +} + +// DeleteSync mocks base method. +func (m *MockDB) DeleteSync(arg0 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSync", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSync indicates an expected call of DeleteSync. +func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0) +} + +// Get mocks base method. +func (m *MockDB) Get(arg0 []byte) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0) +} + +// Has mocks base method. 
+func (m *MockDB) Has(key []byte) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", key) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Has indicates an expected call of Has. +func (mr *MockDBMockRecorder) Has(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), key) +} + +// Iterator mocks base method. +func (m *MockDB) Iterator(start, end []byte) (db.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Iterator", start, end) + ret0, _ := ret[0].(db.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Iterator indicates an expected call of Iterator. +func (mr *MockDBMockRecorder) Iterator(start, end interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), start, end) +} + +// NewBatch mocks base method. +func (m *MockDB) NewBatch() db.Batch { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBatch") + ret0, _ := ret[0].(db.Batch) + return ret0 +} + +// NewBatch indicates an expected call of NewBatch. +func (mr *MockDBMockRecorder) NewBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch)) +} + +// Print mocks base method. +func (m *MockDB) Print() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Print") + ret0, _ := ret[0].(error) + return ret0 +} + +// Print indicates an expected call of Print. +func (mr *MockDBMockRecorder) Print() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print)) +} + +// ReverseIterator mocks base method. 
+func (m *MockDB) ReverseIterator(start, end []byte) (db.Iterator, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReverseIterator", start, end) + ret0, _ := ret[0].(db.Iterator) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReverseIterator indicates an expected call of ReverseIterator. +func (mr *MockDBMockRecorder) ReverseIterator(start, end interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), start, end) +} + +// Set mocks base method. +func (m *MockDB) Set(arg0, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set. +func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1) +} + +// SetSync mocks base method. +func (m *MockDB) SetSync(arg0, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetSync", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetSync indicates an expected call of SetSync. +func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1) +} + +// Stats mocks base method. +func (m *MockDB) Stats() map[string]string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stats") + ret0, _ := ret[0].(map[string]string) + return ret0 +} + +// Stats indicates an expected call of Stats. +func (mr *MockDBMockRecorder) Stats() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats)) +} + +// MockBatch is a mock of Batch interface. 
+type MockBatch struct { + ctrl *gomock.Controller + recorder *MockBatchMockRecorder +} + +// MockBatchMockRecorder is the mock recorder for MockBatch. +type MockBatchMockRecorder struct { + mock *MockBatch +} + +// NewMockBatch creates a new mock instance. +func NewMockBatch(ctrl *gomock.Controller) *MockBatch { + mock := &MockBatch{ctrl: ctrl} + mock.recorder = &MockBatchMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBatch) EXPECT() *MockBatchMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockBatch) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockBatchMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockBatch)(nil).Close)) +} + +// Delete mocks base method. +func (m *MockBatch) Delete(key []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delete", key) + ret0, _ := ret[0].(error) + return ret0 +} + +// Delete indicates an expected call of Delete. +func (mr *MockBatchMockRecorder) Delete(key interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockBatch)(nil).Delete), key) +} + +// Set mocks base method. +func (m *MockBatch) Set(key, value []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Set", key, value) + ret0, _ := ret[0].(error) + return ret0 +} + +// Set indicates an expected call of Set. +func (mr *MockBatchMockRecorder) Set(key, value interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockBatch)(nil).Set), key, value) +} + +// Write mocks base method. 
+func (m *MockBatch) Write() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write") + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write. +func (mr *MockBatchMockRecorder) Write() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockBatch)(nil).Write)) +} + +// WriteSync mocks base method. +func (m *MockBatch) WriteSync() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteSync") + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteSync indicates an expected call of WriteSync. +func (mr *MockBatchMockRecorder) WriteSync() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteSync", reflect.TypeOf((*MockBatch)(nil).WriteSync)) +} + +// MockIterator is a mock of Iterator interface. +type MockIterator struct { + ctrl *gomock.Controller + recorder *MockIteratorMockRecorder +} + +// MockIteratorMockRecorder is the mock recorder for MockIterator. +type MockIteratorMockRecorder struct { + mock *MockIterator +} + +// NewMockIterator creates a new mock instance. +func NewMockIterator(ctrl *gomock.Controller) *MockIterator { + mock := &MockIterator{ctrl: ctrl} + mock.recorder = &MockIteratorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIterator) EXPECT() *MockIteratorMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockIterator) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockIteratorMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIterator)(nil).Close)) +} + +// Domain mocks base method. 
+func (m *MockIterator) Domain() ([]byte, []byte) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Domain") + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].([]byte) + return ret0, ret1 +} + +// Domain indicates an expected call of Domain. +func (mr *MockIteratorMockRecorder) Domain() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Domain", reflect.TypeOf((*MockIterator)(nil).Domain)) +} + +// Error mocks base method. +func (m *MockIterator) Error() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Error") + ret0, _ := ret[0].(error) + return ret0 +} + +// Error indicates an expected call of Error. +func (mr *MockIteratorMockRecorder) Error() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockIterator)(nil).Error)) +} + +// Key mocks base method. +func (m *MockIterator) Key() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Key") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Key indicates an expected call of Key. +func (mr *MockIteratorMockRecorder) Key() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Key", reflect.TypeOf((*MockIterator)(nil).Key)) +} + +// Next mocks base method. +func (m *MockIterator) Next() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Next") +} + +// Next indicates an expected call of Next. +func (mr *MockIteratorMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockIterator)(nil).Next)) +} + +// Valid mocks base method. +func (m *MockIterator) Valid() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Valid") + ret0, _ := ret[0].(bool) + return ret0 +} + +// Valid indicates an expected call of Valid. 
+func (mr *MockIteratorMockRecorder) Valid() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Valid", reflect.TypeOf((*MockIterator)(nil).Valid)) +} + +// Value mocks base method. +func (m *MockIterator) Value() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Value") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Value indicates an expected call of Value. +func (mr *MockIteratorMockRecorder) Value() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Value", reflect.TypeOf((*MockIterator)(nil).Value)) +} diff --git a/sei-iavl/mutable_tree.go b/sei-iavl/mutable_tree.go new file mode 100644 index 0000000000..8cbfc04719 --- /dev/null +++ b/sei-iavl/mutable_tree.go @@ -0,0 +1,1468 @@ +package iavl + +import ( + "bytes" + "crypto/sha256" + "fmt" + "math" + "sort" + "sync" + + "github.com/pkg/errors" + dbm "github.com/tendermint/tm-db" + + "github.com/sei-protocol/sei-chain/sei-iavl/internal/logger" +) + +// commitGap after upgrade/delete commitGap FastNodes when commit the batch +var commitGap uint64 = 5000000 + +// ErrVersionDoesNotExist is returned if a requested version does not exist. +var ErrVersionDoesNotExist = errors.New("version does not exist") + +// MutableTree is a persistent tree which keeps track of versions. It is not safe for concurrent +// use, and should be guarded by a Mutex or RWLock as appropriate. An immutable tree at a given +// version can be returned via GetImmutable, which is safe for concurrent access. +// +// Given and returned key/value byte slices must not be modified, since they may point to data +// located inside IAVL which would also be modified. +// +// The inner ImmutableTree should not be used directly by callers. +type MutableTree struct { + ITree *ImmutableTree // The current, working tree. + Mtx *sync.RWMutex + lastSaved *ImmutableTree // The most recently saved tree. 
+ orphans map[string]int64 // Nodes removed by changes to working tree. + unsavedFastNodeAdditions map[string]*FastNode // FastNodes that have not yet been saved to disk + unsavedFastNodeRemovals map[string]interface{} // FastNodes that have not yet been removed from disk + ndb *nodeDB + skipFastStorageUpgrade bool // If true, the tree will work like no fast storage and always not upgrade fast storage + separateOrphanStorage bool + separateOrphanVersionsToKeep int64 + orphandb *orphanDB +} + +// NewMutableTree returns a new tree with the specified cache size and datastore. +func NewMutableTree(db dbm.DB, cacheSize int, skipFastStorageUpgrade bool) (*MutableTree, error) { + return NewMutableTreeWithOpts(db, cacheSize, nil, skipFastStorageUpgrade) +} + +// NewMutableTreeWithOpts returns a new tree with the specified options. +func NewMutableTreeWithOpts(db dbm.DB, cacheSize int, opts *Options, skipFastStorageUpgrade bool) (*MutableTree, error) { + ndb := newNodeDB(db, cacheSize, opts) + head := &ImmutableTree{ndb: ndb, skipFastStorageUpgrade: skipFastStorageUpgrade} + if opts == nil { + defaultOpts := DefaultOptions() + opts = &defaultOpts + } + var orphandb *orphanDB + if opts.SeparateOrphanStorage { + orphandb = NewOrphanDB(opts) + } + + return &MutableTree{ + ITree: head, + lastSaved: head.clone(), + orphans: map[string]int64{}, + unsavedFastNodeAdditions: make(map[string]*FastNode), + unsavedFastNodeRemovals: make(map[string]interface{}), + ndb: ndb, + skipFastStorageUpgrade: skipFastStorageUpgrade, + separateOrphanStorage: opts.SeparateOrphanStorage, + separateOrphanVersionsToKeep: opts.SeparateOphanVersionsToKeep, + orphandb: orphandb, + Mtx: &sync.RWMutex{}, + }, nil +} + +func (tree *MutableTree) ImmutableTree() *ImmutableTree { + tree.Mtx.RLock() + defer tree.Mtx.RUnlock() + return tree.ITree +} + +func (tree *MutableTree) LastSaved() *ImmutableTree { + tree.Mtx.RLock() + defer tree.Mtx.RUnlock() + return tree.lastSaved +} + +func (tree *MutableTree) Has(key 
[]byte) (bool, error) { + return tree.ImmutableTree().Has(key) +} + +// IsEmpty returns whether or not the tree has any keys. Only trees that are +// not empty can be saved. +func (tree *MutableTree) IsEmpty() bool { + return tree.ImmutableTree().Size() == 0 +} + +// VersionExists returns whether or not a version exists. +func (tree *MutableTree) VersionExists(version int64) bool { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + latestVersion, err := tree.ndb.getLatestVersion() + if err != nil { + return false + } + if version <= latestVersion { + has, err := tree.ndb.hasVersion(version) + return err == nil && has + } + return false +} + +// AvailableVersions returns all available versions in ascending order +func (tree *MutableTree) AvailableVersions() []int { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + firstVersion, err := tree.ndb.getFirstVersion() + if err != nil { + return nil + } + latestVersion, err := tree.ndb.getLatestVersion() + if err != nil { + return nil + } + res := make([]int, 0) + for version := firstVersion; version <= latestVersion; version++ { + res = append(res, int(version)) + } + return res +} + +// Hash returns the hash of the latest saved version of the tree, as returned +// by SaveVersion. If no versions have been saved, Hash returns nil. +func (tree *MutableTree) Hash() ([]byte, error) { + return tree.LastSaved().Hash() +} + +// WorkingHash returns the hash of the current working tree. +func (tree *MutableTree) WorkingHash() ([]byte, error) { + return tree.ImmutableTree().Hash() +} + +// String returns a string representation of the tree. +func (tree *MutableTree) String() (string, error) { + return tree.ndb.String() +} + +// Set/Remove will orphan at most tree.Height nodes, +// balancing the tree after a Set/Remove will orphan at most 3 nodes. +func (tree *MutableTree) prepareOrphansSlice() []*Node { + return make([]*Node, 0, tree.ITree.Height()+3) +} + +// Set sets a key in the working tree. Nil values are invalid. 
The given +// key/value byte slices must not be modified after this call, since they point +// to slices stored within IAVL. It returns true when an existing value was +// updated, while false means it was a new key. +func (tree *MutableTree) Set(key, value []byte) (updated bool, err error) { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + var orphaned []*Node + orphaned, updated, err = tree.set(key, value) + if err != nil { + return false, err + } + err = tree.addOrphans(orphaned) + if err != nil { + return updated, err + } + return updated, nil +} + +// Get returns the value of the specified key if it exists, or nil otherwise. +// The returned value must not be modified, since it may point to data stored within IAVL. +func (tree *MutableTree) Get(key []byte) ([]byte, error) { + if tree.ImmutableTree().root == nil { + return nil, nil + } + + tree.Mtx.RLock() + defer tree.Mtx.RUnlock() + if !tree.skipFastStorageUpgrade { + if fastNode, ok := tree.unsavedFastNodeAdditions[unsafeToStr(key)]; ok { + return fastNode.value, nil + } + // check if node was deleted + if _, ok := tree.unsavedFastNodeRemovals[string(key)]; ok { + return nil, nil + } + } + + // Mtx is already acquired + return tree.ITree.Get(key) +} + +// Import returns an importer for tree nodes previously exported by ImmutableTree.Export(), +// producing an identical IAVL tree. The caller must call Close() on the importer when done. +// +// version should correspond to the version that was initially exported. It must be greater than +// or equal to the highest ExportNode version number given. +// +// Import can only be called on an empty tree. It is the callers responsibility that no other +// modifications are made to the tree while importing. +func (tree *MutableTree) Import(version int64) (*Importer, error) { + return newImporter(tree, version) +} + +// Iterate iterates over all keys of the tree. The keys and values must not be modified, +// since they may point to data stored within IAVL. 
Returns true if stopped by callback, false otherwise +func (tree *MutableTree) Iterate(fn func(key []byte, value []byte) bool) (stopped bool, err error) { + if tree.ImmutableTree().root == nil { + return false, nil + } + + if tree.skipFastStorageUpgrade { + return tree.ImmutableTree().Iterate(fn) + } + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + if err != nil { + return false, err + } + if !isFastCacheEnabled { + return tree.ImmutableTree().Iterate(fn) + } + + itr := NewUnsavedFastIterator(nil, nil, true, tree.ndb, tree.unsavedFastNodeAdditions, tree.unsavedFastNodeRemovals) + defer func() { _ = itr.Close() }() + for ; itr.Valid(); itr.Next() { + if fn(itr.Key(), itr.Value()) { + return true, nil + } + } + return false, nil +} + +// Iterator returns an iterator over the mutable tree. +// CONTRACT: no updates are made to the tree while an iterator is active. +func (tree *MutableTree) Iterator(start, end []byte, ascending bool) (dbm.Iterator, error) { + if !tree.skipFastStorageUpgrade { + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + if err != nil { + return nil, err + } + + if isFastCacheEnabled { + return NewUnsavedFastIterator(start, end, ascending, tree.ndb, tree.unsavedFastNodeAdditions, tree.unsavedFastNodeRemovals), nil + } + } + + return tree.ImmutableTree().Iterator(start, end, ascending) +} + +// no need to acquire Mtx since it's only called by `Set` which already holds the Mtx +func (tree *MutableTree) set(key []byte, value []byte) (orphans []*Node, updated bool, err error) { + if value == nil { + return nil, updated, fmt.Errorf("attempt to store nil value at key '%s'", key) + } + + if tree.ITree.root == nil { + if !tree.skipFastStorageUpgrade { + tree.addUnsavedAddition(key, NewFastNode(key, value, tree.ITree.version+1)) + } + tree.ITree.root = NewNode(key, value, tree.ITree.version+1) + return nil, updated, nil + } + + orphans = tree.prepareOrphansSlice() + tree.ITree.root, updated, err = 
tree.recursiveSet(tree.ITree.root, key, value, &orphans) + return orphans, updated, err +} + +// no need to acquire Mtx since it's only called by `set` which already holds the Mtx +func (tree *MutableTree) recursiveSet(node *Node, key []byte, value []byte, orphans *[]*Node) ( + newSelf *Node, updated bool, err error, +) { + version := tree.ITree.version + 1 + + if node.isLeaf() { + if !tree.skipFastStorageUpgrade { + tree.addUnsavedAddition(key, NewFastNode(key, value, version)) + } + + switch bytes.Compare(key, node.GetNodeKey()) { + case -1: + return &Node{ + key: node.GetNodeKey(), + height: 1, + size: 2, + leftNode: NewNode(key, value, version), + rightNode: node, + version: version, + }, false, nil + case 1: + return &Node{ + key: key, + height: 1, + size: 2, + leftNode: node, + rightNode: NewNode(key, value, version), + version: version, + }, false, nil + default: + *orphans = append(*orphans, node) + return NewNode(key, value, version), true, nil + } + } else { + *orphans = append(*orphans, node) + node, err = node.clone(version) + if err != nil { + return nil, false, err + } + + if bytes.Compare(key, node.GetNodeKey()) < 0 { + leftNode, err := node.getLeftNode(tree.ITree) + if err != nil { + return nil, false, err + } + lNode, u, e := tree.recursiveSet(leftNode, key, value, orphans) + if e != nil { + return nil, u, e + } + updated = u + node.SetLeftNode(lNode) + node.SetLeftHash(nil) // leftHash is yet unknown + } else { + rightNode, err := node.getRightNode(tree.ITree) + if err != nil { + return nil, false, err + } + rNode, u, e := tree.recursiveSet(rightNode, key, value, orphans) + if e != nil { + return nil, u, e + } + updated = u + node.SetRightNode(rNode) + node.SetRightHash(nil) // rightHash is yet unknown + } + + if updated { + return node, updated, nil + } + err = node.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, false, err + } + + newNode, err := tree.balance(node, orphans) + if err != nil { + return nil, false, err + } + return 
newNode, updated, err + } +} + +// Remove removes a key from the working tree. The given key byte slice should not be modified +// after this call, since it may point to data stored inside IAVL. +func (tree *MutableTree) Remove(key []byte) ([]byte, bool, error) { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + val, orphaned, removed, err := tree.remove(key) + if err != nil { + return nil, false, err + } + + err = tree.addOrphans(orphaned) + if err != nil { + return val, removed, err + } + return val, removed, nil +} + +// remove tries to remove a key from the tree and if removed, returns its +// value, nodes orphaned and 'true'. +// no need to acquire Mtx since it's only called by `Remove` which already holds the Mtx. +func (tree *MutableTree) remove(key []byte) (value []byte, orphaned []*Node, removed bool, err error) { + if tree.ITree.root == nil { + return nil, nil, false, nil + } + orphaned = tree.prepareOrphansSlice() + newRootHash, newRoot, _, value, err := tree.recursiveRemove(tree.ITree.root, key, &orphaned) + if err != nil { + return nil, nil, false, err + } + if len(orphaned) == 0 { + return nil, nil, false, nil + } + + if !tree.skipFastStorageUpgrade { + tree.addUnsavedRemoval(key) + } + + if newRoot == nil && newRootHash != nil { + tree.ITree.root, err = tree.ndb.GetNode(newRootHash) + if err != nil { + return nil, nil, false, err + } + } else { + tree.ITree.root = newRoot + } + return value, orphaned, true, nil +} + +// removes the node corresponding to the passed key and balances the tree. +// It returns: +// - the hash of the new node (or nil if the node is the one removed) +// - the node that replaces the orig. node after remove +// - new leftmost leaf key for tree after successfully removing 'key' if changed. +// - the removed value +// - the orphaned nodes. 
+// no need to acquire Mtx since it's only called by `remove` which already holds the Mtx +func (tree *MutableTree) recursiveRemove(node *Node, key []byte, orphans *[]*Node) (newHash []byte, newSelf *Node, newKey []byte, newValue []byte, err error) { + version := tree.ITree.version + 1 + + if node.isLeaf() { + if bytes.Equal(key, node.GetNodeKey()) { + *orphans = append(*orphans, node) + return nil, nil, nil, node.GetValue(), nil + } + return node.GetHash(), node, nil, nil, nil + } + + // node.key < key; we go to the left to find the key: + if bytes.Compare(key, node.GetNodeKey()) < 0 { + leftNode, err := node.getLeftNode(tree.ITree) + if err != nil { + return nil, nil, nil, nil, err + } + newLeftHash, newLeftNode, newKey, value, err := tree.recursiveRemove(leftNode, key, orphans) + if err != nil { + return nil, nil, nil, nil, err + } + + if len(*orphans) == 0 { + return node.GetHash(), node, nil, value, nil + } + *orphans = append(*orphans, node) + if newLeftHash == nil && newLeftNode == nil { // left node held value, was removed + return node.GetRightHash(), node.GetRightNode(), node.GetNodeKey(), value, nil + } + + newNode, err := node.clone(version) + if err != nil { + return nil, nil, nil, nil, err + } + + newNode.SetLeftHash(newLeftHash) + newNode.SetLeftNode(newLeftNode) + err = newNode.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, nil, nil, err + } + newNode, err = tree.balance(newNode, orphans) + if err != nil { + return nil, nil, nil, nil, err + } + + return newNode.GetHash(), newNode, newKey, value, nil + } + // node.key >= key; either found or look to the right: + rightNode, err := node.getRightNode(tree.ITree) + if err != nil { + return nil, nil, nil, nil, err + } + newRightHash, newRightNode, newKey, value, err := tree.recursiveRemove(rightNode, key, orphans) + if err != nil { + return nil, nil, nil, nil, err + } + if len(*orphans) == 0 { + return node.GetHash(), node, nil, value, nil + } + *orphans = append(*orphans, node) + if 
newRightHash == nil && newRightNode == nil { // right node held value, was removed + return node.GetLeftHash(), node.GetLeftNode(), nil, value, nil + } + + newNode, err := node.clone(version) + if err != nil { + return nil, nil, nil, nil, err + } + + newNode.SetRightHash(newRightHash) + newNode.SetRightNode(newRightNode) + if newKey != nil { + newNode.SetKey(newKey) + } + err = newNode.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, nil, nil, err + } + + newNode, err = tree.balance(newNode, orphans) + if err != nil { + return nil, nil, nil, nil, err + } + + return newNode.GetHash(), newNode, nil, value, nil +} + +// Load the latest versioned tree from disk. +func (tree *MutableTree) Load() (int64, error) { + return tree.LoadVersion(int64(0)) +} + +// LazyLoadVersion attempts to lazy load only the specified target version +// without loading previous roots/versions. Lazy loading should be used in cases +// where only reads are expected. Any writes to a lazy loaded tree may result in +// unexpected behavior. If the targetVersion is non-positive, the latest version +// will be loaded by default. If the latest version is non-positive, this method +// performs a no-op. Otherwise, if the root does not exist, an error will be +// returned. 
+func (tree *MutableTree) LazyLoadVersion(targetVersion int64) (toReturn int64, toErr error) { + latestVersion, err := tree.ndb.getLatestVersion() + if err != nil { + return 0, err + } + if latestVersion < targetVersion { + return latestVersion, fmt.Errorf("wanted to load target %d but only found up to %d", targetVersion, latestVersion) + } + + // no versions have been saved if the latest version is non-positive + if latestVersion <= 0 { + if targetVersion <= 0 { + if !tree.skipFastStorageUpgrade { + _, err := tree.enableFastStorageAndCommitIfNotEnabled() + return 0, err + } + return 0, nil + } + return 0, fmt.Errorf("no versions found while trying to load %v", targetVersion) + } + + // default to the latest version if the targeted version is non-positive + if targetVersion <= 0 { + targetVersion = latestVersion + } + + rootHash, err := tree.ndb.getRoot(targetVersion) + if err != nil { + return 0, err + } + if rootHash == nil { + return latestVersion, ErrVersionDoesNotExist + } + + tree.Mtx.Lock() + defer func() { + tree.Mtx.Unlock() + if !tree.skipFastStorageUpgrade { + // Attempt to upgrade + if _, err := tree.enableFastStorageAndCommitIfNotEnabled(); err != nil { + toReturn = 0 + toErr = err + } + } + }() + + iTree := &ImmutableTree{ + ndb: tree.ndb, + version: targetVersion, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + } + if len(rootHash) > 0 { + // If rootHash is empty then root of tree should be nil + // This makes `LazyLoadVersion` to do the same thing as `LoadVersion` + iTree.root, err = tree.ndb.GetNode(rootHash) + if err != nil { + return 0, err + } + } + + tree.orphans = map[string]int64{} + tree.ITree = iTree // Mtx is already held + tree.lastSaved = iTree.clone() + + return targetVersion, nil +} + +// Returns the version number of the latest version found +func (tree *MutableTree) LoadVersion(targetVersion int64) (toReturn int64, toErr error) { + firstVersion, err := tree.ndb.getFirstVersion() + if err != nil { + return 0, err + } + if 
tree.ndb.opts.InitialVersion > math.MaxInt64 { + return firstVersion, fmt.Errorf("initial version %d exceeds max int64", tree.ndb.opts.InitialVersion) + } + // #nosec G115 -- InitialVersion is bounds checked above + if firstVersion > 0 && firstVersion < int64(tree.ndb.opts.InitialVersion) { + return firstVersion, fmt.Errorf("initial version set to %v, but found earlier version %v", + tree.ndb.opts.InitialVersion, firstVersion) + } + + latestVersion, err := tree.ndb.getLatestVersion() + if err != nil { + return 0, err + } + + if latestVersion < targetVersion { + return latestVersion, fmt.Errorf("wanted to load target %d but only found up to %d", targetVersion, latestVersion) + } + + if firstVersion == 0 { + if targetVersion <= 0 { + if !tree.skipFastStorageUpgrade { + _, err := tree.enableFastStorageAndCommitIfNotEnabled() + return 0, err + } + return 0, nil + } + return 0, fmt.Errorf("no versions found while trying to load %v", targetVersion) + } + + if targetVersion <= 0 { + targetVersion = latestVersion + } + + if !tree.VersionExists(targetVersion) { + return 0, ErrVersionDoesNotExist + } + + rootNodeKey, err := tree.ndb.getRoot(targetVersion) + if err != nil { + return 0, err + } + + t := &ImmutableTree{ + ndb: tree.ndb, + version: targetVersion, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + } + + if rootNodeKey != nil { + t.root, err = tree.ndb.GetNode(rootNodeKey) + if err != nil { + return tree.LegacyLoadVersion(targetVersion) + } + } + + tree.orphans = map[string]int64{} + tree.ITree = t // Mtx is already held + tree.lastSaved = t.clone() + + return latestVersion, nil +} + +// Returns the version number of the latest version found +func (tree *MutableTree) LegacyLoadVersion(targetVersion int64) (toReturn int64, toErr error) { + roots, err := tree.ndb.getRoots() + if err != nil { + return 0, err + } + + if len(roots) == 0 { + if targetVersion <= 0 { + if !tree.skipFastStorageUpgrade { + _, err := tree.enableFastStorageAndCommitIfNotEnabled() + 
return 0, err + } + return 0, nil + } + return 0, fmt.Errorf("no versions found while trying to load %v", targetVersion) + } + + firstVersion := int64(0) + latestVersion := int64(0) + + var latestRoot []byte + for version, r := range roots { + if version > latestVersion && (targetVersion == 0 || version <= targetVersion) { + latestVersion = version + latestRoot = r + } + if firstVersion == 0 || version < firstVersion { + firstVersion = version + } + } + + if targetVersion != 0 && latestVersion != targetVersion { + return latestVersion, fmt.Errorf("wanted to load target %v but only found up to %v", + targetVersion, latestVersion) + } + + if tree.ndb.opts.InitialVersion > math.MaxInt64 { + return latestVersion, fmt.Errorf("initial version %d exceeds max int64", tree.ndb.opts.InitialVersion) + } + // #nosec G115 -- InitialVersion is bounds checked above + if firstVersion > 0 && firstVersion < int64(tree.ndb.opts.InitialVersion) { + return latestVersion, fmt.Errorf("initial version set to %v, but found earlier version %v", + tree.ndb.opts.InitialVersion, firstVersion) + } + + t := &ImmutableTree{ + ndb: tree.ndb, + version: latestVersion, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + } + + if len(latestRoot) != 0 { + t.root, err = tree.ndb.GetNode(latestRoot) + if err != nil { + return 0, err + } + } + + tree.orphans = map[string]int64{} + tree.ITree = t // Mtx is already held + tree.lastSaved = t.clone() + return latestVersion, nil +} + +// LoadVersionForOverwriting attempts to load a tree at a previously committed +// version, or the latest version below it. Any versions greater than targetVersion will be deleted. 
+func (tree *MutableTree) LoadVersionForOverwriting(targetVersion int64) (int64, error) { + latestVersion, err := tree.LoadVersion(targetVersion) + if err != nil { + return latestVersion, err + } + + if err = tree.ndb.DeleteVersionsFrom(targetVersion + 1); err != nil { + return latestVersion, err + } + + if !tree.skipFastStorageUpgrade { + if err := tree.enableFastStorageAndCommit(); err != nil { + return latestVersion, err + } + } + + tree.ndb.resetLatestVersion(targetVersion) + fmt.Printf("[Debug] Tree version is %d after revert\n", tree.ITree.version) + + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + + return latestVersion, nil +} + +// Returns true if the tree may be auto-upgraded, false otherwise +// An example of when an upgrade may be performed is when we are enabling fast storage for the first time or +// need to overwrite fast nodes due to mismatch with live state. +func (tree *MutableTree) IsUpgradeable() (bool, error) { + shouldForce, err := tree.ndb.shouldForceFastStorageUpgrade() + if err != nil { + return false, err + } + return !tree.skipFastStorageUpgrade && (!tree.ndb.hasUpgradedToFastStorage() || shouldForce), nil +} + +// enableFastStorageAndCommitIfNotEnabled if nodeDB doesn't mark fast storage as enabled, enable it, and commit the update. +// Checks whether the fast cache on disk matches latest live state. If not, deletes all existing fast nodes and repopulates them +// from latest tree. +// nolint: unparam +func (tree *MutableTree) enableFastStorageAndCommitIfNotEnabled() (bool, error) { + isUpgradeable, err := tree.IsUpgradeable() + if err != nil { + return false, err + } + + if !isUpgradeable { + return false, nil + } + + // If there is a mismatch between which fast nodes are on disk and the live state due to temporary + // downgrade and subsequent re-upgrade, we cannot know for sure which fast nodes have been removed while downgraded. + // Therefore, there might exist stale fast nodes on disk. 
As a result, to avoid persisting the stale state, it might + // be worth to delete the fast nodes from disk. + fastItr := NewFastIterator(nil, nil, true, tree.ndb) + defer func() { _ = fastItr.Close() }() + var deletedFastNodes uint64 + for ; fastItr.Valid(); fastItr.Next() { + deletedFastNodes++ + if err := tree.ndb.DeleteFastNode(fastItr.Key()); err != nil { + return false, err + } + if deletedFastNodes%commitGap == 0 { + if err := tree.ndb.Commit(); err != nil { + return false, err + } + } + } + if deletedFastNodes%commitGap != 0 { + if err := tree.ndb.Commit(); err != nil { + return false, err + } + } + + if err := tree.enableFastStorageAndCommit(); err != nil { + tree.ndb.storageVersion = defaultStorageVersionValue + return false, err + } + return true, nil +} + +func (tree *MutableTree) enableFastStorageAndCommit() error { + var err error + + itr := NewIteratorUnlocked(nil, nil, true, tree.ImmutableTree()) + defer func() { _ = itr.Close() }() + version := tree.ImmutableTree().version + var upgradedFastNodes uint64 + for ; itr.Valid(); itr.Next() { + upgradedFastNodes++ + if err = tree.ndb.SaveFastNodeNoCache(NewFastNode(itr.Key(), itr.Value(), version)); err != nil { + return err + } + if upgradedFastNodes%commitGap == 0 { + if err := tree.ndb.Commit(); err != nil { + return err + } + } + } + + if err = itr.Error(); err != nil { + return err + } + + if err = tree.ndb.setFastStorageVersionToBatch(); err != nil { + return err + } + + return tree.ndb.Commit() +} + +// GetImmutable loads an ImmutableTree at a given version for querying. The returned tree is +// safe for concurrent access, provided the version is not deleted, e.g. via `DeleteVersion()`. 
+func (tree *MutableTree) GetImmutable(version int64) (*ImmutableTree, error) { + rootHash, err := tree.ndb.getRoot(version) + if err != nil { + return nil, err + } + if rootHash == nil { + return nil, ErrVersionDoesNotExist + } + + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + + if len(rootHash) == 0 { + return &ImmutableTree{ + ndb: tree.ndb, + version: version, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + }, nil + } + + root, err := tree.ndb.GetNode(rootHash) + if err != nil { + return nil, err + } + return &ImmutableTree{ + root: root, + ndb: tree.ndb, + version: version, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + }, nil +} + +// Rollback resets the working tree to the latest saved version, discarding +// any unsaved modifications. +func (tree *MutableTree) Rollback() { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + if tree.ITree.version > 0 { + tree.ITree = tree.lastSaved.clone() + } else { + tree.ITree = &ImmutableTree{ + ndb: tree.ndb, + version: 0, + skipFastStorageUpgrade: tree.skipFastStorageUpgrade, + } + } + tree.orphans = map[string]int64{} + if !tree.skipFastStorageUpgrade { + tree.unsavedFastNodeAdditions = map[string]*FastNode{} + tree.unsavedFastNodeRemovals = map[string]interface{}{} + } +} + +// GetVersioned gets the value at the specified key and version. The returned value must not be +// modified, since it may point to data stored within IAVL. 
+func (tree *MutableTree) GetVersioned(key []byte, version int64) ([]byte, error) { + if tree.VersionExists(version) { + if !tree.skipFastStorageUpgrade { + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + if err != nil { + return nil, err + } + + if isFastCacheEnabled { + fastNode, _ := tree.ndb.GetFastNode(key) + if fastNode == nil && version == tree.ndb.latestVersion { + return nil, nil + } + + if fastNode != nil && fastNode.versionLastUpdatedAt <= version { + return fastNode.value, nil + } + } + } + t, err := tree.GetImmutable(version) + if err != nil { + return nil, nil + } + value, err := t.Get(key) + if err != nil { + return nil, err + } + return value, nil + } + return nil, nil +} + +// SaveCurrentVersion overwrites the current version without bumping. +// It will return an error if the version does not exist in tree, or if +// the hash being saved is different. In +// other words, only SaveVersion can insert new node into the tree. +func (tree *MutableTree) SaveCurrentVersion() ([]byte, int64, error) { + version := tree.ImmutableTree().version + if version == 1 && tree.ndb.opts.InitialVersion > 0 { + if tree.ndb.opts.InitialVersion > math.MaxInt64 { + return nil, 0, fmt.Errorf("initial version %d exceeds max int64", tree.ndb.opts.InitialVersion) + } + // #nosec G115 -- InitialVersion is bounds checked above + version = int64(tree.ndb.opts.InitialVersion) + } + + if !tree.VersionExists(version) { + return nil, version, errors.New(fmt.Sprintf("attempting to overwrite non-existent version %d", version)) + } + + existingHash, err := tree.ndb.getRoot(version) + if err != nil { + return nil, version, err + } + + // If the existing root hash is empty (because the tree is empty), then we need to + // compare with the hash of an empty input which is what `WorkingHash()` returns. 
+ if len(existingHash) == 0 { + existingHash = sha256.New().Sum(nil) + } + + newHash, err := tree.WorkingHash() + if err != nil { + return nil, version, err + } + + if bytes.Equal(existingHash, newHash) { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + if v, err := tree.commitVersion(version, true); err != nil { + return nil, v, err + } + clone := tree.ITree.clone() + clone.version = version + tree.ITree = clone + tree.lastSaved = clone.clone() + tree.orphans = map[string]int64{} + return existingHash, version, nil + } + + return nil, version, fmt.Errorf("version %d was already saved to different hash %X (existing hash %X)", version, newHash, existingHash) +} + +// SaveVersion saves a new tree version to disk, based on the current state of +// the tree. Returns the hash and new version number. +func (tree *MutableTree) SaveVersion() ([]byte, int64, error) { + version := tree.ImmutableTree().version + 1 + if version == 1 && tree.ndb.opts.InitialVersion > 0 { + if tree.ndb.opts.InitialVersion > math.MaxInt64 { + return nil, 0, fmt.Errorf("initial version %d exceeds max int64", tree.ndb.opts.InitialVersion) + } + // #nosec G115 -- InitialVersion is bounds checked above + version = int64(tree.ndb.opts.InitialVersion) + } + + if tree.VersionExists(version) { + // If the version already exists, return an error as we're attempting to overwrite. + // However, the same hash means idempotent (i.e. no-op). + existingHash, err := tree.ndb.getRoot(version) + if err != nil { + return nil, version, err + } + + // If the existing root hash is empty (because the tree is empty), then we need to + // compare with the hash of an empty input which is what `WorkingHash()` returns. 
+ if len(existingHash) == 0 { + existingHash = sha256.New().Sum(nil) + } + + newHash, err := tree.WorkingHash() + if err != nil { + return nil, version, err + } + + if bytes.Equal(existingHash, newHash) { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + clone := tree.ITree.clone() + clone.version = version + tree.ITree = clone + tree.lastSaved = clone.clone() + tree.orphans = map[string]int64{} + return existingHash, version, nil + } + + return nil, version, fmt.Errorf("version %d was already saved to different hash %X (existing hash %X)", version, newHash, existingHash) + } + + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + + if v, err := tree.commitVersion(version, false); err != nil { + return nil, v, err + } + + // Mtx is already held at this point + clone := tree.ITree.clone() + clone.version = version + tree.ndb.resetLatestVersion(version) + + // set new working tree + tree.ITree = clone + tree.lastSaved = clone.clone() + tree.orphans = map[string]int64{} + if !tree.skipFastStorageUpgrade { + tree.unsavedFastNodeAdditions = make(map[string]*FastNode) + tree.unsavedFastNodeRemovals = make(map[string]interface{}) + } + + hash, err := tree.lastSaved.Hash() + if err != nil { + return nil, version, err + } + + return hash, version, nil +} + +func (tree *MutableTree) saveFastNodeVersion() error { + if err := tree.saveFastNodeAdditions(); err != nil { + return err + } + if err := tree.saveFastNodeRemovals(); err != nil { + return err + } + return tree.ndb.setFastStorageVersionToBatch() +} + +func (tree *MutableTree) handleOrphans(version int64) error { + if !tree.separateOrphanStorage { + // store orphan in the same levelDB as application data + return tree.ndb.SaveOrphans(version, tree.orphans) + } + + if tree.separateOrphanVersionsToKeep == 0 { + panic("must keep at least one version") + } + + // optimization for the 1 version case so that we don't have to save and immediately delete the same version + if tree.separateOrphanVersionsToKeep == 1 { + for orphan := range 
tree.orphans { + if err := tree.ndb.deleteOrphanedData([]byte(orphan)); err != nil { + return err + } + } + return nil + } + + if err := tree.orphandb.SaveOrphans(version, tree.orphans); err != nil { + return err + } + oldOrphans := tree.orphandb.GetOrphans(version - tree.separateOrphanVersionsToKeep + 1) + for orphan := range oldOrphans { + if err := tree.ndb.deleteOrphanedData([]byte(orphan)); err != nil { + return err + } + } + return tree.orphandb.DeleteOrphans(version - tree.separateOrphanVersionsToKeep + 1) +} + +func (tree *MutableTree) commitVersion(version int64, silentSaveRootError bool) (int64, error) { + if tree.ITree.root == nil { + // There can still be orphans, for example if the root is the node being + // removed. + logger.Debug("SAVE EMPTY TREE %v\n", version) + if err := tree.handleOrphans(version); err != nil { + return 0, err + } + if err := tree.ndb.SaveEmptyRoot(version); !silentSaveRootError && err != nil { + return 0, err + } + } else { + logger.Debug("SAVE TREE %v\n", version) + if _, err := tree.ndb.SaveBranch(tree.ITree.root); err != nil { + return 0, err + } + if err := tree.handleOrphans(version); err != nil { + return 0, err + } + if err := tree.ndb.SaveRoot(tree.ITree.root, version); !silentSaveRootError && err != nil { + return 0, err + } + } + + if !tree.skipFastStorageUpgrade { + if err := tree.saveFastNodeVersion(); err != nil { + return version, err + } + } + + if err := tree.ndb.Commit(); err != nil { + return version, err + } + + return version, nil +} + +// nolint: unused +func (tree *MutableTree) getUnsavedFastNodeAdditions() map[string]*FastNode { + return tree.unsavedFastNodeAdditions +} + +// getUnsavedFastNodeRemovals returns unsaved FastNodes to remove +// nolint: unused +func (tree *MutableTree) getUnsavedFastNodeRemovals() map[string]interface{} { + return tree.unsavedFastNodeRemovals +} + +func (tree *MutableTree) addUnsavedAddition(key []byte, node *FastNode) { + skey := unsafeToStr(key) + 
delete(tree.unsavedFastNodeRemovals, skey) + tree.unsavedFastNodeAdditions[skey] = node +} + +func (tree *MutableTree) saveFastNodeAdditions() error { + keysToSort := make([]string, 0, len(tree.unsavedFastNodeAdditions)) + for key := range tree.unsavedFastNodeAdditions { + keysToSort = append(keysToSort, key) + } + sort.Strings(keysToSort) + + for _, key := range keysToSort { + if err := tree.ndb.SaveFastNode(tree.unsavedFastNodeAdditions[key]); err != nil { + return err + } + } + return nil +} + +func (tree *MutableTree) addUnsavedRemoval(key []byte) { + skey := unsafeToStr(key) + delete(tree.unsavedFastNodeAdditions, skey) + tree.unsavedFastNodeRemovals[skey] = true +} + +func (tree *MutableTree) saveFastNodeRemovals() error { + keysToSort := make([]string, 0, len(tree.unsavedFastNodeRemovals)) + for key := range tree.unsavedFastNodeRemovals { + keysToSort = append(keysToSort, key) + } + sort.Strings(keysToSort) + + for _, key := range keysToSort { + if err := tree.ndb.DeleteFastNode(unsafeToBz(key)); err != nil { + return err + } + } + return nil +} + +// unlocked +func (tree *MutableTree) deleteVersion(version int64) error { + if version <= 0 { + return errors.New("version must be greater than 0") + } + if version == tree.ImmutableTree().version { + return errors.Errorf("cannot delete latest saved version (%d)", version) + } + if !tree.VersionExists(version) { + return errors.Wrap(ErrVersionDoesNotExist, "") + } + if err := tree.ndb.DeleteVersion(version, true); err != nil { + return err + } + + return nil +} + +// SetInitialVersion sets the initial version of the tree, replacing Options.InitialVersion. +// It is only used during the initial SaveVersion() call for a tree with no other versions, +// and is otherwise ignored. +func (tree *MutableTree) SetInitialVersion(version uint64) { + tree.Mtx.Lock() + defer tree.Mtx.Unlock() + tree.ndb.opts.InitialVersion = version +} + +// DeleteVersions deletes a series of versions from the MutableTree. 
+// Deprecated: please use DeleteVersionsRange instead.
+func (tree *MutableTree) DeleteVersions(versions ...int64) error {
+	logger.Debug("DELETING VERSIONS: %v\n", versions)
+
+	if tree.separateOrphanStorage {
+		// no need to delete versions if we are keeping orphans separately
+		return nil
+	}
+
+	if len(versions) == 0 {
+		return nil
+	}
+
+	sort.Slice(versions, func(i, j int) bool {
+		return versions[i] < versions[j]
+	})
+
+	// Find ordered data and delete by interval
+	intervals := map[int64]int64{}
+	var fromVersion int64
+	for _, version := range versions {
+		if version-fromVersion != intervals[fromVersion] {
+			fromVersion = version
+		}
+		intervals[fromVersion]++
+	}
+
+	for fromVersion, sortedBatchSize := range intervals {
+		if err := tree.DeleteVersionsRange(fromVersion, fromVersion+sortedBatchSize); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// DeleteVersionsRange removes versions from an interval from the MutableTree (not inclusive).
+// An error is returned if any single version has active readers.
+// All writes happen in a single batch with a single commit.
+func (tree *MutableTree) DeleteVersionsRange(fromVersion, toVersion int64) error {
+	tree.Mtx.Lock()
+	defer tree.Mtx.Unlock()
+
+	// NOTE(review): was a stray fmt.Printf("[Debug] ...") to stdout; route through the package logger like the sibling delete functions.
+	logger.Debug("Delete version range from %d to %d\n", fromVersion, toVersion)
+	if err := tree.ndb.DeleteVersionsRange(fromVersion, toVersion); err != nil {
+		return err
+	}
+
+	if err := tree.ndb.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// DeleteVersion deletes a tree version from disk. The version can then no
+// longer be accessed.
+func (tree *MutableTree) DeleteVersion(version int64) error {
+	logger.Debug("DELETE VERSION: %d\n", version)
+
+	if err := tree.deleteVersion(version); err != nil {
+		return err
+	}
+
+	tree.Mtx.Lock()
+	defer tree.Mtx.Unlock()
+
+	if err := tree.ndb.Commit(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Rotate right and return the new node and orphan.
+// Mtx already held +func (tree *MutableTree) rotateRight(node *Node) (*Node, *Node, error) { + version := tree.ITree.version + 1 + + var err error + // TODO: optimize balance & rotate. + node, err = node.clone(version) + if err != nil { + return nil, nil, err + } + + orphaned, err := node.getLeftNode(tree.ITree) + if err != nil { + return nil, nil, err + } + newNode, err := orphaned.clone(version) + if err != nil { + return nil, nil, err + } + + newNoderHash, newNoderCached := newNode.GetRightHash(), newNode.GetRightNode() + newNode.SetRightHash(node.GetHash()) + newNode.SetRightNode(node) + node.SetLeftHash(newNoderHash) + node.SetLeftNode(newNoderCached) + + err = node.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, err + } + + err = newNode.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, err + } + + return newNode, orphaned, nil +} + +// Rotate left and return the new node and orphan. +func (tree *MutableTree) rotateLeft(node *Node) (*Node, *Node, error) { + version := tree.ITree.version + 1 + + var err error + // TODO: optimize balance & rotate. 
+ node, err = node.clone(version) + if err != nil { + return nil, nil, err + } + + orphaned, err := node.getRightNode(tree.ITree) + if err != nil { + return nil, nil, err + } + newNode, err := orphaned.clone(version) + if err != nil { + return nil, nil, err + } + + newNodelHash, newNodelCached := newNode.GetLeftHash(), newNode.GetLeftNode() + newNode.SetLeftHash(node.GetHash()) + newNode.SetLeftNode(node) + node.SetRightHash(newNodelHash) + node.SetRightNode(newNodelCached) + + err = node.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, err + } + + err = newNode.calcHeightAndSize(tree.ITree) + if err != nil { + return nil, nil, err + } + + return newNode, orphaned, nil +} + +// NOTE: assumes that node can be modified +// TODO: optimize balance & rotate +func (tree *MutableTree) balance(node *Node, orphans *[]*Node) (newSelf *Node, err error) { + if node.GetPersisted() { + return nil, fmt.Errorf("unexpected balance() call on persisted node") + } + balance, err := node.calcBalance(tree.ITree) + if err != nil { + return nil, err + } + + if balance > 1 { + leftNode, err := node.getLeftNode(tree.ITree) + if err != nil { + return nil, err + } + + lftBalance, err := leftNode.calcBalance(tree.ITree) + if err != nil { + return nil, err + } + + if lftBalance >= 0 { + // Left Left Case + newNode, orphaned, err := tree.rotateRight(node) + if err != nil { + return nil, err + } + *orphans = append(*orphans, orphaned) + return newNode, nil + } + // Left Right Case + var leftOrphaned *Node + + left, err := node.getLeftNode(tree.ITree) + if err != nil { + return nil, err + } + node.SetLeftHash(nil) + lNode, lOrphaned, err := tree.rotateLeft(left) + if err != nil { + return nil, err + } + leftOrphaned = lOrphaned + node.SetLeftNode(lNode) + + newNode, rightOrphaned, err := tree.rotateRight(node) + if err != nil { + return nil, err + } + *orphans = append(*orphans, left, leftOrphaned, rightOrphaned) + return newNode, nil + } + if balance < -1 { + rightNode, err := 
node.getRightNode(tree.ITree) + if err != nil { + return nil, err + } + + rightBalance, err := rightNode.calcBalance(tree.ITree) + if err != nil { + return nil, err + } + if rightBalance <= 0 { + // Right Right Case + newNode, orphaned, err := tree.rotateLeft(node) + if err != nil { + return nil, err + } + *orphans = append(*orphans, orphaned) + return newNode, nil + } + // Right Left Case + var rightOrphaned *Node + + right, err := node.getRightNode(tree.ITree) + if err != nil { + return nil, err + } + node.SetRightHash(nil) + rNode, rOrphaned, err := tree.rotateRight(right) + if err != nil { + return nil, err + } + rightOrphaned = rOrphaned + node.SetRightNode(rNode) + newNode, leftOrphaned, err := tree.rotateLeft(node) + if err != nil { + return nil, err + } + + *orphans = append(*orphans, right, leftOrphaned, rightOrphaned) + return newNode, nil + } + // Nothing changed + return node, nil +} + +func (tree *MutableTree) addOrphans(orphans []*Node) error { + for _, node := range orphans { + if !node.GetPersisted() { + // We don't need to orphan nodes that were never persisted. 
+ continue + } + if len(node.GetHash()) == 0 { + return fmt.Errorf("expected to find node hash, but was empty") + } + tree.orphans[unsafeToStr(node.GetHash())] = node.GetVersion() + } + return nil +} + +func (tree *MutableTree) Version() int64 { + return tree.ImmutableTree().Version() +} diff --git a/sei-iavl/mutable_tree_test.go b/sei-iavl/mutable_tree_test.go new file mode 100644 index 0000000000..f65f54f4ea --- /dev/null +++ b/sei-iavl/mutable_tree_test.go @@ -0,0 +1,1447 @@ +package iavl + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "sort" + "strconv" + "testing" + + "github.com/golang/mock/gomock" + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" + "github.com/sei-protocol/sei-chain/sei-iavl/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + db "github.com/tendermint/tm-db" +) + +var ( + tKey1 = []byte("k1") + tVal1 = []byte("v1") + + tKey2 = []byte("k2") + tVal2 = []byte("v2") +) + +func setupMutableTree(t *testing.T) *MutableTree { + memDB := db.NewMemDB() + tree, err := NewMutableTree(memDB, 0, false) + require.NoError(t, err) + return tree +} + +func TestDelete(t *testing.T) { + tree := setupMutableTree(t) + + tree.set([]byte("k1"), []byte("Fred")) + hash, version, err := tree.SaveVersion() + require.NoError(t, err) + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + require.NoError(t, tree.DeleteVersion(version)) + + k1Value, _, _ := tree.GetVersionedWithProof([]byte("k1"), version) + require.Nil(t, k1Value) + + key := tree.ndb.rootKey(version) + err = tree.ndb.db.Set(key, hash) + require.NoError(t, err) + + k1Value, _, err = tree.GetVersionedWithProof([]byte("k1"), version) + require.Nil(t, err) + require.Equal(t, 0, bytes.Compare([]byte("Fred"), k1Value)) +} + +func TestGetRemove(t *testing.T) { + require := require.New(t) + tree := setupMutableTree(t) + testGet := func(exists bool) { + v, err := tree.Get(tKey1) + require.NoError(err) + if exists { + require.Equal(tVal1, v, "key 
should exist") + } else { + require.Nil(v, "key should not exist") + } + } + + testGet(false) + + ok, err := tree.Set(tKey1, tVal1) + require.NoError(err) + require.False(ok, "new key set: nothing to update") + + // add second key to avoid tree.root removal + ok, err = tree.Set(tKey2, tVal2) + require.NoError(err) + require.False(ok, "new key set: nothing to update") + + testGet(true) + + // Save to tree.ImmutableTree + _, version, err := tree.SaveVersion() + require.NoError(err) + require.Equal(int64(1), version) + + testGet(true) + + v, ok, err := tree.Remove(tKey1) + require.NoError(err) + require.True(ok, "key should be removed") + require.Equal(tVal1, v, "key should exist") + + testGet(false) +} + +func TestTraverse(t *testing.T) { + tree := setupMutableTree(t) + + for i := 0; i < 6; i++ { + tree.set([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + + require.Equal(t, 11, tree.ImmutableTree().nodeSize(), "Size of tree unexpected") +} + +func TestMutableTree_DeleteVersions(t *testing.T) { + tree := setupMutableTree(t) + + type entry struct { + key []byte + value []byte + } + + versionEntries := make(map[int64][]entry) + + // create 10 tree versions, each with 1000 random key/value entries + for i := 0; i < 10; i++ { + entries := make([]entry, 1000) + + for j := 0; j < 1000; j++ { + k := randBytes(10) + v := randBytes(10) + + entries[j] = entry{k, v} + _, err := tree.Set(k, v) + require.NoError(t, err) + } + + _, v, err := tree.SaveVersion() + require.NoError(t, err) + + versionEntries[v] = entries + } + +} + +func TestMutableTree_LoadVersion_Empty(t *testing.T) { + tree := setupMutableTree(t) + + version, err := tree.LoadVersion(0) + require.NoError(t, err) + assert.EqualValues(t, 0, version) + + version, err = tree.LoadVersion(-1) + require.NoError(t, err) + assert.EqualValues(t, 0, version) + + _, err = tree.LoadVersion(3) + require.Error(t, err) +} + +func TestMutableTree_LazyLoadVersion_Empty(t *testing.T) { + memDB := db.NewMemDB() + 
tree, err := NewMutableTree(memDB, 0, false) + require.NoError(t, err) + + version, err := tree.LazyLoadVersion(0) + require.NoError(t, err) + assert.EqualValues(t, 0, version) + + version, err = tree.LazyLoadVersion(-1) + require.NoError(t, err) + assert.EqualValues(t, 0, version) + + _, err = tree.LazyLoadVersion(3) + require.Error(t, err) +} + +func TestMutableTree_DeleteVersionsRange(t *testing.T) { + require := require.New(t) + + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + const maxLength = 100 + const fromLength = 10 + + versions := make([]int64, 0, maxLength) + for count := 1; count <= maxLength; count++ { + versions = append(versions, int64(count)) + countStr := strconv.Itoa(count) + // Set kv pair and save version + tree.Set([]byte("aaa"), []byte("bbb")) + tree.Set([]byte("key"+countStr), []byte("value"+countStr)) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + } + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + targetVersion, err := tree.LoadVersion(int64(maxLength)) + require.NoError(err) + require.Equal(targetVersion, int64(maxLength), "targetVersion shouldn't larger than the actual tree latest version") + + err = tree.DeleteVersionsRange(fromLength, int64(maxLength/2)) + require.NoError(err, "DeleteVersionsTo should not fail") + + for _, version := range versions[:fromLength-1] { + + v, err := tree.LazyLoadVersion(version) + require.NoError(err, version) + require.Equal(v, version) + + value, err := tree.Get([]byte("aaa")) + require.NoError(err) + require.Equal(string(value), "bbb") + + for _, count := range versions[:version] { + countStr := strconv.Itoa(int(count)) + value, err := tree.Get([]byte("key" + countStr)) + require.NoError(err) + require.Equal(string(value), "value"+countStr) + } + } + + for _, version := range versions[int64(maxLength/2)-1:] { + v, err := tree.LazyLoadVersion(version) + require.NoError(err) + require.Equal(v, 
version) + + value, err := tree.Get([]byte("aaa")) + require.NoError(err) + require.Equal(string(value), "bbb") + + for _, count := range versions[:fromLength] { + countStr := strconv.Itoa(int(count)) + value, err := tree.Get([]byte("key" + countStr)) + require.NoError(err) + require.Equal(string(value), "value"+countStr) + } + for _, count := range versions[int64(maxLength/2)-1 : version] { + countStr := strconv.Itoa(int(count)) + value, err := tree.Get([]byte("key" + countStr)) + require.NoError(err) + require.Equal(string(value), "value"+countStr) + } + } +} + +func TestMutableTree_InitialVersion(t *testing.T) { + memDB := db.NewMemDB() + tree, err := NewMutableTreeWithOpts(memDB, 0, &Options{InitialVersion: 9}, false) + require.NoError(t, err) + + tree.Set([]byte("a"), []byte{0x01}) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 9, version) + + tree.Set([]byte("b"), []byte{0x02}) + _, version, err = tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 10, version) + + // Reloading the tree with the same initial version is fine + tree, err = NewMutableTreeWithOpts(memDB, 0, &Options{InitialVersion: 9}, false) + require.NoError(t, err) + version, err = tree.Load() + require.NoError(t, err) + assert.EqualValues(t, 10, version) + + // Reloading the tree with an initial version beyond the lowest should error + tree, err = NewMutableTreeWithOpts(memDB, 0, &Options{InitialVersion: 10}, false) + require.NoError(t, err) + _, err = tree.Load() + require.Error(t, err) + + // Reloading the tree with a lower initial version is fine, and new versions can be produced + tree, err = NewMutableTreeWithOpts(memDB, 0, &Options{InitialVersion: 3}, false) + require.NoError(t, err) + version, err = tree.Load() + require.NoError(t, err) + assert.EqualValues(t, 10, version) + + tree.Set([]byte("c"), []byte{0x03}) + _, version, err = tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 11, version) +} + +func 
TestMutableTree_SetInitialVersion(t *testing.T) { + tree := setupMutableTree(t) + tree.SetInitialVersion(9) + + tree.Set([]byte("a"), []byte{0x01}) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 9, version) +} + +func BenchmarkMutableTree_Set(b *testing.B) { + db, err := db.NewDB("test", db.MemDBBackend, "") + require.NoError(b, err) + t, err := NewMutableTree(db, 100000, false) + require.NoError(b, err) + for i := 0; i < 1000000; i++ { + t.Set(randBytes(10), []byte{}) + } + b.ReportAllocs() + runtime.GC() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + t.Set(randBytes(10), []byte{}) + } +} + +func prepareTree(t *testing.T) *MutableTree { + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 1000, false) + require.NoError(t, err) + for i := 0; i < 100; i++ { + tree.Set([]byte{byte(i)}, []byte("a")) + } + _, ver, err := tree.SaveVersion() + require.True(t, ver == 1) + require.NoError(t, err) + for i := 0; i < 100; i++ { + tree.Set([]byte{byte(i)}, []byte("b")) + } + _, ver, err = tree.SaveVersion() + require.True(t, ver == 2) + require.NoError(t, err) + newTree, err := NewMutableTree(mdb, 1000, false) + require.NoError(t, err) + + return newTree +} + +func TestMutableTree_VersionExists(t *testing.T) { + tree := prepareTree(t) + require.True(t, tree.VersionExists(1)) + require.True(t, tree.VersionExists(2)) + require.False(t, tree.VersionExists(3)) +} + +func checkGetVersioned(t *testing.T, tree *MutableTree, version int64, key, value []byte) { + val, err := tree.GetVersioned(key, version) + require.NoError(t, err) + require.True(t, bytes.Equal(val, value)) +} + +func TestMutableTree_GetVersioned(t *testing.T) { + tree := prepareTree(t) + ver, err := tree.LazyLoadVersion(1) + require.True(t, ver == 1) + require.NoError(t, err) + // check key of unloaded version + checkGetVersioned(t, tree, 1, []byte{1}, []byte("a")) + checkGetVersioned(t, tree, 2, []byte{1}, []byte("b")) + checkGetVersioned(t, tree, 3, []byte{1}, 
nil)
+
+	tree = prepareTree(t)
+	ver, err = tree.LazyLoadVersion(2)
+	require.True(t, ver == 2)
+	require.NoError(t, err)
+	checkGetVersioned(t, tree, 1, []byte{1}, []byte("a"))
+	checkGetVersioned(t, tree, 2, []byte{1}, []byte("b"))
+	checkGetVersioned(t, tree, 3, []byte{1}, nil)
+}
+
+func TestMutableTree_DeleteVersion(t *testing.T) {
+	tree := prepareTree(t)
+	ver, err := tree.LazyLoadVersion(2)
+	require.True(t, ver == 2)
+	require.NoError(t, err)
+
+	require.NoError(t, tree.DeleteVersion(1))
+
+	require.False(t, tree.VersionExists(1))
+	require.True(t, tree.VersionExists(2))
+	require.False(t, tree.VersionExists(3))
+
+	// cannot delete latest version
+	require.Error(t, tree.DeleteVersion(2))
+}
+
+func TestMutableTree_LazyLoadVersionWithEmptyTree(t *testing.T) {
+	mdb := db.NewMemDB()
+	tree, err := NewMutableTree(mdb, 1000, false)
+	require.NoError(t, err)
+	_, v1, err := tree.SaveVersion()
+	require.NoError(t, err)
+
+	newTree1, err := NewMutableTree(mdb, 1000, false)
+	require.NoError(t, err)
+	v2, err := newTree1.LazyLoadVersion(1)
+	require.NoError(t, err)
+	require.True(t, v1 == v2)
+
+	newTree2, err := NewMutableTree(mdb, 1000, false)
+	require.NoError(t, err)
+	// Fix: load on newTree2 (was newTree1), otherwise newTree2 is never
+	// loaded and the final root comparison below is vacuous.
+	v2, err = newTree2.LoadVersion(1)
+	require.NoError(t, err)
+	require.True(t, v1 == v2)
+
+	require.True(t, newTree1.ImmutableTree().root == newTree2.ImmutableTree().root)
+}
+
+func TestMutableTree_SetSimple(t *testing.T) {
+	mdb := db.NewMemDB()
+	tree, err := NewMutableTree(mdb, 0, false)
+	require.NoError(t, err)
+
+	const testKey1 = "a"
+	const testVal1 = "test"
+
+	isUpdated, err := tree.Set([]byte(testKey1), []byte(testVal1))
+	require.NoError(t, err)
+	require.False(t, isUpdated)
+
+	fastValue, err := tree.Get([]byte(testKey1))
+	require.NoError(t, err)
+	_, regularValue, err := tree.ImmutableTree().GetWithIndex([]byte(testKey1))
+	require.NoError(t, err)
+
+	require.Equal(t, []byte(testVal1), fastValue)
+	require.Equal(t, []byte(testVal1), regularValue)
+
+	fastNodeAdditions :=
tree.getUnsavedFastNodeAdditions() + require.Equal(t, 1, len(fastNodeAdditions)) + + fastNodeAddition := fastNodeAdditions[testKey1] + require.Equal(t, []byte(testKey1), fastNodeAddition.key) + require.Equal(t, []byte(testVal1), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) +} + +func TestMutableTree_SetTwoKeys(t *testing.T) { + tree := setupMutableTree(t) + + const testKey1 = "a" + const testVal1 = "test" + + const testKey2 = "b" + const testVal2 = "test2" + + isUpdated, err := tree.Set([]byte(testKey1), []byte(testVal1)) + require.NoError(t, err) + require.False(t, isUpdated) + + isUpdated, err = tree.Set([]byte(testKey2), []byte(testVal2)) + require.NoError(t, err) + require.False(t, isUpdated) + + fastValue, err := tree.Get([]byte(testKey1)) + require.NoError(t, err) + _, regularValue, err := tree.ImmutableTree().GetWithIndex([]byte(testKey1)) + require.NoError(t, err) + require.Equal(t, []byte(testVal1), fastValue) + require.Equal(t, []byte(testVal1), regularValue) + + fastValue2, err := tree.Get([]byte(testKey2)) + require.NoError(t, err) + _, regularValue2, err := tree.ImmutableTree().GetWithIndex([]byte(testKey2)) + require.NoError(t, err) + require.Equal(t, []byte(testVal2), fastValue2) + require.Equal(t, []byte(testVal2), regularValue2) + + fastNodeAdditions := tree.getUnsavedFastNodeAdditions() + require.Equal(t, 2, len(fastNodeAdditions)) + + fastNodeAddition := fastNodeAdditions[testKey1] + require.Equal(t, []byte(testKey1), fastNodeAddition.key) + require.Equal(t, []byte(testVal1), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) + + fastNodeAddition = fastNodeAdditions[testKey2] + require.Equal(t, []byte(testKey2), fastNodeAddition.key) + require.Equal(t, []byte(testVal2), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) +} + +func TestMutableTree_SetOverwrite(t *testing.T) { + tree := setupMutableTree(t) + const 
testKey1 = "a" + const testVal1 = "test" + const testVal2 = "test2" + + isUpdated, err := tree.Set([]byte(testKey1), []byte(testVal1)) + require.NoError(t, err) + require.False(t, isUpdated) + + isUpdated, err = tree.Set([]byte(testKey1), []byte(testVal2)) + require.NoError(t, err) + require.True(t, isUpdated) + + fastValue, err := tree.Get([]byte(testKey1)) + require.NoError(t, err) + _, regularValue, err := tree.ImmutableTree().GetWithIndex([]byte(testKey1)) + require.NoError(t, err) + require.Equal(t, []byte(testVal2), fastValue) + require.Equal(t, []byte(testVal2), regularValue) + + fastNodeAdditions := tree.getUnsavedFastNodeAdditions() + require.Equal(t, 1, len(fastNodeAdditions)) + + fastNodeAddition := fastNodeAdditions[testKey1] + require.Equal(t, []byte(testKey1), fastNodeAddition.key) + require.Equal(t, []byte(testVal2), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) +} + +func TestMutableTree_SetRemoveSet(t *testing.T) { + tree := setupMutableTree(t) + const testKey1 = "a" + const testVal1 = "test" + + // Set 1 + isUpdated, err := tree.Set([]byte(testKey1), []byte(testVal1)) + require.NoError(t, err) + require.False(t, isUpdated) + + fastValue, err := tree.Get([]byte(testKey1)) + require.NoError(t, err) + _, regularValue, err := tree.ImmutableTree().GetWithIndex([]byte(testKey1)) + require.Equal(t, []byte(testVal1), fastValue) + require.Equal(t, []byte(testVal1), regularValue) + + fastNodeAdditions := tree.getUnsavedFastNodeAdditions() + require.Equal(t, 1, len(fastNodeAdditions)) + + fastNodeAddition := fastNodeAdditions[testKey1] + require.Equal(t, []byte(testKey1), fastNodeAddition.key) + require.Equal(t, []byte(testVal1), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) + + // Remove + removedVal, isRemoved, err := tree.Remove([]byte(testKey1)) + require.NoError(t, err) + require.NotNil(t, removedVal) + require.True(t, isRemoved) + + fastNodeAdditions = 
tree.getUnsavedFastNodeAdditions() + require.Equal(t, 0, len(fastNodeAdditions)) + + fastNodeRemovals := tree.getUnsavedFastNodeRemovals() + require.Equal(t, 1, len(fastNodeRemovals)) + + fastValue, err = tree.Get([]byte(testKey1)) + require.NoError(t, err) + _, regularValue, err = tree.ImmutableTree().GetWithIndex([]byte(testKey1)) + require.NoError(t, err) + require.Nil(t, fastValue) + require.Nil(t, regularValue) + + // Set 2 + isUpdated, err = tree.Set([]byte(testKey1), []byte(testVal1)) + require.NoError(t, err) + require.False(t, isUpdated) + + fastValue, err = tree.Get([]byte(testKey1)) + require.NoError(t, err) + _, regularValue, err = tree.ImmutableTree().GetWithIndex([]byte(testKey1)) + require.NoError(t, err) + require.Equal(t, []byte(testVal1), fastValue) + require.Equal(t, []byte(testVal1), regularValue) + + fastNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, 1, len(fastNodeAdditions)) + + fastNodeAddition = fastNodeAdditions[testKey1] + require.Equal(t, []byte(testKey1), fastNodeAddition.key) + require.Equal(t, []byte(testVal1), fastNodeAddition.value) + require.Equal(t, int64(1), fastNodeAddition.versionLastUpdatedAt) + + fastNodeRemovals = tree.getUnsavedFastNodeRemovals() + require.Equal(t, 0, len(fastNodeRemovals)) +} + +func TestMutableTree_FastNodeIntegration(t *testing.T) { + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 1000, false) + require.NoError(t, err) + + const key1 = "a" + const key2 = "b" + const key3 = "c" + + const testVal1 = "test" + const testVal2 = "test2" + + // Set key1 + res, err := tree.Set([]byte(key1), []byte(testVal1)) + require.NoError(t, err) + require.False(t, res) + + unsavedNodeAdditions := tree.getUnsavedFastNodeAdditions() + require.Equal(t, len(unsavedNodeAdditions), 1) + + // Set key2 + res, err = tree.Set([]byte(key2), []byte(testVal1)) + require.NoError(t, err) + require.False(t, res) + + unsavedNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, 
len(unsavedNodeAdditions), 2) + + // Set key3 + res, err = tree.Set([]byte(key3), []byte(testVal1)) + require.NoError(t, err) + require.False(t, res) + + unsavedNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, len(unsavedNodeAdditions), 3) + + // Set key3 with new value + res, err = tree.Set([]byte(key3), []byte(testVal2)) + require.NoError(t, err) + require.True(t, res) + + unsavedNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, len(unsavedNodeAdditions), 3) + + // Remove key2 + removedVal, isRemoved, err := tree.Remove([]byte(key2)) + require.NoError(t, err) + require.True(t, isRemoved) + require.Equal(t, []byte(testVal1), removedVal) + + unsavedNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, len(unsavedNodeAdditions), 2) + + unsavedNodeRemovals := tree.getUnsavedFastNodeRemovals() + require.Equal(t, len(unsavedNodeRemovals), 1) + + // Save + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + unsavedNodeAdditions = tree.getUnsavedFastNodeAdditions() + require.Equal(t, len(unsavedNodeAdditions), 0) + + unsavedNodeRemovals = tree.getUnsavedFastNodeRemovals() + require.Equal(t, len(unsavedNodeRemovals), 0) + + // Load + t2, err := NewMutableTree(mdb, 0, false) + require.NoError(t, err) + + _, err = t2.Load() + require.NoError(t, err) + + // Get and GetFast + fastValue, err := t2.Get([]byte(key1)) + require.NoError(t, err) + _, regularValue, err := tree.ImmutableTree().GetWithIndex([]byte(key1)) + require.NoError(t, err) + require.Equal(t, []byte(testVal1), fastValue) + require.Equal(t, []byte(testVal1), regularValue) + + fastValue, err = t2.Get([]byte(key2)) + require.NoError(t, err) + _, regularValue, err = t2.ImmutableTree().GetWithIndex([]byte(key2)) + require.NoError(t, err) + require.Nil(t, fastValue) + require.Nil(t, regularValue) + + fastValue, err = t2.Get([]byte(key3)) + require.NoError(t, err) + _, regularValue, err = tree.ImmutableTree().GetWithIndex([]byte(key3)) + 
require.NoError(t, err) + require.Equal(t, []byte(testVal2), fastValue) + require.Equal(t, []byte(testVal2), regularValue) +} + +func TestIterate_MutableTree_Unsaved(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + assertMutableMirrorIterate(t, tree, mirror) +} + +func TestIterate_MutableTree_Saved(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + assertMutableMirrorIterate(t, tree, mirror) +} + +func TestIterate_MutableTree_Unsaved_NextVersion(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + assertMutableMirrorIterate(t, tree, mirror) + + randomizeTreeAndMirror(t, tree, mirror) + + assertMutableMirrorIterate(t, tree, mirror) +} + +func TestIterator_MutableTree_Invalid(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + itr, err := tree.Iterator([]byte("a"), []byte("b"), true) + require.NoError(t, err) + require.NotNil(t, itr) + require.False(t, itr.Valid()) +} + +func TestUpgradeStorageToFast_LatestVersion_Success(t *testing.T) { + // Setup + db := db.NewMemDB() + tree, err := NewMutableTree(db, 1000, false) + require.NoError(t, err) + + // Default version when storage key does not exist in the db + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + mirror := make(map[string]string) + // Fill with some data + randomizeTreeAndMirror(t, tree, mirror) + + // Enable fast storage + isUpgradeable, err := tree.IsUpgradeable() + require.True(t, isUpgradeable) + require.NoError(t, err) + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.True(t, enabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + 
require.NoError(t, err) + require.True(t, isFastCacheEnabled) +} + +func TestUpgradeStorageToFast_AlreadyUpgraded_Success(t *testing.T) { + // Setup + db := db.NewMemDB() + tree, err := NewMutableTree(db, 1000, false) + require.NoError(t, err) + + // Default version when storage key does not exist in the db + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + mirror := make(map[string]string) + // Fill with some data + randomizeTreeAndMirror(t, tree, mirror) + + // Enable fast storage + isUpgradeable, err := tree.IsUpgradeable() + require.True(t, isUpgradeable) + require.NoError(t, err) + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.True(t, enabled) + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + // Test enabling fast storage when already enabled + enabled, err = tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.False(t, enabled) + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + +} + +func TestUpgradeStorageToFast_DbErrorConstructor_Failure(t *testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + rIterMock := mock.NewMockIterator(ctrl) + + // rIterMock is used to get the latest version from disk. 
We are mocking that rIterMock returns latestTreeVersion from disk + rIterMock.EXPECT().Valid().Return(true).Times(1) + rIterMock.EXPECT().Key().Return(rootKeyFormat.Key([]byte(defaultStorageVersionValue))) + rIterMock.EXPECT().Close().Return(nil).Times(1) + + expectedError := errors.New("some db error") + + dbMock.EXPECT().Get(gomock.Any()).Return(nil, expectedError).Times(1) + dbMock.EXPECT().NewBatch().Return(nil).Times(1) + dbMock.EXPECT().ReverseIterator(gomock.Any(), gomock.Any()).Return(rIterMock, nil).Times(1) + + tree, err := NewMutableTree(dbMock, 0, false) + require.Nil(t, err) + require.NotNil(t, tree) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) +} + +func TestUpgradeStorageToFast_DbErrorEnableFastStorage_Failure(t *testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + rIterMock := mock.NewMockIterator(ctrl) + + // rIterMock is used to get the latest version from disk. 
We are mocking that rIterMock returns latestTreeVersion from disk + rIterMock.EXPECT().Valid().Return(true).Times(1) + rIterMock.EXPECT().Key().Return(rootKeyFormat.Key([]byte(defaultStorageVersionValue))) + rIterMock.EXPECT().Close().Return(nil).Times(1) + + expectedError := errors.New("some db error") + + batchMock := mock.NewMockBatch(ctrl) + + dbMock.EXPECT().Get(gomock.Any()).Return(nil, nil).Times(1) + dbMock.EXPECT().NewBatch().Return(batchMock).Times(1) + dbMock.EXPECT().ReverseIterator(gomock.Any(), gomock.Any()).Return(rIterMock, nil).Times(1) + + iterMock := mock.NewMockIterator(ctrl) + dbMock.EXPECT().Iterator(gomock.Any(), gomock.Any()).Return(iterMock, nil) + iterMock.EXPECT().Error() + iterMock.EXPECT().Valid().Times(2) + iterMock.EXPECT().Close() + + batchMock.EXPECT().Set(gomock.Any(), gomock.Any()).Return(expectedError).Times(1) + + tree, err := NewMutableTree(dbMock, 0, false) + require.Nil(t, err) + require.NotNil(t, tree) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.ErrorIs(t, err, expectedError) + require.False(t, enabled) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) +} + +func TestFastStorageReUpgradeProtection_NoForceUpgrade_Success(t *testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + rIterMock := mock.NewMockIterator(ctrl) + + // We are trying to test downgrade and re-upgrade protection + // We need to set up a state where latest fast storage version is equal to latest tree version + const latestFastStorageVersionOnDisk = 1 + const latestTreeVersion = latestFastStorageVersionOnDisk + + // Setup fake reverse iterator db to traverse root versions, called by ndb's getLatestVersion + expectedStorageVersion := []byte(fastStorageVersionValue + 
fastStorageVersionDelimiter + strconv.Itoa(latestFastStorageVersionOnDisk)) + + // rIterMock is used to get the latest version from disk. We are mocking that rIterMock returns latestTreeVersion from disk + rIterMock.EXPECT().Valid().Return(true).Times(1) + rIterMock.EXPECT().Key().Return(rootKeyFormat.Key(latestTreeVersion)) + rIterMock.EXPECT().Close().Return(nil).Times(1) + + batchMock := mock.NewMockBatch(ctrl) + + dbMock.EXPECT().Get(gomock.Any()).Return(expectedStorageVersion, nil).Times(1) + dbMock.EXPECT().NewBatch().Return(batchMock).Times(1) + dbMock.EXPECT().ReverseIterator(gomock.Any(), gomock.Any()).Return(rIterMock, nil).Times(1) // called to get latest version + + tree, err := NewMutableTree(dbMock, 0, false) + require.Nil(t, err) + require.NotNil(t, tree) + + // Pretend that we called Load and have the latest state in the tree + tree.ImmutableTree().version = latestTreeVersion + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + require.Equal(t, latestVersion, int64(latestTreeVersion)) + + // Ensure that the right branch of enableFastStorageAndCommitIfNotEnabled will be triggered + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + shouldForce, err := tree.ndb.shouldForceFastStorageUpgrade() + require.False(t, shouldForce) + require.NoError(t, err) + + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.False(t, enabled) +} + +func TestFastStorageReUpgradeProtection_ForceUpgradeFirstTime_NoForceSecondTime_Success(t *testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + batchMock := mock.NewMockBatch(ctrl) + iterMock := mock.NewMockIterator(ctrl) + rIterMock := mock.NewMockIterator(ctrl) + + // We are trying to test downgrade and re-upgrade protection + // We need to set up a state where latest fast storage version is of a lower version + // than tree version + const 
latestFastStorageVersionOnDisk = 1 + const latestTreeVersion = latestFastStorageVersionOnDisk + 1 + + // Setup db for iterator and reverse iterator mocks + expectedStorageVersion := []byte(fastStorageVersionValue + fastStorageVersionDelimiter + strconv.Itoa(latestFastStorageVersionOnDisk)) + + // Setup fake reverse iterator db to traverse root versions, called by ndb's getLatestVersion + // rItr, err := db.ReverseIterator(rootKeyFormat.Key(1), rootKeyFormat.Key(latestTreeVersion + 1)) + // require.NoError(t, err) + + // dbMock represents the underlying database under the hood of nodeDB + dbMock.EXPECT().Get(gomock.Any()).Return(expectedStorageVersion, nil).Times(1) + dbMock.EXPECT().NewBatch().Return(batchMock).Times(3) + dbMock.EXPECT().ReverseIterator(gomock.Any(), gomock.Any()).Return(rIterMock, nil).Times(1) // called to get latest version + startFormat := fastKeyFormat.Key() + endFormat := fastKeyFormat.Key() + endFormat[0]++ + dbMock.EXPECT().Iterator(startFormat, endFormat).Return(iterMock, nil).Times(1) + + // rIterMock is used to get the latest version from disk. We are mocking that rIterMock returns latestTreeVersion from disk + rIterMock.EXPECT().Valid().Return(true).Times(1) + rIterMock.EXPECT().Key().Return(rootKeyFormat.Key(latestTreeVersion)) + rIterMock.EXPECT().Close().Return(nil).Times(1) + + fastNodeKeyToDelete := []byte("some_key") + + // batchMock represents a structure that receives all the updates related to + // upgrade and then commits them all in the end. 
+ updatedExpectedStorageVersion := make([]byte, len(expectedStorageVersion)) + copy(updatedExpectedStorageVersion, expectedStorageVersion) + updatedExpectedStorageVersion[len(updatedExpectedStorageVersion)-1]++ + batchMock.EXPECT().Delete(fastKeyFormat.Key(fastNodeKeyToDelete)).Return(nil).Times(1) + batchMock.EXPECT().Set(metadataKeyFormat.Key([]byte(storageVersionKey)), updatedExpectedStorageVersion).Return(nil).Times(1) + batchMock.EXPECT().Write().Return(nil).Times(2) + batchMock.EXPECT().Close().Return(nil).Times(2) + + // iterMock is used to mock the underlying db iterator behind fast iterator + // Here, we want to mock the behavior of deleting fast nodes from disk when + // force upgrade is detected. + iterMock.EXPECT().Valid().Return(true).Times(1) + iterMock.EXPECT().Error().Return(nil).Times(1) + iterMock.EXPECT().Key().Return(fastKeyFormat.Key(fastNodeKeyToDelete)).Times(1) + // encode value + var buf bytes.Buffer + testValue := "test_value" + buf.Grow(encoding.EncodeVarintSize(int64(latestFastStorageVersionOnDisk)) + encoding.EncodeBytesSize([]byte(testValue))) + err := encoding.EncodeVarint(&buf, int64(latestFastStorageVersionOnDisk)) + require.NoError(t, err) + err = encoding.EncodeBytes(&buf, []byte(testValue)) + require.NoError(t, err) + iterMock.EXPECT().Value().Return(buf.Bytes()).Times(1) // this is encoded as version 1 with value "2" + iterMock.EXPECT().Valid().Return(true).Times(1) + // Call Next at the end of loop iteration + iterMock.EXPECT().Next().Return().Times(1) + iterMock.EXPECT().Error().Return(nil).Times(1) + iterMock.EXPECT().Valid().Return(false).Times(1) + // Call Valid after first iteration + iterMock.EXPECT().Valid().Return(false).Times(1) + iterMock.EXPECT().Close().Return(nil).Times(1) + + tree, err := NewMutableTree(dbMock, 0, false) + require.Nil(t, err) + require.NotNil(t, tree) + + // Pretend that we called Load and have the latest state in the tree + tree.ImmutableTree().version = latestTreeVersion + latestVersion, err := 
tree.ndb.getLatestVersion() + require.NoError(t, err) + require.Equal(t, latestVersion, int64(latestTreeVersion)) + + // Ensure that the right branch of enableFastStorageAndCommitIfNotEnabled will be triggered + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + shouldForce, err := tree.ndb.shouldForceFastStorageUpgrade() + require.True(t, shouldForce) + require.NoError(t, err) + + // Actual method under test + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.True(t, enabled) + + // Test that second time we call this, force upgrade does not happen + enabled, err = tree.enableFastStorageAndCommitIfNotEnabled() + require.NoError(t, err) + require.False(t, enabled) +} + +func TestUpgradeStorageToFast_Integration_Upgraded_FastIterator_Success(t *testing.T) { + // Setup + tree, mirror := setupTreeAndMirror(t, 100, false) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err := tree.IsUpgradeable() + require.True(t, isUpgradeable) + require.NoError(t, err) + + // Should auto enable in save version + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + sut, _ := NewMutableTree(tree.ndb.db, 1000, false) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = sut.IsUpgradeable() + require.False(t, isUpgradeable) // upgraded in save version + require.NoError(t, err) + + // Load version - should auto enable fast storage + version, err := sut.Load() + require.NoError(t, err) + + isFastCacheEnabled, 
err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + + require.Equal(t, int64(1), version) + + // Test that upgraded mutable tree iterates as expected + t.Run("Mutable tree", func(t *testing.T) { + i := 0 + sut.Iterate(func(k, v []byte) bool { + require.Equal(t, []byte(mirror[i][0]), k) + require.Equal(t, []byte(mirror[i][1]), v) + i++ + return false + }) + }) + + // Test that upgraded immutable tree iterates as expected + t.Run("Immutable tree", func(t *testing.T) { + immutableTree, err := sut.GetImmutable(sut.ImmutableTree().version) + require.NoError(t, err) + + i := 0 + immutableTree.Iterate(func(k, v []byte) bool { + require.Equal(t, []byte(mirror[i][0]), k) + require.Equal(t, []byte(mirror[i][1]), v) + i++ + return false + }) + }) +} + +func TestUpgradeStorageToFast_Integration_Upgraded_GetFast_Success(t *testing.T) { + // Setup + tree, mirror := setupTreeAndMirror(t, 100, false) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err := tree.IsUpgradeable() + require.True(t, isUpgradeable) + require.NoError(t, err) + + // Should auto enable in save version + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + sut, _ := NewMutableTree(tree.ndb.db, 1000, false) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = sut.IsUpgradeable() + require.False(t, isUpgradeable) // upgraded in save version + require.NoError(t, err) + + // LazyLoadVersion - should auto enable fast storage + version, err := sut.LazyLoadVersion(1) + require.NoError(t, err) + + 
isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + + require.Equal(t, int64(1), version) + + t.Run("Mutable tree", func(t *testing.T) { + for _, kv := range mirror { + v, err := sut.Get([]byte(kv[0])) + require.NoError(t, err) + require.Equal(t, []byte(kv[1]), v) + } + }) + + t.Run("Immutable tree", func(t *testing.T) { + immutableTree, err := sut.GetImmutable(sut.ImmutableTree().version) + require.NoError(t, err) + + for _, kv := range mirror { + v, err := immutableTree.Get([]byte(kv[0])) + require.NoError(t, err) + require.Equal(t, []byte(kv[1]), v) + } + }) +} + +func TestUpgradeStorageToFast_Success(t *testing.T) { + tmpCommitGap := commitGap + commitGap = 1000 + defer func() { + commitGap = tmpCommitGap + }() + + type fields struct { + nodeCount int + } + tests := []struct { + name string + fields fields + }{ + {"less than commit gap", fields{nodeCount: 100}}, + {"equal to commit gap", fields{nodeCount: int(commitGap)}}, + {"great than commit gap", fields{nodeCount: int(commitGap) + 100}}, + {"two times commit gap", fields{nodeCount: int(commitGap) * 2}}, + {"two times plus commit gap", fields{nodeCount: int(commitGap)*2 + 1}}, + } + + for _, tt := range tests { + tree, mirror := setupTreeAndMirror(t, tt.fields.nodeCount, false) + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.Nil(t, err) + require.True(t, enabled) + t.Run(tt.name, func(t *testing.T) { + i := 0 + iter := NewFastIterator(nil, nil, true, tree.ndb) + for ; iter.Valid(); iter.Next() { + require.Equal(t, []byte(mirror[i][0]), iter.Key()) + require.Equal(t, []byte(mirror[i][1]), iter.Value()) + i++ + } + require.Equal(t, len(mirror), i) + }) + } +} + +func TestUpgradeStorageToFast_Delete_Stale_Success(t *testing.T) { + // we delete fast node, in case of deadlock. 
we should limit the stale count lower than chBufferSize(64) + tmpCommitGap := commitGap + commitGap = 5 + defer func() { + commitGap = tmpCommitGap + }() + + valStale := "val_stale" + addStaleKey := func(ndb *nodeDB, staleCount int) { + var keyPrefix = "key" + for i := 0; i < staleCount; i++ { + key := fmt.Sprintf("%s_%d", keyPrefix, i) + + node := NewFastNode([]byte(key), []byte(valStale), 100) + var buf bytes.Buffer + buf.Grow(node.encodedSize()) + err := node.writeBytes(&buf) + require.NoError(t, err) + err = ndb.db.Set(ndb.fastNodeKey([]byte(key)), buf.Bytes()) + require.NoError(t, err) + } + } + type fields struct { + nodeCount int + staleCount int + } + + tests := []struct { + name string + fields fields + }{ + {"stale less than commit gap", fields{nodeCount: 100, staleCount: 4}}, + {"stale equal to commit gap", fields{nodeCount: int(commitGap), staleCount: int(commitGap)}}, + {"stale great than commit gap", fields{nodeCount: int(commitGap) + 100, staleCount: int(commitGap)*2 - 1}}, + {"stale twice commit gap", fields{nodeCount: int(commitGap) + 100, staleCount: int(commitGap) * 2}}, + {"stale great than twice commit gap", fields{nodeCount: int(commitGap), staleCount: int(commitGap)*2 + 1}}, + } + + for _, tt := range tests { + tree, mirror := setupTreeAndMirror(t, tt.fields.nodeCount, false) + addStaleKey(tree.ndb, tt.fields.staleCount) + enabled, err := tree.enableFastStorageAndCommitIfNotEnabled() + require.Nil(t, err) + require.True(t, enabled) + t.Run(tt.name, func(t *testing.T) { + i := 0 + iter := NewFastIterator(nil, nil, true, tree.ndb) + for ; iter.Valid(); iter.Next() { + require.Equal(t, []byte(mirror[i][0]), iter.Key()) + require.Equal(t, []byte(mirror[i][1]), iter.Value()) + i++ + } + require.Equal(t, len(mirror), i) + }) + } +} + +func setupTreeAndMirror(t *testing.T, numEntries int, skipFastStorageUpgrade bool) (*MutableTree, [][]string) { + db := db.NewMemDB() + + tree, _ := NewMutableTree(db, 0, skipFastStorageUpgrade) + + var keyPrefix, 
valPrefix = "key", "val" + + mirror := make([][]string, 0, numEntries) + for i := 0; i < numEntries; i++ { + key := fmt.Sprintf("%s_%d", keyPrefix, i) + val := fmt.Sprintf("%s_%d", valPrefix, i) + mirror = append(mirror, []string{key, val}) + updated, err := tree.Set([]byte(key), []byte(val)) + require.False(t, updated) + require.NoError(t, err) + } + + // Delete fast nodes from database to mimic a version with no upgrade + for i := 0; i < numEntries; i++ { + key := fmt.Sprintf("%s_%d", keyPrefix, i) + require.NoError(t, db.Delete(fastKeyFormat.Key([]byte(key)))) + } + + sort.Slice(mirror, func(i, j int) bool { + return mirror[i][0] < mirror[j][0] + }) + return tree, mirror +} + +func TestNoFastStorageUpgrade_Integration_SaveVersion_Load_Get_Success(t *testing.T) { + // Setup + tree, mirror := setupTreeAndMirror(t, 100, true) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err := tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + // Should Not auto enable in save version + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + sut, _ := NewMutableTree(tree.ndb.db, 1000, true) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = sut.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + // LazyLoadVersion - should not auto enable fast storage + version, err := sut.LazyLoadVersion(1) + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + 
require.False(t, isFastCacheEnabled) + + // Load - should not auto enable fast storage + version, err = sut.Load() + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + // LoadVersion - should not auto enable fast storage + version, err = sut.LoadVersion(1) + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + // LoadVersionForOverwriting - should not auto enable fast storage + version, err = sut.LoadVersionForOverwriting(1) + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + t.Run("Mutable tree", func(t *testing.T) { + for _, kv := range mirror { + v, err := sut.Get([]byte(kv[0])) + require.NoError(t, err) + require.Equal(t, []byte(kv[1]), v) + } + }) + + t.Run("Immutable tree", func(t *testing.T) { + immutableTree, err := sut.GetImmutable(sut.ImmutableTree().version) + require.NoError(t, err) + + for _, kv := range mirror { + v, err := immutableTree.Get([]byte(kv[0])) + require.NoError(t, err) + require.Equal(t, []byte(kv[1]), v) + } + }) +} + +func TestNoFastStorageUpgrade_Integration_SaveVersion_Load_Iterate_Success(t *testing.T) { + // Setup + tree, mirror := setupTreeAndMirror(t, 100, true) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err := tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + // Should Not auto enable in save version + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + 
require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = tree.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + sut, _ := NewMutableTree(tree.ndb.db, 1000, true) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + isUpgradeable, err = sut.IsUpgradeable() + require.False(t, isUpgradeable) + require.NoError(t, err) + + // Load - should not auto enable fast storage + version, err := sut.Load() + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = sut.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + // Load - should not auto enable fast storage + version, err = sut.Load() + require.NoError(t, err) + require.Equal(t, int64(1), version) + + isFastCacheEnabled, err = tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + // Test that the mutable tree iterates as expected + t.Run("Mutable tree", func(t *testing.T) { + i := 0 + sut.Iterate(func(k, v []byte) bool { + require.Equal(t, []byte(mirror[i][0]), k) + require.Equal(t, []byte(mirror[i][1]), v) + i++ + return false + }) + }) + + // Test that the immutable tree iterates as expected + t.Run("Immutable tree", func(t *testing.T) { + immutableTree, err := sut.GetImmutable(sut.ImmutableTree().version) + require.NoError(t, err) + + i := 0 + immutableTree.Iterate(func(k, v []byte) bool { + require.Equal(t, []byte(mirror[i][0]), k) + require.Equal(t, []byte(mirror[i][1]), v) + i++ + return false + }) + }) +} + +func TestSaveCurrentVersion(t *testing.T) { + tree := setupMutableTree(t) + tree.SetInitialVersion(9) + + tree.Set([]byte("a"), []byte{0x01}) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 9, version) + _, version, err = tree.SaveCurrentVersion() + require.NoError(t, err) + 
assert.EqualValues(t, 9, version) +} + +func TestSaveCurrentVersion_BadVersion(t *testing.T) { + tree := setupMutableTree(t) + tree.SetInitialVersion(9) + + tree.Set([]byte("a"), []byte{0x01}) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 9, version) + tree.ImmutableTree().version = 10 + _, version, err = tree.SaveCurrentVersion() + require.Error(t, err) + assert.EqualValues(t, 10, version) +} + +func TestSaveCurrentVersion_ChangedHash(t *testing.T) { + tree := setupMutableTree(t) + tree.SetInitialVersion(9) + + tree.Set([]byte("a"), []byte{0x01}) + _, version, err := tree.SaveVersion() + require.NoError(t, err) + assert.EqualValues(t, 9, version) + tree.Set([]byte("b"), []byte{0x02}) + _, version, err = tree.SaveCurrentVersion() + require.Error(t, err) +} diff --git a/sei-iavl/node.go b/sei-iavl/node.go new file mode 100644 index 0000000000..d2d8fb8f1b --- /dev/null +++ b/sei-iavl/node.go @@ -0,0 +1,703 @@ +package iavl + +// NOTE: This file favors int64 as opposed to int for size/counts. +// The Tree on the other hand favors int. This is intentional. + +import ( + "bytes" + "crypto/sha256" + "fmt" + "io" + "math" + "sync" + + "github.com/pkg/errors" + "github.com/sei-protocol/sei-chain/sei-iavl/cache" + + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" +) + +// Node represents a node in a Tree. +type Node struct { + key []byte + value []byte + hash []byte + leftHash []byte + rightHash []byte + version int64 + size int64 + leftNode *Node + rightNode *Node + height int8 + persisted bool + + mtx sync.RWMutex +} + +var _ cache.Node = (*Node)(nil) + +// NewNode returns a new node from a key, value and version. +func NewNode(key []byte, value []byte, version int64) *Node { + return &Node{ + key: key, + value: value, + height: 0, + size: 1, + version: version, + } +} + +// MakeNode constructs an *Node from an encoded byte slice. +// +// The new node doesn't have its hash saved or set. 
The caller must set it +// afterward. +func MakeNode(buf []byte) (*Node, error) { + + // Read node header (height, size, version, key). + height, n, cause := encoding.DecodeVarint(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.height") + } + buf = buf[n:] + if height < int64(math.MinInt8) || height > int64(math.MaxInt8) { + return nil, errors.New("invalid height, must be int8") + } + + size, n, cause := encoding.DecodeVarint(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.size") + } + buf = buf[n:] + + ver, n, cause := encoding.DecodeVarint(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.version") + } + buf = buf[n:] + + key, n, cause := encoding.DecodeBytes(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.key") + } + buf = buf[n:] + + node := &Node{ + // #nosec G115 -- height is bounds checked above to be within int8 range + height: int8(height), + size: size, + version: ver, + key: key, + } + + // Read node body. + + if node.isLeaf() { + val, _, cause := encoding.DecodeBytes(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.value") + } + node.value = val + } else { // Read children. 
+ leftHash, n, cause := encoding.DecodeBytes(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.leftHash") + } + buf = buf[n:] + + rightHash, _, cause := encoding.DecodeBytes(buf) + if cause != nil { + return nil, errors.Wrap(cause, "decoding node.rightHash") + } + node.leftHash = leftHash + node.rightHash = rightHash + } + return node, nil +} + +// to conform with interface name +func (n *Node) GetCacheKey() []byte { + n.mtx.RLock() + defer n.mtx.RUnlock() + return n.hash +} + +func (n *Node) GetHash() []byte { + n.mtx.RLock() + defer n.mtx.RUnlock() + return n.hash +} + +func (node *Node) GetNodeKey() []byte { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.key +} + +func (node *Node) GetValue() []byte { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.value +} + +func (node *Node) GetSize() int64 { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.size +} + +func (node *Node) GetHeight() int8 { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.height +} + +func (node *Node) GetVersion() int64 { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.version +} + +func (node *Node) GetLeftHash() []byte { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.leftHash +} + +func (node *Node) GetRightHash() []byte { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.rightHash +} + +func (node *Node) GetLeftNode() *Node { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.leftNode +} + +func (node *Node) GetRightNode() *Node { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.rightNode +} + +func (node *Node) GetPersisted() bool { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.persisted +} + +func (node *Node) SetKey(k []byte) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.key = k +} + +func (node *Node) SetLeftHash(h []byte) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.leftHash = h +} + +func (node *Node) SetRightHash(h []byte) { + 
node.mtx.Lock() + defer node.mtx.Unlock() + node.rightHash = h +} + +func (node *Node) SetLeftNode(n *Node) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.leftNode = n +} + +func (node *Node) SetRightNode(n *Node) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.rightNode = n +} + +func (node *Node) SetHeight(h int8) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.height = h +} + +func (node *Node) SetVersion(v int64) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.version = v +} + +func (node *Node) SetSize(s int64) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.size = s +} + +func (node *Node) SetHash(h []byte) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.hash = h +} + +func (node *Node) SetPersisted(p bool) { + node.mtx.Lock() + defer node.mtx.Unlock() + node.persisted = p +} + +// String returns a string representation of the node. +func (node *Node) String() string { + node.mtx.RLock() + defer node.mtx.RUnlock() + hashstr := "" + if len(node.hash) > 0 { + hashstr = fmt.Sprintf("%X", node.hash) + } + return fmt.Sprintf("Node{%s:%s@%d %X;%X}#%s", + ColoredBytes(node.key, Green, Blue), + ColoredBytes(node.value, Cyan, Blue), + node.version, + node.leftHash, node.rightHash, + hashstr) +} + +// clone creates a shallow copy of a node with its hash set to nil. +func (node *Node) clone(version int64) (*Node, error) { + if node.isLeaf() { + return nil, ErrCloneLeafNode + } + node.mtx.RLock() + defer node.mtx.RUnlock() + return &Node{ + key: node.key, + height: node.height, + version: version, + size: node.size, + hash: nil, + leftHash: node.leftHash, + leftNode: node.leftNode, + rightHash: node.rightHash, + rightNode: node.rightNode, + persisted: false, + }, nil +} + +func (node *Node) isLeaf() bool { + node.mtx.RLock() + defer node.mtx.RUnlock() + return node.height == 0 +} + +// Check if the node has a descendant with the given key. 
+func (node *Node) has(t *ImmutableTree, key []byte) (has bool, err error) { + if bytes.Equal(node.GetNodeKey(), key) { + return true, nil + } + if node.isLeaf() { + return false, nil + } + if bytes.Compare(key, node.GetNodeKey()) < 0 { + leftNode, err := node.getLeftNode(t) + if err != nil { + return false, err + } + return leftNode.has(t, key) + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return false, err + } + + return rightNode.has(t, key) +} + +// Get a key under the node. +// +// The index is the index in the list of leaf nodes sorted lexicographically by key. The leftmost leaf has index 0. +// It's neighbor has index 1 and so on. +func (node *Node) get(t *ImmutableTree, key []byte) (index int64, value []byte, err error) { + if node.isLeaf() { + switch bytes.Compare(node.GetNodeKey(), key) { + case -1: + return 1, nil, nil + case 1: + return 0, nil, nil + default: + return 0, node.GetValue(), nil + } + } + + if bytes.Compare(key, node.GetNodeKey()) < 0 { + leftNode, err := node.getLeftNode(t) + if err != nil { + return 0, nil, err + } + + return leftNode.get(t, key) + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return 0, nil, err + } + + index, value, err = rightNode.get(t, key) + if err != nil { + return 0, nil, err + } + + index += node.GetSize() - rightNode.GetSize() + return index, value, nil +} + +func (node *Node) getByIndex(t *ImmutableTree, index int64) (key []byte, value []byte, err error) { + if node.isLeaf() { + if index == 0 { + return node.GetNodeKey(), node.GetValue(), nil + } + return nil, nil, nil + } + // TODO: could improve this by storing the + // sizes as well as left/right hash. 
+ leftNode, err := node.getLeftNode(t) + if err != nil { + return nil, nil, err + } + + if index < leftNode.GetSize() { + return leftNode.getByIndex(t, index) + } + + rightNode, err := node.getRightNode(t) + if err != nil { + return nil, nil, err + } + + return rightNode.getByIndex(t, index-leftNode.GetSize()) +} + +// Computes the hash of the node without computing its descendants. Must be +// called on nodes which have descendant node hashes already computed. +func (node *Node) _hash() ([]byte, error) { + if node.GetHash() != nil { + return node.GetHash(), nil + } + + h := sha256.New() + buf := new(bytes.Buffer) + if err := node.writeHashBytes(buf); err != nil { + return nil, err + } + _, err := h.Write(buf.Bytes()) + if err != nil { + return nil, err + } + node.mtx.Lock() + defer node.mtx.Unlock() + node.hash = h.Sum(nil) + + return node.hash, nil +} + +// Hash the node and its descendants recursively. This usually mutates all +// descendant nodes. Returns the node hash and number of nodes hashed. +// If the tree is empty (i.e. the node is nil), returns the hash of an empty input, +// to conform with RFC-6962. 
+func (node *Node) hashWithCount() ([]byte, int64, error) { + if node == nil { + return sha256.New().Sum(nil), 0, nil + } + if node.GetHash() != nil { + return node.GetHash(), 0, nil + } + + h := sha256.New() + buf := new(bytes.Buffer) + hashCount, err := node.writeHashBytesRecursively(buf) + if err != nil { + return nil, 0, err + } + _, err = h.Write(buf.Bytes()) + if err != nil { + return nil, 0, err + } + node.mtx.Lock() + defer node.mtx.Unlock() + node.hash = h.Sum(nil) + + return node.hash, hashCount + 1, nil +} + +// validate validates the node contents +func (node *Node) validate() error { + if node == nil { + return errors.New("node cannot be nil") + } + node.mtx.RLock() + defer node.mtx.RUnlock() + if node.key == nil { + return errors.New("key cannot be nil") + } + if node.version <= 0 { + return errors.New("version must be greater than 0") + } + if node.height < 0 { + return errors.New("height cannot be less than 0") + } + if node.size < 1 { + return errors.New("size must be at least 1") + } + + if node.height == 0 { + // Leaf nodes + if node.value == nil { + return errors.New("value cannot be nil for leaf node") + } + if node.leftHash != nil || node.leftNode != nil || node.rightHash != nil || node.rightNode != nil { + return errors.New("leaf node cannot have children") + } + if node.size != 1 { + return errors.New("leaf nodes must have size 1") + } + } else { + // Inner nodes + if node.value != nil { + return errors.New("value must be nil for non-leaf node") + } + if node.leftHash == nil && node.rightHash == nil { + return errors.New("inner node must have children") + } + } + return nil +} + +// Writes the node's hash to the given io.Writer. This function expects +// child hashes to be already set. 
func (node *Node) writeHashBytes(w io.Writer) error {
	// The field order below (height, size, version, then key/value-hash for
	// leaves or child hashes for inner nodes) is the node's hash pre-image;
	// changing it would change every node hash.
	err := encoding.EncodeVarint(w, int64(node.GetHeight()))
	if err != nil {
		return errors.Wrap(err, "writing height")
	}
	err = encoding.EncodeVarint(w, node.GetSize())
	if err != nil {
		return errors.Wrap(err, "writing size")
	}
	err = encoding.EncodeVarint(w, node.GetVersion())
	if err != nil {
		return errors.Wrap(err, "writing version")
	}

	// Key is not written for inner nodes, unlike writeBytes.

	if node.isLeaf() {
		err = encoding.EncodeBytes(w, node.GetNodeKey())
		if err != nil {
			return errors.Wrap(err, "writing key")
		}

		// Indirection needed to provide proofs without values.
		// (e.g. ProofLeafNode.ValueHash)
		valueHash := sha256.Sum256(node.GetValue())

		err = encoding.EncodeBytes(w, valueHash[:])
		if err != nil {
			return errors.Wrap(err, "writing value")
		}
	} else {
		// Inner nodes must already have both child hashes computed.
		if node.GetLeftHash() == nil || node.GetRightHash() == nil {
			return ErrEmptyChildHash
		}
		err = encoding.EncodeBytes(w, node.GetLeftHash())
		if err != nil {
			return errors.Wrap(err, "writing left hash")
		}
		err = encoding.EncodeBytes(w, node.GetRightHash())
		if err != nil {
			return errors.Wrap(err, "writing right hash")
		}
	}

	return nil
}

// Writes the node's hash to the given io.Writer.
// This function has the side-effect of calling hashWithCount.
func (node *Node) writeHashBytesRecursively(w io.Writer) (hashCount int64, err error) {
	// Hash any in-memory children first so their hashes are available to
	// writeHashBytes below; hashCount accumulates how many nodes were hashed.
	if node.GetLeftNode() != nil {
		leftHash, leftCount, err := node.GetLeftNode().hashWithCount()
		if err != nil {
			return 0, err
		}
		node.SetLeftHash(leftHash)
		hashCount += leftCount
	}
	if node.GetRightNode() != nil {
		rightHash, rightCount, err := node.GetRightNode().hashWithCount()
		if err != nil {
			return 0, err
		}
		node.SetRightHash(rightHash)
		hashCount += rightCount
	}
	err = node.writeHashBytes(w)

	return
}

// encodedSize returns the byte length of the node's writeBytes serialization.
// The leading 1 covers the height varint (an int8, so a single varint byte —
// TODO confirm for negative heights, which validate() rejects anyway).
func (node *Node) encodedSize() int {
	n := 1 +
		encoding.EncodeVarintSize(node.GetSize()) +
		encoding.EncodeVarintSize(node.GetVersion()) +
		encoding.EncodeBytesSize(node.GetNodeKey())
	if node.isLeaf() {
		n += encoding.EncodeBytesSize(node.GetValue())
	} else {
		n += encoding.EncodeBytesSize(node.GetLeftHash()) +
			encoding.EncodeBytesSize(node.GetRightHash())
	}
	return n
}

// Writes the node as a serialized byte slice to the supplied io.Writer.
func (node *Node) writeBytes(w io.Writer) error {
	if node == nil {
		return errors.New("cannot write nil node")
	}
	// This is the on-disk storage format; keep in sync with encodedSize above.
	cause := encoding.EncodeVarint(w, int64(node.GetHeight()))
	if cause != nil {
		return errors.Wrap(cause, "writing height")
	}
	cause = encoding.EncodeVarint(w, node.GetSize())
	if cause != nil {
		return errors.Wrap(cause, "writing size")
	}
	cause = encoding.EncodeVarint(w, node.GetVersion())
	if cause != nil {
		return errors.Wrap(cause, "writing version")
	}

	// Unlike writeHashBytes, key is written for inner nodes.
	cause = encoding.EncodeBytes(w, node.GetNodeKey())
	if cause != nil {
		return errors.Wrap(cause, "writing key")
	}

	if node.isLeaf() {
		cause = encoding.EncodeBytes(w, node.GetValue())
		if cause != nil {
			return errors.Wrap(cause, "writing value")
		}
	} else {
		if node.GetLeftHash() == nil {
			return ErrLeftHashIsNil
		}
		cause = encoding.EncodeBytes(w, node.GetLeftHash())
		if cause != nil {
			return errors.Wrap(cause, "writing left hash")
		}

		if node.GetRightHash() == nil {
			return ErrRightHashIsNil
		}
		cause = encoding.EncodeBytes(w, node.GetRightHash())
		if cause != nil {
			return errors.Wrap(cause, "writing right hash")
		}
	}
	return nil
}

// getLeftNode returns the in-memory left child if present, otherwise loads it
// from the node database by its hash.
func (node *Node) getLeftNode(t *ImmutableTree) (*Node, error) {
	if node.GetLeftNode() != nil {
		return node.GetLeftNode(), nil
	}
	leftNode, err := t.ndb.GetNode(node.GetLeftHash())
	if err != nil {
		return nil, err
	}

	return leftNode, nil
}

// getRightNode returns the in-memory right child if present, otherwise loads
// it from the node database by its hash.
func (node *Node) getRightNode(t *ImmutableTree) (*Node, error) {
	if node.GetRightNode() != nil {
		return node.GetRightNode(), nil
	}
	rightNode, err := t.ndb.GetNode(node.GetRightHash())
	if err != nil {
		return nil, err
	}

	return rightNode, nil
}

// NOTE: mutates height and size
func (node *Node) calcHeightAndSize(t *ImmutableTree) error {
	leftNode, err := node.getLeftNode(t)
	if err != nil {
		return err
	}

	rightNode, err := node.getRightNode(t)
	if err != nil {
		return err
	}

	// Height is one more than the taller child; size is the leaf count.
	height := maxInt8(leftNode.GetHeight(), rightNode.GetHeight()) + 1
	size := leftNode.GetSize() + rightNode.GetSize()
	node.SetHeight(height)
	node.SetSize(size)
	return nil
}

// calcBalance returns left height minus right height (positive = left-heavy).
func (node *Node) calcBalance(t *ImmutableTree) (int, error) {
	leftNode, err := node.getLeftNode(t)
	if err != nil {
		return 0, err
	}

	rightNode, err := node.getRightNode(t)
	if err != nil {
		return 0, err
	}

	return int(leftNode.GetHeight()) - int(rightNode.GetHeight()), nil
}

// traverse is a wrapper over traverseInRange when we want
the whole tree +// nolint: unparam +func (node *Node) traverse(t *ImmutableTree, ascending bool, cb func(*Node) bool) bool { + return node.traverseInRange(t, nil, nil, ascending, false, false, func(node *Node) bool { + return cb(node) + }) +} + +// traversePost is a wrapper over traverseInRange when we want the whole tree post-order +func (node *Node) traversePost(t *ImmutableTree, ascending bool, cb func(*Node) bool) bool { + return node.traverseInRange(t, nil, nil, ascending, false, true, func(node *Node) bool { + return cb(node) + }) +} + +func (node *Node) traverseInRange(tree *ImmutableTree, start, end []byte, ascending bool, inclusive bool, post bool, cb func(*Node) bool) bool { + stop := false + t := node.newTraversal(tree, start, end, ascending, inclusive, post, false) + // TODO: figure out how to handle these errors + for node2, err := t.next(); node2 != nil && err == nil; node2, err = t.next() { + stop = cb(node2) + if stop { + return stop + } + } + return stop +} + +var ( + ErrCloneLeafNode = fmt.Errorf("attempt to copy a leaf node") + ErrEmptyChildHash = fmt.Errorf("found an empty child hash") + ErrLeftHashIsNil = fmt.Errorf("node.leftHash was nil in writeBytes") + ErrRightHashIsNil = fmt.Errorf("node.rightHash was nil in writeBytes") +) diff --git a/sei-iavl/node_test.go b/sei-iavl/node_test.go new file mode 100644 index 0000000000..12d09fb796 --- /dev/null +++ b/sei-iavl/node_test.go @@ -0,0 +1,177 @@ +package iavl + +import ( + "bytes" + "encoding/hex" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNode_encodedSize(t *testing.T) { + node := &Node{ + key: randBytes(10), + value: randBytes(10), + version: 1, + height: 0, + size: 100, + hash: randBytes(20), + leftHash: randBytes(20), + leftNode: nil, + rightHash: randBytes(20), + rightNode: nil, + persisted: false, + } + + // leaf node + require.Equal(t, 26, node.encodedSize()) + + // non-leaf node + node.height = 1 + 
require.Equal(t, 57, node.encodedSize()) +} + +func TestNode_encode_decode(t *testing.T) { + testcases := map[string]struct { + node *Node + expectHex string + expectError bool + }{ + "nil": {nil, "", true}, + "empty": {&Node{}, "0000000000", false}, + "inner": {&Node{ + height: 3, + version: 2, + size: 7, + key: []byte("key"), + leftHash: []byte{0x70, 0x80, 0x90, 0xa0}, + rightHash: []byte{0x10, 0x20, 0x30, 0x40}, + }, "060e04036b657904708090a00410203040", false}, + "leaf": {&Node{ + height: 0, + version: 3, + size: 1, + key: []byte("key"), + value: []byte("value"), + }, "000206036b65790576616c7565", false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + var buf bytes.Buffer + err := tc.node.writeBytes(&buf) + if tc.expectError { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.expectHex, hex.EncodeToString(buf.Bytes())) + + node, err := MakeNode(buf.Bytes()) + require.NoError(t, err) + // since key and value is always decoded to []byte{} we augment the expected struct here + if tc.node.key == nil { + tc.node.key = []byte{} + } + if tc.node.value == nil && tc.node.height == 0 { + tc.node.value = []byte{} + } + require.Equal(t, tc.node, node) + }) + } +} + +func TestNode_validate(t *testing.T) { + k := []byte("key") + v := []byte("value") + h := []byte{1, 2, 3} + c := &Node{key: []byte("child"), value: []byte("x"), version: 1, size: 1} + + testcases := map[string]struct { + node *Node + valid bool + }{ + "nil node": {nil, false}, + "leaf": {&Node{key: k, value: v, version: 1, size: 1}, true}, + "leaf with nil key": {&Node{key: nil, value: v, version: 1, size: 1}, false}, + "leaf with empty key": {&Node{key: []byte{}, value: v, version: 1, size: 1}, true}, + "leaf with nil value": {&Node{key: k, value: nil, version: 1, size: 1}, false}, + "leaf with empty value": {&Node{key: k, value: []byte{}, version: 1, size: 1}, true}, + "leaf with version 0": {&Node{key: k, value: v, version: 0, size: 
1}, false}, + "leaf with version -1": {&Node{key: k, value: v, version: -1, size: 1}, false}, + "leaf with size 0": {&Node{key: k, value: v, version: 1, size: 0}, false}, + "leaf with size 2": {&Node{key: k, value: v, version: 1, size: 2}, false}, + "leaf with size -1": {&Node{key: k, value: v, version: 1, size: -1}, false}, + "leaf with left hash": {&Node{key: k, value: v, version: 1, size: 1, leftHash: h}, false}, + "leaf with left child": {&Node{key: k, value: v, version: 1, size: 1, leftNode: c}, false}, + "leaf with right hash": {&Node{key: k, value: v, version: 1, size: 1, rightNode: c}, false}, + "leaf with right child": {&Node{key: k, value: v, version: 1, size: 1, rightNode: c}, false}, + "inner": {&Node{key: k, version: 1, size: 1, height: 1, leftHash: h, rightHash: h}, true}, + "inner with nil key": {&Node{key: nil, value: v, version: 1, size: 1, height: 1, leftHash: h, rightHash: h}, false}, + "inner with value": {&Node{key: k, value: v, version: 1, size: 1, height: 1, leftHash: h, rightHash: h}, false}, + "inner with empty value": {&Node{key: k, value: []byte{}, version: 1, size: 1, height: 1, leftHash: h, rightHash: h}, false}, + "inner with left child": {&Node{key: k, version: 1, size: 1, height: 1, leftHash: h}, true}, + "inner with right child": {&Node{key: k, version: 1, size: 1, height: 1, rightHash: h}, true}, + "inner with no child": {&Node{key: k, version: 1, size: 1, height: 1}, false}, + "inner with height 0": {&Node{key: k, version: 1, size: 1, height: 0, leftHash: h, rightHash: h}, false}, + } + + for desc, tc := range testcases { + tc := tc // appease scopelint + t.Run(desc, func(t *testing.T) { + err := tc.node.validate() + if tc.valid { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + +func BenchmarkNode_encodedSize(b *testing.B) { + node := &Node{ + key: randBytes(25), + value: randBytes(100), + version: rand.Int63n(10000000), + height: 1, + size: rand.Int63n(10000000), + leftHash: randBytes(20), + 
rightHash: randBytes(20), + } + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + node.encodedSize() + } +} + +func BenchmarkNode_WriteBytes(b *testing.B) { + node := &Node{ + key: randBytes(25), + value: randBytes(100), + version: rand.Int63n(10000000), + height: 1, + size: rand.Int63n(10000000), + leftHash: randBytes(20), + rightHash: randBytes(20), + } + b.ResetTimer() + b.Run("NoPreAllocate", func(sub *testing.B) { + sub.ReportAllocs() + for i := 0; i < sub.N; i++ { + var buf bytes.Buffer + _ = node.writeBytes(&buf) + } + }) + b.Run("PreAllocate", func(sub *testing.B) { + sub.ReportAllocs() + for i := 0; i < sub.N; i++ { + var buf bytes.Buffer + buf.Grow(node.encodedSize()) + _ = node.writeBytes(&buf) + } + }) +} diff --git a/sei-iavl/nodedb.go b/sei-iavl/nodedb.go new file mode 100644 index 0000000000..8ec04a8aeb --- /dev/null +++ b/sei-iavl/nodedb.go @@ -0,0 +1,1203 @@ +package iavl + +import ( + "bytes" + "crypto/sha256" + "fmt" + "math" + "sort" + "strconv" + "strings" + "sync" + + "github.com/pkg/errors" + "github.com/sei-protocol/sei-chain/sei-iavl/cache" + ibytes "github.com/sei-protocol/sei-chain/sei-iavl/internal/bytes" + "github.com/sei-protocol/sei-chain/sei-iavl/internal/logger" + dbm "github.com/tendermint/tm-db" +) + +const ( + int64Size = 8 + hashSize = sha256.Size + genesisVersion = 1 + storageVersionKey = "storage_version" + // We store latest saved version together with storage version delimited by the constant below. + // This delimiter is valid only if fast storage is enabled (i.e. storageVersion >= fastStorageVersionValue). + // The latest saved version is needed for protection against downgrade and re-upgrade. In such a case, it would + // be possible to observe mismatch between the latest version state and the fast nodes on disk. + // Therefore, we would like to detect that and overwrite fast nodes on disk with the latest version state. 
	fastStorageVersionDelimiter = "-"
	// Using semantic versioning: https://semver.org/
	defaultStorageVersionValue = "1.0.0"
	fastStorageVersionValue    = "1.1.0"
	fastNodeCacheSize          = 100000
)

var (
	// All node keys are prefixed with the byte 'n'. This ensures no collision is
	// possible with the other keys, and makes them easier to traverse. They are indexed by the node hash.
	nodeKeyFormat = NewKeyFormat('n', hashSize) // n<hash>

	// Orphans are keyed in the database by their expected lifetime.
	// The first number represents the *last* version at which the orphan needs
	// to exist, while the second number represents the *earliest* version at
	// which it is expected to exist - which starts out by being the version
	// of the node being orphaned.
	// To clarify:
	// When I write to key {X} with value V and old value O, we orphan O with
	// <last-version> = time of write and <first-version> = version O was created at.
	orphanKeyFormat = NewKeyFormat('o', int64Size, int64Size, hashSize) // o<last-version><first-version><hash>

	// Key Format for making reads and iterates go through a data-locality preserving db.
	// The value at an entry will list what version it was written to.
	// Then to query values, you first query state via this fast method.
	// If its present, then check the tree version. If tree version >= result_version,
	// return result_version. Else, go through old (slow) IAVL get method that walks through tree.
	fastKeyFormat = NewKeyFormat('f', 0) // f<keystring>

	// Key Format for storing metadata about the chain such as the version number.
	// The value at an entry will be in a variable format and up to the caller to
	// decide how to parse.
	metadataKeyFormat = NewKeyFormat('m', 0) // m<keystring>

	// Root nodes are indexed separately by their version
	rootKeyFormat = NewKeyFormat('r', int64Size) // r<version>
)

var (
	errInvalidFastStorageVersion = fmt.Sprintf("Fast storage version must be in the format %s", fastStorageVersionDelimiter)
)

// nodeDB is the storage layer for IAVL nodes: it wraps a tm-db database with
// a write batch, node/fast-node caches, and version bookkeeping. All public
// methods take ndb.mtx as needed.
type nodeDB struct {
	mtx            sync.Mutex       // Read/write lock.
	db             dbm.DB           // Persistent node storage.
	batch          dbm.Batch        // Batched writing buffer.
	opts           Options          // Options to customize for pruning/writing
	versionReaders map[int64]uint32 // Number of active version readers
	storageVersion string           // Storage version
	firstVersion   int64            // First version of nodeDB.
	latestVersion  int64            // Latest version of nodeDB.
	nodeCache      cache.Cache      // Cache for nodes in the regular tree that consists of key-value pairs at any version.
	fastNodeCache  cache.Cache      // Cache for nodes in the fast index that represents only key-value pairs at the latest version.
}

// newNodeDB constructs a nodeDB over db. A nil opts selects DefaultOptions.
// The stored storage version is read from the metadata key; any read error or
// missing value falls back to defaultStorageVersionValue.
func newNodeDB(db dbm.DB, cacheSize int, opts *Options) *nodeDB {
	if opts == nil {
		o := DefaultOptions()
		opts = &o
	}

	storeVersion, err := db.Get(metadataKeyFormat.Key(unsafeToBz(storageVersionKey)))

	if err != nil || storeVersion == nil {
		storeVersion = []byte(defaultStorageVersionValue)
	}

	return &nodeDB{
		db:             db,
		batch:          db.NewBatch(),
		opts:           *opts,
		firstVersion:   0,
		latestVersion:  0, // initially invalid
		nodeCache:      cache.New(cacheSize),
		fastNodeCache:  cache.New(fastNodeCacheSize),
		versionReaders: make(map[int64]uint32, 8),
		storageVersion: string(storeVersion),
	}
}

// GetNode gets a node from memory or disk. If it is an inner node, it does not
// load its children.
func (ndb *nodeDB) GetNode(hash []byte) (*Node, error) {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()

	if len(hash) == 0 {
		return nil, ErrNodeMissingHash
	}

	// Check the cache.
	if cachedNode := ndb.nodeCache.Get(hash); cachedNode != nil {
		ndb.opts.Stat.IncCacheHitCnt()
		return cachedNode.(*Node), nil
	}

	ndb.opts.Stat.IncCacheMissCnt()

	// Doesn't exist, load.
	buf, err := ndb.db.Get(ndb.nodeKey(hash))
	if err != nil {
		return nil, fmt.Errorf("can't get node %X: %v", hash, err)
	}
	if buf == nil {
		return nil, fmt.Errorf("Value missing for hash %x corresponding to nodeKey %x", hash, ndb.nodeKey(hash))
	}

	node, err := MakeNode(buf)
	if err != nil {
		return nil, fmt.Errorf("Error reading Node. bytes: %x, error: %v", buf, err)
	}

	// The hash is not stored in the serialized bytes; restore it and mark the
	// node as already persisted before caching.
	node.SetHash(hash)
	node.SetPersisted(true)
	ndb.nodeCache.Add(node)

	return node, nil
}

// GetFastNode gets a fast-index node from cache or disk. A nil result with a
// nil error means the key has no fast node. Errors if fast storage is not
// enabled.
func (ndb *nodeDB) GetFastNode(key []byte) (*FastNode, error) {
	if !ndb.hasUpgradedToFastStorage() {
		return nil, errors.New("storage version is not fast")
	}

	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()

	if len(key) == 0 {
		return nil, fmt.Errorf("nodeDB.GetFastNode() requires key, len(key) equals 0")
	}

	if cachedFastNode := ndb.fastNodeCache.Get(key); cachedFastNode != nil {
		ndb.opts.Stat.IncFastCacheHitCnt()
		return cachedFastNode.(*FastNode), nil
	}

	ndb.opts.Stat.IncFastCacheMissCnt()

	// Doesn't exist, load.
	buf, err := ndb.db.Get(ndb.fastNodeKey(key))
	if err != nil {
		return nil, fmt.Errorf("can't get FastNode %X: %w", key, err)
	}
	if buf == nil {
		return nil, nil
	}

	fastNode, err := DeserializeFastNode(key, buf)
	if err != nil {
		return nil, fmt.Errorf("error reading FastNode. bytes: %x, error: %w", buf, err)
	}

	ndb.fastNodeCache.Add(fastNode)
	return fastNode, nil
}

// SaveNode saves a node to disk.
func (ndb *nodeDB) SaveNode(node *Node) error {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()

	if node.GetHash() == nil {
		return ErrNodeMissingHash
	}
	if node.GetPersisted() {
		return ErrNodeAlreadyPersisted
	}

	// Save node bytes to db.
	var buf bytes.Buffer
	buf.Grow(node.encodedSize())

	if err := node.writeBytes(&buf); err != nil {
		return err
	}

	if err := ndb.batch.Set(ndb.nodeKey(node.GetHash()), buf.Bytes()); err != nil {
		return err
	}
	logger.Debug("BATCH SAVE %X %p\n", node.GetHash(), node)
	node.SetPersisted(true)
	ndb.nodeCache.Add(node)
	return nil
}

// SaveFastNode saves a FastNode to disk and add to cache.
func (ndb *nodeDB) SaveFastNode(node *FastNode) error {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()
	return ndb.saveFastNodeUnlocked(node, true)
}

// SaveFastNodeNoCache saves a FastNode to disk without adding to cache.
func (ndb *nodeDB) SaveFastNodeNoCache(node *FastNode) error {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()
	return ndb.saveFastNodeUnlocked(node, false)
}

// setFastStorageVersionToBatch sets storage version to fast where the version is
// 1.1.0-<latest saved version>. Returns error if storage version is incorrect or on
// db error, nil otherwise. Requires changes to be committed after to be persisted.
func (ndb *nodeDB) setFastStorageVersionToBatch() error {
	var newVersion string
	if ndb.storageVersion >= fastStorageVersionValue {
		// Storage version should be at index 0 and latest fast cache version at index 1
		versions := strings.Split(ndb.storageVersion, fastStorageVersionDelimiter)

		if len(versions) > 2 {
			return errors.New(errInvalidFastStorageVersion)
		}

		newVersion = versions[0]
	} else {
		newVersion = fastStorageVersionValue
	}

	latestVersion, err := ndb.getLatestVersion()
	if err != nil {
		return err
	}

	newVersion += fastStorageVersionDelimiter + strconv.Itoa(int(latestVersion))

	if err := ndb.batch.Set(metadataKeyFormat.Key([]byte(storageVersionKey)), []byte(newVersion)); err != nil {
		return err
	}
	ndb.storageVersion = newVersion
	return nil
}

// getStorageVersion returns the current in-memory storage version string.
func (ndb *nodeDB) getStorageVersion() string {
	return ndb.storageVersion
}

// Returns true if the upgrade to latest storage version has been performed, false otherwise.
+func (ndb *nodeDB) hasUpgradedToFastStorage() bool { + return ndb.getStorageVersion() >= fastStorageVersionValue +} + +// Returns true if the upgrade to fast storage has occurred but it does not match the live state, false otherwise. +// When the live state is not matched, we must force reupgrade. +// We determine this by checking the version of the live state and the version of the live state when +// latest storage was updated on disk the last time. +func (ndb *nodeDB) shouldForceFastStorageUpgrade() (bool, error) { + versions := strings.Split(ndb.storageVersion, fastStorageVersionDelimiter) + + if len(versions) == 2 { + latestVersion, err := ndb.getLatestVersion() + if err != nil { + // TODO: should be true or false as default? (removed panic here) + return false, err + } + if versions[1] != strconv.Itoa(int(latestVersion)) { + return true, nil + } + } + return false, nil +} + +// SaveNode saves a FastNode to disk. +func (ndb *nodeDB) saveFastNodeUnlocked(node *FastNode, shouldAddToCache bool) error { + if node.key == nil { + return fmt.Errorf("cannot have FastNode with a nil value for key") + } + + // Save node bytes to db. + var buf bytes.Buffer + buf.Grow(node.encodedSize()) + + if err := node.writeBytes(&buf); err != nil { + return fmt.Errorf("error while writing fastnode bytes. Err: %w", err) + } + + if err := ndb.batch.Set(ndb.fastNodeKey(node.key), buf.Bytes()); err != nil { + return fmt.Errorf("error while writing key/val to nodedb batch. Err: %w", err) + } + if shouldAddToCache { + ndb.fastNodeCache.Add(node) + } + return nil +} + +// Has checks if a hash exists in the database. 
func (ndb *nodeDB) Has(hash []byte) (bool, error) {
	key := ndb.nodeKey(hash)

	if ldb, ok := ndb.db.(*dbm.GoLevelDB); ok {
		// Fast path: goleveldb can answer existence without loading the value.
		exists, err := ldb.DB().Has(key, nil)
		if err != nil {
			return false, err
		}
		return exists, nil
	}
	value, err := ndb.db.Get(key)
	if err != nil {
		return false, err
	}

	return value != nil, nil
}

// SaveBranch saves the given node and all of its descendants.
// NOTE: This function clears leftNode/rightNode recursively and
// calls _hash() on the given node.
// TODO refactor, maybe use hashWithCount() but provide a callback.
func (ndb *nodeDB) SaveBranch(node *Node) ([]byte, error) {
	if node.GetPersisted() {
		return node.GetHash(), nil
	}

	// Depth-first: persist children first so their hashes are available.
	var err error
	if node.GetLeftNode() != nil {
		leftHash, err := ndb.SaveBranch(node.GetLeftNode())
		if err != nil {
			return nil, err
		}
		node.SetLeftHash(leftHash)
	}

	if node.GetRightNode() != nil {
		rightHash, err := ndb.SaveBranch(node.GetRightNode())
		if err != nil {
			return nil, err
		}
		node.SetRightHash(rightHash)
	}

	_, err = node._hash()
	if err != nil {
		return nil, err
	}

	err = ndb.SaveNode(node)
	if err != nil {
		return nil, err
	}

	// resetBatch only working on generate a genesis block
	if node.GetVersion() <= genesisVersion {
		if err = ndb.resetBatch(); err != nil {
			return nil, err
		}
	}
	// Drop in-memory children now that they are persisted and reachable by hash.
	node.SetLeftNode(nil)
	node.SetRightNode(nil)

	return node.GetHash(), nil
}

// resetBatch reset the db batch, keep low memory used
func (ndb *nodeDB) resetBatch() error {
	// Flush the current batch (synchronously when opts.Sync is set), close it,
	// and start a fresh one.
	var err error
	if ndb.opts.Sync {
		err = ndb.batch.WriteSync()
	} else {
		err = ndb.batch.Write()
	}
	if err != nil {
		return err
	}
	err = ndb.batch.Close()
	if err != nil {
		return err
	}

	ndb.batch = ndb.db.NewBatch()

	return nil
}

// DeleteVersion deletes a tree version from disk.
// calls deleteOrphans(version), deleteRoot(version, checkLatestVersion)
func (ndb *nodeDB) DeleteVersion(version int64, checkLatestVersion bool) error {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()

	// Refuse to delete a version that is still being read.
	if ndb.versionReaders[version] > 0 {
		return errors.Errorf("unable to delete version %v, it has %v active readers", version, ndb.versionReaders[version])
	}

	err := ndb.deleteOrphans(version)
	if err != nil {
		return err
	}

	err = ndb.deleteRoot(version, checkLatestVersion)
	if err != nil {
		return err
	}
	return err
}

// DeleteVersionsFrom permanently deletes all tree versions from the given version upwards.
func (ndb *nodeDB) DeleteVersionsFrom(version int64) error {
	latest, err := ndb.getLatestVersion()
	if err != nil {
		return err
	}
	if latest < version {
		// Nothing at or above the requested version.
		return nil
	}
	root, err := ndb.getRoot(latest)
	if err != nil {
		return err
	}
	if root == nil {
		return errors.Errorf("root for version %v not found", latest)
	}

	// Refuse if any version being removed still has active readers.
	for v, r := range ndb.versionReaders {
		if v >= version && r != 0 {
			return errors.Errorf("unable to delete version %v with %v active readers", v, r)
		}
	}

	// First, delete all active nodes in the current (latest) version whose node version is after
	// the given version.
	err = ndb.deleteNodesFrom(version, root)
	if err != nil {
		return err
	}

	// Next, delete orphans:
	// - Delete orphan entries *and referred nodes* with fromVersion >= version
	// - Delete orphan entries with toVersion >= version-1 (since orphans at latest are not orphans)
	err = ndb.traverseOrphans(func(key, hash []byte) error {
		var fromVersion, toVersion int64
		orphanKeyFormat.Scan(key, &toVersion, &fromVersion)

		if fromVersion >= version {
			if err = ndb.batch.Delete(key); err != nil {
				return err
			}
			if err = ndb.batch.Delete(ndb.nodeKey(hash)); err != nil {
				return err
			}
			ndb.nodeCache.Remove(hash)
		} else if toVersion >= version-1 {
			if err = ndb.batch.Delete(key); err != nil {
				return err
			}
		}
		return nil
	})

	if err != nil {
		return err
	}

	// Delete the version root entries
	err = ndb.traverseRange(rootKeyFormat.Key(version), rootKeyFormat.Key(int64(math.MaxInt64)), func(k, v []byte) error {
		if err = ndb.batch.Delete(k); err != nil {
			return err
		}
		return nil
	})

	if err != nil {
		return err
	}

	// Delete fast node entries whose last update is at or after the deleted range.
	err = ndb.traverseFastNodes(func(keyWithPrefix, v []byte) error {
		// Strip the one-byte 'f' prefix to recover the raw key.
		key := keyWithPrefix[1:]
		fastNode, err := DeserializeFastNode(key, v)

		if err != nil {
			return err
		}

		if version <= fastNode.versionLastUpdatedAt {
			if err = ndb.batch.Delete(keyWithPrefix); err != nil {
				return err
			}
			ndb.fastNodeCache.Remove(key)
		}
		return nil
	})

	if err != nil {
		return err
	}

	return nil
}

// DeleteVersionsRange deletes versions from an interval (not inclusive).
func (ndb *nodeDB) DeleteVersionsRange(fromVersion, toVersion int64) error {
	if fromVersion >= toVersion {
		return errors.New("toVersion must be greater than fromVersion")
	}
	if toVersion == 0 {
		return errors.New("toVersion must be greater than 0")
	}

	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()

	latest, err := ndb.getLatestVersion()
	if err != nil {
		return err
	}
	first, err := ndb.getFirstVersion()
	if err != nil {
		return err
	}
	// The latest saved version must survive the deletion.
	if latest < toVersion {
		return errors.Errorf("cannot delete latest saved version (%d)", latest)
	}

	predecessor, err := ndb.getPreviousVersion(fromVersion)
	if err != nil {
		return err
	}

	// Refuse if any version inside (predecessor, toVersion) has active readers.
	for v, r := range ndb.versionReaders {
		if v < toVersion && v > predecessor && r != 0 {
			return errors.Errorf("unable to delete version %v with %v active readers", v, r)
		}
	}

	// If the predecessor is earlier than the beginning of the lifetime, we can delete the orphan.
	// Otherwise, we shorten its lifetime, by moving its endpoint to the predecessor version.
	for version := fromVersion; version < toVersion; version++ {
		err := ndb.traverseOrphansVersion(version, func(key, hash []byte) error {
			var from, to int64
			orphanKeyFormat.Scan(key, &to, &from)
			if err := ndb.batch.Delete(key); err != nil {
				return err
			}
			if from > predecessor {
				if err := ndb.batch.Delete(ndb.nodeKey(hash)); err != nil {
					return err
				}
				ndb.nodeCache.Remove(hash)
			} else {
				if err := ndb.saveOrphan(hash, from, predecessor); err != nil {
					return err
				}
			}
			return nil
		})
		if err != nil {
			return err
		}
	}

	// Delete the version root entries
	err = ndb.traverseRange(rootKeyFormat.Key(fromVersion), rootKeyFormat.Key(toVersion), func(k, v []byte) error {
		if err := ndb.batch.Delete(k); err != nil {
			return err
		}
		return nil
	})

	if first < toVersion && first >= fromVersion {
		// Reset first version if we are deleting all versions from first -> toVersion
		ndb.resetFirstVersion(toVersion)
	}
	if latest <= toVersion-1 {
		// Reset latest version if we are deleting all versions from fromVersion -> latest
		ndb.resetLatestVersion(fromVersion + 1)
	}

	// NOTE(review): the root-deletion error above is only checked here, after
	// the first/latest version resets — confirm this ordering is intentional.
	if err != nil {
		return err
	}
	return nil
}

// DeleteFastNode removes a fast-index entry from the batch and its cache.
func (ndb *nodeDB) DeleteFastNode(key []byte) error {
	ndb.mtx.Lock()
	defer ndb.mtx.Unlock()
	if err := ndb.batch.Delete(ndb.fastNodeKey(key)); err != nil {
		return err
	}
	ndb.fastNodeCache.Remove(key)
	return nil
}

// deleteNodesFrom deletes the given node and any descendants that have versions after the given
// (inclusive). It is mainly used via LoadVersionForOverwriting, to delete the current version.
+func (ndb *nodeDB) deleteNodesFrom(version int64, hash []byte) error { + if len(hash) == 0 { + return nil + } + + node, err := ndb.GetNode(hash) + if err != nil { + return err + } + + if node.GetLeftHash() != nil { + if err := ndb.deleteNodesFrom(version, node.GetLeftHash()); err != nil { + return err + } + } + if node.GetRightHash() != nil { + if err := ndb.deleteNodesFrom(version, node.GetRightHash()); err != nil { + return err + } + } + + if node.GetVersion() >= version { + if err := ndb.batch.Delete(ndb.nodeKey(hash)); err != nil { + return err + } + + ndb.nodeCache.Remove(hash) + } + + return nil +} + +// Saves orphaned nodes to disk under a special prefix. +// version: the new version being saved. +// orphans: the orphan nodes created since version-1 +func (ndb *nodeDB) SaveOrphans(version int64, orphans map[string]int64) error { + ndb.mtx.Lock() + defer ndb.mtx.Unlock() + + toVersion, err := ndb.getPreviousVersion(version) + if err != nil { + return err + } + + for hash, fromVersion := range orphans { + logger.Debug("SAVEORPHAN %v-%v %X\n", fromVersion, toVersion, hash) + err := ndb.saveOrphan([]byte(hash), fromVersion, toVersion) + if err != nil { + return err + } + } + return nil +} + +func (ndb *nodeDB) deleteOrphanedData(hash []byte) error { + if err := ndb.batch.Delete(ndb.nodeKey(hash)); err != nil { + return err + } + ndb.nodeCache.Remove(hash) + return nil +} + +// Saves a single orphan to disk. +func (ndb *nodeDB) saveOrphan(hash []byte, fromVersion, toVersion int64) error { + if fromVersion > toVersion { + return fmt.Errorf("orphan expires before it comes alive. %d > %d", fromVersion, toVersion) + } + key := ndb.orphanKey(fromVersion, toVersion, hash) + if err := ndb.batch.Set(key, hash); err != nil { + return err + } + return nil +} + +// deleteOrphans deletes orphaned nodes from disk, and the associated orphan +// entries. +func (ndb *nodeDB) deleteOrphans(version int64) error { + // Will be zero if there is no previous version. 
+ predecessor, err := ndb.getPreviousVersion(version) + if err != nil { + return err + } + + // Traverse orphans with a lifetime ending at the version specified. + // TODO optimize. + return ndb.traverseOrphansVersion(version, func(key, hash []byte) error { + var fromVersion, toVersion int64 + + // See comment on `orphanKeyFmt`. Note that here, `version` and + // `toVersion` are always equal. + orphanKeyFormat.Scan(key, &toVersion, &fromVersion) + + // Delete orphan key and reverse-lookup key. + if err := ndb.batch.Delete(key); err != nil { + return err + } + + // If there is no predecessor, or the predecessor is earlier than the + // beginning of the lifetime (ie: negative lifetime), or the lifetime + // spans a single version and that version is the one being deleted, we + // can delete the orphan. Otherwise, we shorten its lifetime, by + // moving its endpoint to the previous version. + if predecessor < fromVersion || fromVersion == toVersion { + logger.Debug("DELETE predecessor:%v fromVersion:%v toVersion:%v %X\n", predecessor, fromVersion, toVersion, hash) + if err := ndb.batch.Delete(ndb.nodeKey(hash)); err != nil { + return err + } + ndb.nodeCache.Remove(hash) + } else { + logger.Debug("MOVE predecessor:%v fromVersion:%v toVersion:%v %X\n", predecessor, fromVersion, toVersion, hash) + if err := ndb.saveOrphan(hash, fromVersion, predecessor); err != nil { + return err + } + } + return nil + }) +} + +func (ndb *nodeDB) nodeKey(hash []byte) []byte { + return nodeKeyFormat.KeyBytes(hash) +} + +func (ndb *nodeDB) fastNodeKey(key []byte) []byte { + return fastKeyFormat.KeyBytes(key) +} + +func (ndb *nodeDB) orphanKey(fromVersion, toVersion int64, hash []byte) []byte { + return orphanKeyFormat.Key(toVersion, fromVersion, hash) +} + +func (ndb *nodeDB) rootKey(version int64) []byte { + return rootKeyFormat.Key(version) +} + +func (ndb *nodeDB) getLatestVersion() (int64, error) { + if ndb.latestVersion == 0 { + var err error + ndb.latestVersion, err = 
ndb.getPreviousVersion(1<<63 - 1) + if err != nil { + return 0, err + } + } + return ndb.latestVersion, nil +} + +// Get the iterator for a given prefix. +func (ndb *nodeDB) getPrefixIterator(prefix []byte) (dbm.Iterator, error) { + var start, end []byte + if len(prefix) == 0 { + start = nil + end = nil + } else { + start = ibytes.Cp(prefix) + end = ibytes.CpIncr(prefix) + } + + return ndb.db.Iterator(start, end) +} + +func (ndb *nodeDB) getFirstVersion() (int64, error) { + firstVersion := ndb.firstVersion + + if firstVersion > 0 { + return firstVersion, nil + } + + // Check if we have a legacy version + itr, err := ndb.getPrefixIterator(rootKeyFormat.Key()) + if err != nil { + return 0, err + } + defer func() { _ = itr.Close() }() + if itr.Valid() { + var version int64 + rootKeyFormat.Scan(itr.Key(), &version) + return version, nil + } + // Find the first version + latestVersion, err := ndb.getLatestVersion() + if err != nil { + return 0, err + } + for firstVersion < latestVersion { + version := (latestVersion + firstVersion) >> 1 + has, err := ndb.hasVersion(version) + if err != nil { + return 0, err + } + if has { + latestVersion = version + } else { + firstVersion = version + 1 + } + } + ndb.resetFirstVersion(latestVersion) + + return latestVersion, nil +} + +func (ndb *nodeDB) resetFirstVersion(version int64) { + ndb.firstVersion = version +} + +func (ndb *nodeDB) updateLatestVersion(version int64) { + if ndb.latestVersion < version { + ndb.latestVersion = version + } +} + +func (ndb *nodeDB) resetLatestVersion(version int64) { + ndb.latestVersion = version +} + +func (ndb *nodeDB) getPreviousVersion(version int64) (int64, error) { + itr, err := ndb.db.ReverseIterator( + rootKeyFormat.Key(1), + rootKeyFormat.Key(version), + ) + if err != nil { + return 0, err + } + defer func() { _ = itr.Close() }() + + pversion := int64(-1) + for ; itr.Valid(); itr.Next() { + k := itr.Key() + rootKeyFormat.Scan(k, &pversion) + return pversion, nil + } + + if err := 
itr.Error(); err != nil { + return 0, err + } + + return 0, nil +} + +// deleteRoot deletes the root entry from disk, but not the node it points to. +func (ndb *nodeDB) deleteRoot(version int64, checkLatestVersion bool) error { + latestVersion, err := ndb.getLatestVersion() + if err != nil { + return err + } + + if checkLatestVersion && version == latestVersion { + return errors.New("tried to delete latest version") + } + if err := ndb.batch.Delete(ndb.rootKey(version)); err != nil { + return err + } + return nil +} + +// Traverse orphans and return error if any, nil otherwise +func (ndb *nodeDB) traverseOrphans(fn func(keyWithPrefix, v []byte) error) error { + return ndb.traversePrefix(orphanKeyFormat.Key(), fn) +} + +// Traverse fast nodes and return error if any, nil otherwise +func (ndb *nodeDB) traverseFastNodes(fn func(k, v []byte) error) error { + return ndb.traversePrefix(fastKeyFormat.Key(), fn) +} + +// Traverse orphans ending at a certain version. return error if any, nil otherwise +func (ndb *nodeDB) traverseOrphansVersion(version int64, fn func(k, v []byte) error) error { + return ndb.traversePrefix(orphanKeyFormat.Key(version), fn) +} + +// Traverse all keys and return error if any, nil otherwise +// nolint: unused +func (ndb *nodeDB) traverse(fn func(key, value []byte) error) error { + return ndb.traverseRange(nil, nil, fn) +} + +// Traverse all keys between a given range (excluding end) and return error if any, nil otherwise +func (ndb *nodeDB) traverseRange(start []byte, end []byte, fn func(k, v []byte) error) error { + itr, err := ndb.db.Iterator(start, end) + if err != nil { + return err + } + defer func() { _ = itr.Close() }() + + for ; itr.Valid(); itr.Next() { + if err := fn(itr.Key(), itr.Value()); err != nil { + return err + } + } + + if err := itr.Error(); err != nil { + return err + } + + return nil +} + +// Traverse all keys with a certain prefix. 
Return error if any, nil otherwise +func (ndb *nodeDB) traversePrefix(prefix []byte, fn func(k, v []byte) error) error { + itr, err := dbm.IteratePrefix(ndb.db, prefix) + if err != nil { + return err + } + defer func() { _ = itr.Close() }() + + for ; itr.Valid(); itr.Next() { + if err := fn(itr.Key(), itr.Value()); err != nil { + return err + } + } + + return nil +} + +// Get iterator for fast prefix and error, if any +func (ndb *nodeDB) getFastIterator(start, end []byte, ascending bool) (dbm.Iterator, error) { + var startFormatted, endFormatted []byte + + if start != nil { + startFormatted = fastKeyFormat.KeyBytes(start) + } else { + startFormatted = fastKeyFormat.Key() + } + + if end != nil { + endFormatted = fastKeyFormat.KeyBytes(end) + } else { + endFormatted = fastKeyFormat.Key() + endFormatted[0]++ + } + + if ascending { + return ndb.db.Iterator(startFormatted, endFormatted) + } + + return ndb.db.ReverseIterator(startFormatted, endFormatted) +} + +// Write to disk. +func (ndb *nodeDB) Commit() error { + ndb.mtx.Lock() + defer ndb.mtx.Unlock() + + var err error + if ndb.opts.Sync { + err = ndb.batch.WriteSync() + } else { + err = ndb.batch.Write() + } + if err != nil { + return errors.Wrap(err, "failed to write batch") + } + + if err := ndb.batch.Close(); err != nil { + return err + } + ndb.batch = ndb.db.NewBatch() + + return nil +} + +func (ndb *nodeDB) HasRoot(version int64) (bool, error) { + return ndb.db.Has(ndb.rootKey(version)) +} + +// hasVersion checks if the given version exists. 
+func (ndb *nodeDB) hasVersion(version int64) (bool, error) { + return ndb.HasRoot(version) +} + +func (ndb *nodeDB) getRoot(version int64) ([]byte, error) { + return ndb.db.Get(ndb.rootKey(version)) +} + +func (ndb *nodeDB) getRoots() (roots map[int64][]byte, err error) { + roots = make(map[int64][]byte) + err = ndb.traversePrefix(rootKeyFormat.Key(), func(k, v []byte) error { + var version int64 + rootKeyFormat.Scan(k, &version) + roots[version] = v + return nil + }) + return roots, err +} + +// SaveRoot creates an entry on disk for the given root, so that it can be +// loaded later. +func (ndb *nodeDB) SaveRoot(root *Node, version int64) error { + if len(root.GetHash()) == 0 { + return ErrRootMissingHash + } + return ndb.saveRoot(root.GetHash(), version) +} + +// SaveEmptyRoot creates an entry on disk for an empty root. +func (ndb *nodeDB) SaveEmptyRoot(version int64) error { + return ndb.saveRoot([]byte{}, version) +} + +func (ndb *nodeDB) saveRoot(hash []byte, version int64) error { + ndb.mtx.Lock() + defer ndb.mtx.Unlock() + + // We allow the initial version to be arbitrary + latest, err := ndb.getLatestVersion() + if err != nil { + return err + } + if latest > 0 && version != latest+1 { + return fmt.Errorf("must save consecutive versions; expected %d, got %d", latest+1, version) + } + + if err := ndb.batch.Set(ndb.rootKey(version), hash); err != nil { + return err + } + + ndb.updateLatestVersion(version) + + return nil +} + +func (ndb *nodeDB) incrVersionReaders(version int64) { + ndb.mtx.Lock() + defer ndb.mtx.Unlock() + ndb.versionReaders[version]++ +} + +func (ndb *nodeDB) decrVersionReaders(version int64) { + ndb.mtx.Lock() + defer ndb.mtx.Unlock() + if ndb.versionReaders[version] > 0 { + ndb.versionReaders[version]-- + } +} + +// Utility and test functions + +// nolint: unused +func (ndb *nodeDB) leafNodes() ([]*Node, error) { + leaves := []*Node{} + + err := ndb.traverseNodes(func(hash []byte, node *Node) error { + if node.isLeaf() { + leaves = 
append(leaves, node) + } + return nil + }) + + if err != nil { + return nil, err + } + + return leaves, nil +} + +// nolint: unused +func (ndb *nodeDB) nodes() ([]*Node, error) { + nodes := []*Node{} + + err := ndb.traverseNodes(func(hash []byte, node *Node) error { + nodes = append(nodes, node) + return nil + }) + + if err != nil { + return nil, err + } + + return nodes, nil +} + +// nolint: unused +func (ndb *nodeDB) orphans() ([][]byte, error) { + orphans := [][]byte{} + + err := ndb.traverseOrphans(func(k, v []byte) error { + orphans = append(orphans, v) + return nil + }) + + if err != nil { + return nil, err + } + + return orphans, nil +} + +// Not efficient. +// NOTE: DB cannot implement Size() because +// mutations are not always synchronous. +// +//nolint:unused +func (ndb *nodeDB) size() int { + size := 0 + err := ndb.traverse(func(k, v []byte) error { + size++ + return nil + }) + + if err != nil { + return -1 + } + return size +} + +func (ndb *nodeDB) traverseNodes(fn func(hash []byte, node *Node) error) error { + nodes := []*Node{} + + err := ndb.traversePrefix(nodeKeyFormat.Key(), func(key, value []byte) error { + node, err := MakeNode(value) + if err != nil { + return err + } + h := node.GetHash() + nodeKeyFormat.Scan(key, &h) + nodes = append(nodes, node) + return nil + }) + + if err != nil { + return err + } + + sort.Slice(nodes, func(i, j int) bool { + return bytes.Compare(nodes[i].GetNodeKey(), nodes[j].GetNodeKey()) < 0 + }) + + for _, n := range nodes { + if err := fn(n.GetHash(), n); err != nil { + return err + } + } + return nil +} + +func (ndb *nodeDB) String() (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + + index := 0 + + err := ndb.traversePrefix(rootKeyFormat.Key(), func(key, value []byte) error { + fmt.Fprintf(buf, "%s: %x\n", key, value) + return nil + }) + + if err != nil { + return "", err + } + + buf.WriteByte('\n') + + err = ndb.traverseOrphans(func(key, value []byte) error { + 
fmt.Fprintf(buf, "%s: %x\n", key, value) + return nil + }) + + if err != nil { + return "", err + } + + buf.WriteByte('\n') + + err = ndb.traverseNodes(func(hash []byte, node *Node) error { + switch { + case len(hash) == 0: + buf.WriteByte('\n') + case node == nil: + fmt.Fprintf(buf, "%s%40x: \n", nodeKeyFormat.Prefix(), hash) + case node.GetValue() == nil && node.GetHeight() > 0: + fmt.Fprintf(buf, "%s%40x: %s %-16s h=%d version=%d\n", + nodeKeyFormat.Prefix(), hash, node.GetNodeKey(), "", node.GetHeight(), node.GetVersion()) + default: + fmt.Fprintf(buf, "%s%40x: %s = %-16s h=%d version=%d\n", + nodeKeyFormat.Prefix(), hash, node.GetNodeKey(), node.GetValue(), node.GetHeight(), node.GetVersion()) + } + index++ + return nil + }) + + if err != nil { + return "", err + } + + return "-" + "\n" + buf.String() + "-", nil +} + +var ( + ErrNodeMissingHash = fmt.Errorf("node does not have a hash") + ErrNodeAlreadyPersisted = fmt.Errorf("shouldn't be calling save on an already persisted node") + ErrRootMissingHash = fmt.Errorf("root hash must not be empty") +) diff --git a/sei-iavl/nodedb_test.go b/sei-iavl/nodedb_test.go new file mode 100644 index 0000000000..1bca7f2b61 --- /dev/null +++ b/sei-iavl/nodedb_test.go @@ -0,0 +1,295 @@ +package iavl + +import ( + "encoding/binary" + "errors" + "math/rand" + "strconv" + "testing" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" + + "github.com/sei-protocol/sei-chain/sei-iavl/mock" +) + +func BenchmarkNodeKey(b *testing.B) { + ndb := &nodeDB{} + hashes := makeHashes(b, 2432325) + for i := 0; i < b.N; i++ { + ndb.nodeKey(hashes[i]) + } +} + +func BenchmarkOrphanKey(b *testing.B) { + ndb := &nodeDB{} + hashes := makeHashes(b, 2432325) + for i := 0; i < b.N; i++ { + ndb.orphanKey(1234, 1239, hashes[i]) + } +} + +func BenchmarkTreeString(b *testing.B) { + tree := makeAndPopulateMutableTree(b) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + sink, _ = 
tree.String() + require.NotNil(b, sink) + } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + sink = (interface{})(nil) +} + +func TestNewNoDbStorage_StorageVersionInDb_Success(t *testing.T) { + const expectedVersion = defaultStorageVersionValue + + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + + dbMock.EXPECT().Get(gomock.Any()).Return([]byte(expectedVersion), nil).Times(1) + dbMock.EXPECT().NewBatch().Return(nil).Times(1) + + ndb := newNodeDB(dbMock, 0, nil) + require.Equal(t, expectedVersion, ndb.storageVersion) +} + +func TestNewNoDbStorage_ErrorInConstructor_DefaultSet(t *testing.T) { + const expectedVersion = defaultStorageVersionValue + + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + + dbMock.EXPECT().Get(gomock.Any()).Return(nil, errors.New("some db error")).Times(1) + dbMock.EXPECT().NewBatch().Return(nil).Times(1) + + ndb := newNodeDB(dbMock, 0, nil) + require.Equal(t, expectedVersion, ndb.getStorageVersion()) +} + +func TestNewNoDbStorage_DoesNotExist_DefaultSet(t *testing.T) { + const expectedVersion = defaultStorageVersionValue + + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + + dbMock.EXPECT().Get(gomock.Any()).Return(nil, nil).Times(1) + dbMock.EXPECT().NewBatch().Return(nil).Times(1) + + ndb := newNodeDB(dbMock, 0, nil) + require.Equal(t, expectedVersion, ndb.getStorageVersion()) +} + +func TestSetStorageVersion_Success(t *testing.T) { + const expectedVersion = fastStorageVersionValue + + db := db.NewMemDB() + + ndb := newNodeDB(db, 0, nil) + require.Equal(t, defaultStorageVersionValue, ndb.getStorageVersion()) + + err := ndb.setFastStorageVersionToBatch() + require.NoError(t, err) + + latestVersion, err := ndb.getLatestVersion() + require.NoError(t, err) + require.Equal(t, expectedVersion+fastStorageVersionDelimiter+strconv.Itoa(int(latestVersion)), ndb.getStorageVersion()) + require.NoError(t, ndb.batch.Write()) +} + +func TestSetStorageVersion_DBFailure_OldKept(t 
*testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + batchMock := mock.NewMockBatch(ctrl) + rIterMock := mock.NewMockIterator(ctrl) + + expectedErrorMsg := "some db error" + + expectedFastCacheVersion := 2 + + dbMock.EXPECT().Get(gomock.Any()).Return([]byte(defaultStorageVersionValue), nil).Times(1) + dbMock.EXPECT().NewBatch().Return(batchMock).Times(1) + + // rIterMock is used to get the latest version from disk. We are mocking that rIterMock returns latestTreeVersion from disk + rIterMock.EXPECT().Valid().Return(true).Times(1) + rIterMock.EXPECT().Key().Return(rootKeyFormat.Key(expectedFastCacheVersion)).Times(1) + rIterMock.EXPECT().Close().Return(nil).Times(1) + + dbMock.EXPECT().ReverseIterator(gomock.Any(), gomock.Any()).Return(rIterMock, nil).Times(1) + batchMock.EXPECT().Set(metadataKeyFormat.Key([]byte(storageVersionKey)), []byte(fastStorageVersionValue+fastStorageVersionDelimiter+strconv.Itoa(expectedFastCacheVersion))).Return(errors.New(expectedErrorMsg)).Times(1) + + ndb := newNodeDB(dbMock, 0, nil) + require.Equal(t, defaultStorageVersionValue, ndb.getStorageVersion()) + + err := ndb.setFastStorageVersionToBatch() + require.Error(t, err) + require.Equal(t, expectedErrorMsg, err.Error()) + require.Equal(t, defaultStorageVersionValue, ndb.getStorageVersion()) +} + +func TestSetStorageVersion_InvalidVersionFailure_OldKept(t *testing.T) { + ctrl := gomock.NewController(t) + dbMock := mock.NewMockDB(ctrl) + batchMock := mock.NewMockBatch(ctrl) + + expectedErrorMsg := errInvalidFastStorageVersion + + invalidStorageVersion := fastStorageVersionValue + fastStorageVersionDelimiter + "1" + fastStorageVersionDelimiter + "2" + + dbMock.EXPECT().Get(gomock.Any()).Return([]byte(invalidStorageVersion), nil).Times(1) + dbMock.EXPECT().NewBatch().Return(batchMock).Times(1) + + ndb := newNodeDB(dbMock, 0, nil) + require.Equal(t, invalidStorageVersion, ndb.getStorageVersion()) + + err := ndb.setFastStorageVersionToBatch() + require.Error(t, 
err) + require.Equal(t, expectedErrorMsg, err.Error()) + require.Equal(t, invalidStorageVersion, ndb.getStorageVersion()) +} + +func TestSetStorageVersion_FastVersionFirst_VersionAppended(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.storageVersion = fastStorageVersionValue + ndb.latestVersion = 100 + + err := ndb.setFastStorageVersionToBatch() + require.NoError(t, err) + require.Equal(t, fastStorageVersionValue+fastStorageVersionDelimiter+strconv.Itoa(int(ndb.latestVersion)), ndb.storageVersion) +} + +func TestSetStorageVersion_FastVersionSecond_VersionAppended(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + + storageVersionBytes := []byte(fastStorageVersionValue) + storageVersionBytes[len(fastStorageVersionValue)-1]++ // increment last byte + ndb.storageVersion = string(storageVersionBytes) + + err := ndb.setFastStorageVersionToBatch() + require.NoError(t, err) + require.Equal(t, string(storageVersionBytes)+fastStorageVersionDelimiter+strconv.Itoa(int(ndb.latestVersion)), ndb.storageVersion) +} + +func TestSetStorageVersion_SameVersionTwice(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + + storageVersionBytes := []byte(fastStorageVersionValue) + storageVersionBytes[len(fastStorageVersionValue)-1]++ // increment last byte + ndb.storageVersion = string(storageVersionBytes) + + err := ndb.setFastStorageVersionToBatch() + require.NoError(t, err) + newStorageVersion := string(storageVersionBytes) + fastStorageVersionDelimiter + strconv.Itoa(int(ndb.latestVersion)) + require.Equal(t, newStorageVersion, ndb.storageVersion) + + err = ndb.setFastStorageVersionToBatch() + require.NoError(t, err) + require.Equal(t, newStorageVersion, ndb.storageVersion) +} + +// Test case where version is incorrect and has some extra garbage at the end +func TestShouldForceFastStorageUpdate_DefaultVersion_True(t *testing.T) { + db := db.NewMemDB() + ndb := 
newNodeDB(db, 0, nil) + ndb.storageVersion = defaultStorageVersionValue + ndb.latestVersion = 100 + + shouldForce, err := ndb.shouldForceFastStorageUpgrade() + require.False(t, shouldForce) + require.NoError(t, err) +} + +func TestShouldForceFastStorageUpdate_FastVersion_Greater_True(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + ndb.storageVersion = fastStorageVersionValue + fastStorageVersionDelimiter + strconv.Itoa(int(ndb.latestVersion+1)) + + shouldForce, err := ndb.shouldForceFastStorageUpgrade() + require.True(t, shouldForce) + require.NoError(t, err) +} + +func TestShouldForceFastStorageUpdate_FastVersion_Smaller_True(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + ndb.storageVersion = fastStorageVersionValue + fastStorageVersionDelimiter + strconv.Itoa(int(ndb.latestVersion-1)) + + shouldForce, err := ndb.shouldForceFastStorageUpgrade() + require.True(t, shouldForce) + require.NoError(t, err) +} + +func TestShouldForceFastStorageUpdate_FastVersion_Match_False(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + ndb.storageVersion = fastStorageVersionValue + fastStorageVersionDelimiter + strconv.Itoa(int(ndb.latestVersion)) + + shouldForce, err := ndb.shouldForceFastStorageUpgrade() + require.False(t, shouldForce) + require.NoError(t, err) +} + +func TestIsFastStorageEnabled_True(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + ndb.storageVersion = fastStorageVersionValue + fastStorageVersionDelimiter + strconv.Itoa(int(ndb.latestVersion)) + + require.True(t, ndb.hasUpgradedToFastStorage()) +} + +func TestIsFastStorageEnabled_False(t *testing.T) { + db := db.NewMemDB() + ndb := newNodeDB(db, 0, nil) + ndb.latestVersion = 100 + ndb.storageVersion = defaultStorageVersionValue + + shouldForce, err := ndb.shouldForceFastStorageUpgrade() + require.False(t, shouldForce) + 
require.NoError(t, err) +} + +func makeHashes(b *testing.B, seed int64) [][]byte { + b.StopTimer() + rnd := rand.NewSource(seed) + hashes := make([][]byte, b.N) + hashBytes := 8 * ((hashSize + 7) / 8) + for i := 0; i < b.N; i++ { + hashes[i] = make([]byte, hashBytes) + for b := 0; b < hashBytes; b += 8 { + binary.BigEndian.PutUint64(hashes[i][b:b+8], uint64(rnd.Int63())) + } + hashes[i] = hashes[i][:hashSize] + } + b.StartTimer() + return hashes +} + +func makeAndPopulateMutableTree(tb testing.TB) *MutableTree { + memDB := db.NewMemDB() + tree, err := NewMutableTreeWithOpts(memDB, 0, &Options{InitialVersion: 9}, false) + require.NoError(tb, err) + + for i := 0; i < 1e4; i++ { + buf := make([]byte, 0, (i/255)+1) + for j := 0; 1<<j <= i; j++ { + buf = append(buf, byte((i>>j)&0xff)) + } + tree.Set(buf, buf) + } + _, _, err = tree.SaveVersion() + require.Nil(tb, err, "Expected .SaveVersion to succeed") + return tree +} diff --git a/sei-iavl/options.go b/sei-iavl/options.go new file mode 100644 index 0000000000..e42defdabb --- /dev/null +++ b/sei-iavl/options.go @@ -0,0 +1,107 @@ +package iavl + +import "sync/atomic" + +// Statistics about db runtime state +type Statistics struct { + // Each time GetNode operation hit cache + cacheHitCnt uint64 + + // Each time GetNode and GetFastNode operation miss cache + cacheMissCnt uint64 + + // Each time GetFastNode operation hit cache + fastCacheHitCnt uint64 + + // Each time GetFastNode operation miss cache + fastCacheMissCnt uint64 +} + +func (stat *Statistics) IncCacheHitCnt() { + if stat == nil { + return + } + atomic.AddUint64(&stat.cacheHitCnt, 1) +} + +func (stat *Statistics) IncCacheMissCnt() { + if stat == nil { + return + } + atomic.AddUint64(&stat.cacheMissCnt, 1) +} + +func (stat *Statistics) IncFastCacheHitCnt() { + if stat == nil { + return + } + atomic.AddUint64(&stat.fastCacheHitCnt, 1) +} + +func (stat *Statistics) IncFastCacheMissCnt() { + if stat == nil { + return + } + atomic.AddUint64(&stat.fastCacheMissCnt, 1) +} + +func (stat *Statistics) 
GetCacheHitCnt() uint64 { + return atomic.LoadUint64(&stat.cacheHitCnt) +} + +func (stat *Statistics) GetCacheMissCnt() uint64 { + return atomic.LoadUint64(&stat.cacheMissCnt) +} + +func (stat *Statistics) GetFastCacheHitCnt() uint64 { + return atomic.LoadUint64(&stat.fastCacheHitCnt) +} + +func (stat *Statistics) GetFastCacheMissCnt() uint64 { + return atomic.LoadUint64(&stat.fastCacheMissCnt) +} + +func (stat *Statistics) Reset() { + atomic.StoreUint64(&stat.cacheHitCnt, 0) + atomic.StoreUint64(&stat.cacheMissCnt, 0) + atomic.StoreUint64(&stat.fastCacheHitCnt, 0) + atomic.StoreUint64(&stat.fastCacheMissCnt, 0) +} + +// Options define tree options. +type Options struct { + // Sync synchronously flushes all writes to storage, using e.g. the fsync syscall. + // Disabling this significantly improves performance, but can lose data on e.g. power loss. + Sync bool + + // InitialVersion specifies the initial version number. If any versions already exist below + // this, an error is returned when loading the tree. Only used for the initial SaveVersion() + // call. + InitialVersion uint64 + + // When Stat is not nil, statistical logic needs to be executed + Stat *Statistics + + // When true, orphan data will be stored in separate directory than application data, and + // the pruning of application data will happen during commit (rather than after commit) + SeparateOrphanStorage bool + + // Only meaningful if SeparateOrphanStorage is true. + // The number of application data versions to keep in the application database. + SeparateOphanVersionsToKeep int64 + + // Only meaningful if SeparateOrphanStorage is true. + // The max number of orphan entries to store in the separate orphan files. + NumOrphansPerFile int + + // Only meaningful if SeparateOrphanStorage is true. + // The directory to store orphan files. + OrphanDirectory string +} + +// DefaultOptions returns the default options for IAVL. 
+func DefaultOptions() Options { + return Options{ + NumOrphansPerFile: 100000, + } +} diff --git a/sei-iavl/orphandb.go b/sei-iavl/orphandb.go new file mode 100644 index 0000000000..da49926851 --- /dev/null +++ b/sei-iavl/orphandb.go @@ -0,0 +1,86 @@ +package iavl + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" +) + +type orphanDB struct { + cache map[int64]map[string]int64 // key: version, value: orphans + directory string + numOrphansPerFile int +} + +func NewOrphanDB(opts *Options) *orphanDB { + return &orphanDB{ + cache: map[int64]map[string]int64{}, + directory: opts.OrphanDirectory, + numOrphansPerFile: opts.NumOrphansPerFile, + } +} + +func (o *orphanDB) SaveOrphans(version int64, orphans map[string]int64) error { + o.cache[version] = orphans + chunks := [][]string{{}} + for orphan := range orphans { + if len(chunks[len(chunks)-1]) == o.numOrphansPerFile { + chunks = append(chunks, []string{}) + } + chunks[len(chunks)-1] = append(chunks[len(chunks)-1], orphan) + } + dir := filepath.Clean(filepath.Join(o.directory, fmt.Sprintf("%d", version))) + if err := os.RemoveAll(dir); err != nil { + return err + } + if err := os.MkdirAll(dir, fs.ModePerm); err != nil { + return err + } + for i, chunk := range chunks { + subPath := filepath.Clean(filepath.Join(dir, fmt.Sprintf("%d", i))) + f, err := os.Create(subPath) + if err != nil { + return err + } + if _, err := f.WriteString(strings.Join(chunk, "\n")); err != nil { + _ = f.Close() + _ = os.RemoveAll(subPath) + return err + } + if err := f.Close(); err != nil { + _ = os.RemoveAll(subPath) + return err + } + } + return nil +} + +func (o *orphanDB) GetOrphans(version int64) map[string]int64 { + if _, ok := o.cache[version]; !ok { + o.cache[version] = map[string]int64{} + dir := filepath.Clean(filepath.Join(o.directory, fmt.Sprintf("%d", version))) + files, err := os.ReadDir(dir) + if err != nil { + // no orphans found + return o.cache[version] + } + for _, file := range files { + content, err := 
os.ReadFile(filepath.Clean(filepath.Join(dir, file.Name()))) + if err != nil { + return o.cache[version] + } + for _, orphan := range strings.Split(string(content), "\n") { + o.cache[version][orphan] = version + } + } + } + return o.cache[version] +} + +func (o *orphanDB) DeleteOrphans(version int64) error { + delete(o.cache, version) + dir := filepath.Clean(filepath.Join(o.directory, fmt.Sprintf("%d", version))) + return os.RemoveAll(dir) +} diff --git a/sei-iavl/orphandb_test.go b/sei-iavl/orphandb_test.go new file mode 100644 index 0000000000..51a7dd8df6 --- /dev/null +++ b/sei-iavl/orphandb_test.go @@ -0,0 +1,73 @@ +package iavl + +import ( + "fmt" + "io/ioutil" + "path" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOrphanDBSaveGet(t *testing.T) { + dir := t.TempDir() + db := NewOrphanDB(&Options{ + NumOrphansPerFile: 2, + OrphanDirectory: dir, + }) + err := db.SaveOrphans(123, map[string]int64{ + "o1": 123, + "o2": 123, + "o3": 123, + }) + require.Nil(t, err) + files, err := ioutil.ReadDir(path.Join(dir, fmt.Sprintf("%d", 123))) + require.Nil(t, err) + require.Equal(t, 2, len(files)) // 3 orphans would result in 2 files + orphans := db.GetOrphans(123) + require.Equal(t, map[string]int64{ + "o1": 123, + "o2": 123, + "o3": 123, + }, orphans) + orphans = db.GetOrphans(456) // not exist + require.Equal(t, map[string]int64{}, orphans) + + // flush cache + db = NewOrphanDB(&Options{ + NumOrphansPerFile: 2, + OrphanDirectory: dir, + }) + orphans = db.GetOrphans(123) // would load from disk + require.Equal(t, map[string]int64{ + "o1": 123, + "o2": 123, + "o3": 123, + }, orphans) +} + +func TestOrphanDelete(t *testing.T) { + dir := t.TempDir() + db := NewOrphanDB(&Options{ + NumOrphansPerFile: 2, + OrphanDirectory: dir, + }) + err := db.SaveOrphans(123, map[string]int64{ + "o1": 123, + "o2": 123, + "o3": 123, + }) + require.Nil(t, err) + err = db.DeleteOrphans(123) + require.Nil(t, err) + orphans := db.GetOrphans(123) // not exist in cache + 
require.Equal(t, map[string]int64{}, orphans) + + // flush cache + db = NewOrphanDB(&Options{ + NumOrphansPerFile: 2, + OrphanDirectory: dir, + }) + orphans = db.GetOrphans(123) // would load from disk + require.Equal(t, map[string]int64{}, orphans) +} diff --git a/sei-iavl/proof.go b/sei-iavl/proof.go new file mode 100644 index 0000000000..63ad0a420f --- /dev/null +++ b/sei-iavl/proof.go @@ -0,0 +1,288 @@ +package iavl + +import ( + "bytes" + "crypto/sha256" + "fmt" + "math" + "sync" + + "github.com/pkg/errors" + + hexbytes "github.com/sei-protocol/sei-chain/sei-iavl/internal/bytes" + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" + iavlproto "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +var bufPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +var ( + // ErrInvalidProof is returned by Verify when a proof cannot be validated. + ErrInvalidProof = fmt.Errorf("invalid proof") + + // ErrInvalidInputs is returned when the inputs passed to the function are invalid. + ErrInvalidInputs = fmt.Errorf("invalid inputs") + + // ErrInvalidRoot is returned when the root passed in does not match the proof's. + ErrInvalidRoot = fmt.Errorf("invalid root") +) + +//---------------------------------------- +// ProofInnerNode +// Contract: Left and Right can never both be set. 
Will result in a empty `[]` roothash + +type ProofInnerNode struct { + Height int8 `json:"height"` + Size int64 `json:"size"` + Version int64 `json:"version"` + Left []byte `json:"left"` + Right []byte `json:"right"` +} + +func (pin ProofInnerNode) String() string { + return pin.stringIndented("") +} + +func (pin ProofInnerNode) stringIndented(indent string) string { + return fmt.Sprintf(`ProofInnerNode{ +%s Height: %v +%s Size: %v +%s Version: %v +%s Left: %X +%s Right: %X +%s}`, + indent, pin.Height, + indent, pin.Size, + indent, pin.Version, + indent, pin.Left, + indent, pin.Right, + indent) +} + +func (pin ProofInnerNode) Hash(childHash []byte) ([]byte, error) { + hasher := sha256.New() + + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + err := encoding.EncodeVarint(buf, int64(pin.Height)) + if err == nil { + err = encoding.EncodeVarint(buf, pin.Size) + } + if err == nil { + err = encoding.EncodeVarint(buf, pin.Version) + } + + if len(pin.Left) > 0 && len(pin.Right) > 0 { + return nil, errors.New("both left and right child hashes are set") + } + + if len(pin.Left) == 0 { + if err == nil { + err = encoding.EncodeBytes(buf, childHash) + } + if err == nil { + err = encoding.EncodeBytes(buf, pin.Right) + } + } else { + if err == nil { + err = encoding.EncodeBytes(buf, pin.Left) + } + if err == nil { + err = encoding.EncodeBytes(buf, childHash) + } + } + + if err != nil { + return nil, fmt.Errorf("failed to hash ProofInnerNode: %v", err) + } + + _, err = hasher.Write(buf.Bytes()) + if err != nil { + return nil, err + } + return hasher.Sum(nil), nil +} + +// toProto converts the inner node proof to Protobuf, for use in ProofOps. +func (pin ProofInnerNode) toProto() *iavlproto.ProofInnerNode { + return &iavlproto.ProofInnerNode{ + Height: int32(pin.Height), + Size_: pin.Size, + Version: pin.Version, + Left: pin.Left, + Right: pin.Right, + } +} + +// proofInnerNodeFromProto converts a Protobuf ProofInnerNode to a ProofInnerNode. 
+func proofInnerNodeFromProto(pbInner *iavlproto.ProofInnerNode) (ProofInnerNode, error) { + if pbInner == nil { + return ProofInnerNode{}, errors.New("inner node cannot be nil") + } + if pbInner.Height > math.MaxInt8 || pbInner.Height < math.MinInt8 { + return ProofInnerNode{}, fmt.Errorf("height must fit inside an int8, got %v", pbInner.Height) + } + return ProofInnerNode{ + Height: int8(pbInner.Height), + Size: pbInner.Size_, + Version: pbInner.Version, + Left: pbInner.Left, + Right: pbInner.Right, + }, nil +} + +//---------------------------------------- + +type ProofLeafNode struct { + Key hexbytes.HexBytes `json:"key"` + ValueHash hexbytes.HexBytes `json:"value"` + Version int64 `json:"version"` +} + +func (pln ProofLeafNode) String() string { + return pln.stringIndented("") +} + +func (pln ProofLeafNode) stringIndented(indent string) string { + return fmt.Sprintf(`ProofLeafNode{ +%s Key: %v +%s ValueHash: %X +%s Version: %v +%s}`, + indent, pln.Key, + indent, pln.ValueHash, + indent, pln.Version, + indent) +} + +func (pln ProofLeafNode) Hash() ([]byte, error) { + hasher := sha256.New() + + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + + err := encoding.EncodeVarint(buf, 0) + if err == nil { + err = encoding.EncodeVarint(buf, 1) + } + if err == nil { + err = encoding.EncodeVarint(buf, pln.Version) + } + if err == nil { + err = encoding.EncodeBytes(buf, pln.Key) + } + if err == nil { + err = encoding.EncodeBytes(buf, pln.ValueHash) + } + if err != nil { + return nil, fmt.Errorf("failed to hash ProofLeafNode: %v", err) + } + _, err = hasher.Write(buf.Bytes()) + if err != nil { + return nil, err + + } + + return hasher.Sum(nil), nil +} + +// toProto converts the leaf node proof to Protobuf, for use in ProofOps. 
+func (pln ProofLeafNode) toProto() *iavlproto.ProofLeafNode { + return &iavlproto.ProofLeafNode{ + Key: pln.Key, + ValueHash: pln.ValueHash, + Version: pln.Version, + } +} + +// proofLeafNodeFromProto converts a Protobuf ProofLeadNode to a ProofLeafNode. +func proofLeafNodeFromProto(pbLeaf *iavlproto.ProofLeafNode) (ProofLeafNode, error) { + if pbLeaf == nil { + return ProofLeafNode{}, errors.New("leaf node cannot be nil") + } + return ProofLeafNode{ + Key: pbLeaf.Key, + ValueHash: pbLeaf.ValueHash, + Version: pbLeaf.Version, + }, nil +} + +//---------------------------------------- + +// If the key does not exist, returns the path to the next leaf left of key (w/ +// path), except when key is less than the least item, in which case it returns +// a path to the least item. +func (node *Node) PathToLeaf(t *ImmutableTree, key []byte) (PathToLeaf, *Node, error) { + path := new(PathToLeaf) + val, err := node.pathToLeaf(t, key, path) + return *path, val, err +} + +// pathToLeaf is a helper which recursively constructs the PathToLeaf. +// As an optimization the already constructed path is passed in as an argument +// and is shared among recursive calls. +func (node *Node) pathToLeaf(t *ImmutableTree, key []byte, path *PathToLeaf) (*Node, error) { + if node.GetHeight() == 0 { + if bytes.Equal(node.GetNodeKey(), key) { + return node, nil + } + return node, errors.New("key does not exist") + } + + // Note that we do not store the left child in the ProofInnerNode when we're going to add the + // left node as part of the path, similarly we don't store the right child info when going down + // the right child node. This is done as an optimization since the child info is going to be + // already stored in the next ProofInnerNode in PathToLeaf. 
+ if bytes.Compare(key, node.GetNodeKey()) < 0 { + // left side + rightNode, err := node.getRightNode(t) + if err != nil { + return nil, err + } + + pin := ProofInnerNode{ + Height: node.GetHeight(), + Size: node.GetSize(), + Version: node.GetVersion(), + Left: nil, + Right: rightNode.GetHash(), + } + *path = append(*path, pin) + + leftNode, err := node.getLeftNode(t) + if err != nil { + return nil, err + } + n, err := leftNode.pathToLeaf(t, key, path) + return n, err + } + // right side + leftNode, err := node.getLeftNode(t) + if err != nil { + return nil, err + } + + pin := ProofInnerNode{ + Height: node.GetHeight(), + Size: node.GetSize(), + Version: node.GetVersion(), + Left: leftNode.GetHash(), + Right: nil, + } + *path = append(*path, pin) + + rightNode, err := node.getRightNode(t) + if err != nil { + return nil, err + } + + n, err := rightNode.pathToLeaf(t, key, path) + return n, err +} diff --git a/sei-iavl/proof_forgery_test.go b/sei-iavl/proof_forgery_test.go new file mode 100644 index 0000000000..8d0d1e8cd5 --- /dev/null +++ b/sei-iavl/proof_forgery_test.go @@ -0,0 +1,106 @@ +package iavl_test + +import ( + "encoding/hex" + "math/rand" + "strings" + "testing" + + iavl "github.com/sei-protocol/sei-chain/sei-iavl" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" + db "github.com/tendermint/tm-db" +) + +func TestProofFogery(t *testing.T) { + source := rand.NewSource(0) + r := rand.New(source) + cacheSize := 0 + tree, err := iavl.NewMutableTreeWithOpts(db.NewMemDB(), cacheSize, nil, false) + require.NoError(t, err) + + // two keys only + keys := []byte{0x11, 0x32} + values := make([][]byte, len(keys)) + // make random values and insert into tree + for i, ikey := range keys { + key := []byte{ikey} + v := r.Intn(255) + values[i] = []byte{byte(v)} + tree.Set(key, values[i]) + } + + // get root + root, err := tree.WorkingHash() + require.NoError(t, err) + // use the rightmost kv pair in the tree so the inner nodes will 
populate left + k := []byte{keys[1]} + v := values[1] + + val, proof, err := tree.ImmutableTree().GetWithProof(k) + require.NoError(t, err) + + err = proof.Verify(root) + require.NoError(t, err) + err = proof.VerifyItem(k, val) + require.NoError(t, err) + + // ------------------- FORGE PROOF ------------------- + + forgedPayloadBytes := mustDecode("0xabcd") + forgedValueHash := tmhash.Sum(forgedPayloadBytes) + // make a forgery of the proof by adding: + // - a new leaf node to the right + // - an empty inner node + // - a right entry in the path + _, proof2, _ := tree.ImmutableTree().GetWithProof(k) + forgedNode := proof2.Leaves[0] + forgedNode.Key = []byte{0xFF} + forgedNode.ValueHash = forgedValueHash + proof2.Leaves = append(proof2.Leaves, forgedNode) + proof2.InnerNodes = append(proof2.InnerNodes, iavl.PathToLeaf{}) + // figure out what hash we need via https://twitter.com/samczsun/status/1578181160345034752 + proof2.LeftPath[0].Right = mustDecode("82C36CED85E914DAE8FDF6DD11FD5833121AA425711EB126C470CE28FF6623D5") + + rootHashValid := proof.ComputeRootHash() + verifyErr := proof.Verify(rootHashValid) + require.NoError(t, verifyErr, "should verify") + // forgery gives empty root hash (previously it returned the same one!) 
+ rootHashForged := proof2.ComputeRootHash() + require.Empty(t, rootHashForged, "roothash must be empty if both left and right are set") + verifyErr = proof2.Verify(rootHashForged) + require.Error(t, verifyErr, "should not verify") + + // verify proof two fails with valid proof + err = proof2.Verify(rootHashValid) + require.Error(t, err, "should not verify different root hash") + + { + // legit node verifies against legit proof (expected) + verifyErr = proof.VerifyItem(k, v) + require.NoError(t, verifyErr, "valid proof should verify") + // forged node fails to verify against legit proof (expected) + verifyErr = proof.VerifyItem(forgedNode.Key, forgedPayloadBytes) + require.Error(t, verifyErr, "forged proof should fail to verify") + } + { + // legit node fails to verify against forged proof (expected) + verifyErr = proof2.VerifyItem(k, v) + require.Error(t, verifyErr, "valid proof should verify, but has a forged sister node") + + // forged node fails to verify against forged proof (previously this succeeded!) 
+ verifyErr = proof2.VerifyItem(forgedNode.Key, forgedPayloadBytes) + require.Error(t, verifyErr, "forged proof should fail verify") + } +} + +func mustDecode(str string) []byte { + if strings.HasPrefix(str, "0x") { + str = str[2:] + } + b, err := hex.DecodeString(str) + if err != nil { + panic(err) + } + return b +} diff --git a/sei-iavl/proof_iavl_absence.go b/sei-iavl/proof_iavl_absence.go new file mode 100644 index 0000000000..ff71ffba0c --- /dev/null +++ b/sei-iavl/proof_iavl_absence.go @@ -0,0 +1,121 @@ +package iavl + +import ( + "fmt" + + proto "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" + tmmerkle "github.com/tendermint/tendermint/proto/tendermint/crypto" + + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" + iavlproto "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +const ProofOpIAVLAbsence = "iavl:a" + +// IAVLAbsenceOp takes a key as its only argument +// +// If the produced root hash matches the expected hash, the proof +// is good. +type AbsenceOp struct { + // Encoded in ProofOp.Key. + key []byte + + // To encode in ProofOp.Data. + // Proof is nil for an empty tree. + // The hash of an empty tree is nil. + Proof *RangeProof `json:"proof"` +} + +var _ merkle.ProofOperator = AbsenceOp{} + +func NewAbsenceOp(key []byte, proof *RangeProof) AbsenceOp { + return AbsenceOp{ + key: key, + Proof: proof, + } +} + +func AbsenceOpDecoder(pop tmmerkle.ProofOp) (merkle.ProofOperator, error) { + if pop.Type != ProofOpIAVLAbsence { + return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpIAVLAbsence) + } + // Strip the varint length prefix, used for backwards compatibility with Amino. 
+ bz, n, err := encoding.DecodeBytes(pop.Data) + if err != nil { + return nil, err + } + + if n != len(pop.Data) { + return nil, fmt.Errorf("unexpected bytes, expected %v got %v", n, len(pop.Data)) + } + + pbProofOp := &iavlproto.AbsenceOp{} + err = proto.Unmarshal(bz, pbProofOp) + if err != nil { + return nil, err + } + + proof, err := RangeProofFromProto(pbProofOp.Proof) + if err != nil { + return nil, err + } + + return NewAbsenceOp(pop.Key, &proof), nil +} + +func (op AbsenceOp) ProofOp() tmmerkle.ProofOp { + pbProof := iavlproto.AbsenceOp{Proof: op.Proof.ToProto()} + bz, err := proto.Marshal(&pbProof) + if err != nil { + panic(err) + } + // We length-prefix the byte slice to retain backwards compatibility with the Amino proofs. + bz, err = encoding.EncodeBytesSlice(bz) + if err != nil { + panic(err) + } + return tmmerkle.ProofOp{ + Type: ProofOpIAVLAbsence, + Key: op.key, + Data: bz, + } +} + +func (op AbsenceOp) String() string { + return fmt.Sprintf("IAVLAbsenceOp{%v}", op.GetKey()) +} + +func (op AbsenceOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 0 { + return nil, errors.Errorf("expected 0 args, got %v", len(args)) + } + + // If the tree is nil, the proof is nil, and all keys are absent. + if op.Proof == nil { + return [][]byte{[]byte(nil)}, nil + } + + // Compute the root hash and assume it is valid. + // The caller checks the ultimate root later. + root := op.Proof.ComputeRootHash() + err := op.Proof.Verify(root) + if err != nil { + return nil, errors.Wrap(err, "computing root hash") + } + + // XXX What is the encoding for keys? + // We should decode the key depending on whether it's a string or hex, + // maybe based on quotes and 0x prefix? 
+ err = op.Proof.VerifyAbsence(op.key) + if err != nil { + return nil, errors.Wrap(err, "verifying absence") + } + + return [][]byte{root}, nil +} + +func (op AbsenceOp) GetKey() []byte { + return op.key +} diff --git a/sei-iavl/proof_iavl_test.go b/sei-iavl/proof_iavl_test.go new file mode 100644 index 0000000000..1581ecfc98 --- /dev/null +++ b/sei-iavl/proof_iavl_test.go @@ -0,0 +1,100 @@ +package iavl + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + tmmerkle "github.com/tendermint/tendermint/proto/tendermint/crypto" + db "github.com/tendermint/tm-db" +) + +func TestProofOp(t *testing.T) { + tree, err := NewMutableTreeWithOpts(db.NewMemDB(), 0, nil, false) + require.NoError(t, err) + keys := []byte{0x0a, 0x11, 0x2e, 0x32, 0x50, 0x72, 0x99, 0xa1, 0xe4, 0xf7} // 10 total. + for _, ikey := range keys { + key := []byte{ikey} + tree.Set(key, key) + } + root, err := tree.WorkingHash() + require.NoError(t, err) + + testcases := []struct { + key byte + expectPresent bool + expectProofOp string + }{ + {0x00, false, "aa010aa7010a280808100a18012a2022b4e34a1778d6a03aac39f00d89deb886e0cc37454e300b7aebeb4f4939c0790a280804100418012a20734fad809673ab2b9672453a8b2bc8c9591e2d1d97933df5b4c3b0531bf82e720a280802100218012a20154b101a72acffe0f5e65d1e144a57dc6f97758d2049821231f02b6a5b44fe811a270a010a122001ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b1801"}, + {0x0a, true, "aa010aa7010a280808100a18012a2022b4e34a1778d6a03aac39f00d89deb886e0cc37454e300b7aebeb4f4939c0790a280804100418012a20734fad809673ab2b9672453a8b2bc8c9591e2d1d97933df5b4c3b0531bf82e720a280802100218012a20154b101a72acffe0f5e65d1e144a57dc6f97758d2049821231f02b6a5b44fe811a270a010a122001ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b1801"}, + {0x0b, false, 
"d5010ad2010a280808100a18012a2022b4e34a1778d6a03aac39f00d89deb886e0cc37454e300b7aebeb4f4939c0790a280804100418012a20734fad809673ab2b9672453a8b2bc8c9591e2d1d97933df5b4c3b0531bf82e720a280802100218012a20154b101a72acffe0f5e65d1e144a57dc6f97758d2049821231f02b6a5b44fe8112001a270a010a122001ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b18011a270a011112204a64a107f0cb32536e5bce6c98c393db21cca7f4ea187ba8c4dca8b51d4ea80a1801"}, + {0x11, true, "aa010aa7010a280808100a18012a2022b4e34a1778d6a03aac39f00d89deb886e0cc37454e300b7aebeb4f4939c0790a280804100418012a20734fad809673ab2b9672453a8b2bc8c9591e2d1d97933df5b4c3b0531bf82e720a28080210021801222053d2828f35e33aecab8e411a40afb0475288973b96aed2220e9894f43a5375ad1a270a011112204a64a107f0cb32536e5bce6c98c393db21cca7f4ea187ba8c4dca8b51d4ea80a1801"}, + {0x60, false, "d5010ad2010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a280806100618012a20631b10ce49ece4cc9130befac927865742fb11caf2e8fc08fc00a4a25e4bc7940a280802100218012a207a4a97f565ae0b3ea8abf175208f176ac8301665ac2d26c89be3664f90e23da612001a270a015012205c62e091b8c0565f1bafad0dad5934276143ae2ccef7a5381e8ada5b1a8d26d218011a270a01721220454349e422f05297191ead13e21d3db520e5abef52055e4964b82fb213f593a11801"}, + {0x72, true, "aa010aa7010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a280806100618012a20631b10ce49ece4cc9130befac927865742fb11caf2e8fc08fc00a4a25e4bc7940a28080210021801222035f8ea805390e084854f399b42ccdeaea33a1dedc115638ac48d0600637dba1f1a270a01721220454349e422f05297191ead13e21d3db520e5abef52055e4964b82fb213f593a11801"}, + {0x99, true, 
"d4010ad1010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a2808061006180122201d6b29f2c439fc9f15703eb7031e4a216002ea36ee9496583f97b20302b6a74e0a280804100418012a2043b83a6acefd4fd33970d1bc8fc47bed81220c752b8de7053e8ee082a2c7c1290a280802100218012a208f69a1db006c0ee9fad3c7c624b92acc88e9ed00771976ea24a64796c236fef01a270a01991220fd9528b920d6d3956e9e16114523e1889c751e8c1e040182116d4c906b43f5581801"}, + {0xaa, false, "a9020aa6020a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a2808061006180122201d6b29f2c439fc9f15703eb7031e4a216002ea36ee9496583f97b20302b6a74e0a280804100418012a2043b83a6acefd4fd33970d1bc8fc47bed81220c752b8de7053e8ee082a2c7c1290a280802100218012220a303930ca8831618ac7e4ddd10546cfc366fb730d6630c030a97226bbefc6935122a0a280802100218012a2077ad141b2010cf7107de941aac5b46f44fa4f41251076656a72308263a964fb91a270a01a112208a8950f7623663222542c9469c73be3c4c81bbdf019e2c577590a61f2ce9a15718011a270a01e412205e1effe9b7bab73dce628ccd9f0cbbb16c1e6efc6c4f311e59992a467bc119fd1801"}, + {0xe4, true, "d4010ad1010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a2808061006180122201d6b29f2c439fc9f15703eb7031e4a216002ea36ee9496583f97b20302b6a74e0a2808041004180122208bc4764843fdd745dc853fa62f2fac0001feae9e46136192f466c09773e2ed050a280802100218012a2077ad141b2010cf7107de941aac5b46f44fa4f41251076656a72308263a964fb91a270a01e412205e1effe9b7bab73dce628ccd9f0cbbb16c1e6efc6c4f311e59992a467bc119fd1801"}, + {0xf7, true, "d4010ad1010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a2808061006180122201d6b29f2c439fc9f15703eb7031e4a216002ea36ee9496583f97b20302b6a74e0a2808041004180122208bc4764843fdd745dc853fa62f2fac0001feae9e46136192f466c09773e2ed050a28080210021801222032af6e3eec2b63d5fe1bd992a89ef3467b3cee639c068cace942f01326098f171a270a01f7122050868f20258bbc9cce0da2719e8654c108733dd2f663b8737c574ec0ead93eb31801"}, + {0xff, false, 
"d4010ad1010a280808100a18012220e39776faa9ef2b83ae828860d24f807efab321d02b78081c0e68e1bf801b0e220a2808061006180122201d6b29f2c439fc9f15703eb7031e4a216002ea36ee9496583f97b20302b6a74e0a2808041004180122208bc4764843fdd745dc853fa62f2fac0001feae9e46136192f466c09773e2ed050a28080210021801222032af6e3eec2b63d5fe1bd992a89ef3467b3cee639c068cace942f01326098f171a270a01f7122050868f20258bbc9cce0da2719e8654c108733dd2f663b8737c574ec0ead93eb31801"}, + } + + for _, tc := range testcases { + tc := tc + t.Run(fmt.Sprintf("%02x", tc.key), func(t *testing.T) { + key := []byte{tc.key} + value, proof, err := tree.ImmutableTree().GetWithProof(key) + require.NoError(t, err) + + // Verify that proof is valid. + err = proof.Verify(root) + require.NoError(t, err) + + // Encode and decode proof, either ValueOp or AbsentOp depending on key existence. + expectBytes, err := hex.DecodeString(tc.expectProofOp) + require.NoError(t, err) + + if tc.expectPresent { + require.NotNil(t, value) + err = proof.VerifyItem(key, value) + require.NoError(t, err) + + valueOp := NewValueOp(key, proof) + proofOp := valueOp.ProofOp() + assert.Equal(t, tmmerkle.ProofOp{ + Type: ProofOpIAVLValue, + Key: key, + Data: expectBytes, + }, proofOp) + + d, e := ValueOpDecoder(proofOp) + require.NoError(t, e) + decoded := d.(ValueOp) + err = decoded.Proof.Verify(root) + require.NoError(t, err) + assert.Equal(t, valueOp, decoded) + + } else { + require.Nil(t, value) + err = proof.VerifyAbsence(key) + require.NoError(t, err) + + absenceOp := NewAbsenceOp(key, proof) + proofOp := absenceOp.ProofOp() + assert.Equal(t, tmmerkle.ProofOp{ + Type: ProofOpIAVLAbsence, + Key: key, + Data: expectBytes, + }, proofOp) + + d, e := AbsenceOpDecoder(proofOp) + require.NoError(t, e) + decoded := d.(AbsenceOp) + err = decoded.Proof.Verify(root) + require.NoError(t, err) + assert.Equal(t, absenceOp, decoded) + } + }) + } +} diff --git a/sei-iavl/proof_iavl_value.go b/sei-iavl/proof_iavl_value.go new file mode 100644 index 0000000000..46136fd3cd --- 
/dev/null +++ b/sei-iavl/proof_iavl_value.go @@ -0,0 +1,112 @@ +package iavl + +import ( + "fmt" + + proto "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" + tmmerkle "github.com/tendermint/tendermint/proto/tendermint/crypto" + + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" + iavlproto "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +const ProofOpIAVLValue = "iavl:v" + +// IAVLValueOp takes a key and a single value as argument and +// produces the root hash. +// +// If the produced root hash matches the expected hash, the proof +// is good. +type ValueOp struct { + // Encoded in ProofOp.Key. + key []byte + + // To encode in ProofOp.Data. + // Proof is nil for an empty tree. + // The hash of an empty tree is nil. + Proof *RangeProof `json:"proof"` +} + +var _ merkle.ProofOperator = ValueOp{} + +func NewValueOp(key []byte, proof *RangeProof) ValueOp { + return ValueOp{ + key: key, + Proof: proof, + } +} + +func ValueOpDecoder(pop tmmerkle.ProofOp) (merkle.ProofOperator, error) { + if pop.Type != ProofOpIAVLValue { + return nil, errors.Errorf("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpIAVLValue) + } + // Strip the varint length prefix, used for backwards compatibility with Amino. 
+ bz, n, err := encoding.DecodeBytes(pop.Data) + if err != nil { + return nil, err + } + if n != len(pop.Data) { + return nil, fmt.Errorf("unexpected bytes, expected %v got %v", n, len(pop.Data)) + } + pbProofOp := &iavlproto.ValueOp{} + err = proto.Unmarshal(bz, pbProofOp) + if err != nil { + return nil, err + } + proof, err := RangeProofFromProto(pbProofOp.Proof) + if err != nil { + return nil, err + } + return NewValueOp(pop.Key, &proof), nil +} + +func (op ValueOp) ProofOp() tmmerkle.ProofOp { + pbProof := iavlproto.ValueOp{Proof: op.Proof.ToProto()} + bz, err := proto.Marshal(&pbProof) + if err != nil { + panic(err) + } + // We length-prefix the byte slice to retain backwards compatibility with the Amino proofs. + bz, err = encoding.EncodeBytesSlice(bz) + if err != nil { + panic(err) + } + return tmmerkle.ProofOp{ + Type: ProofOpIAVLValue, + Key: op.key, + Data: bz, + } +} + +func (op ValueOp) String() string { + return fmt.Sprintf("IAVLValueOp{%v}", op.GetKey()) +} + +func (op ValueOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 1 { + return nil, errors.New("value size is not 1") + } + value := args[0] + + // Compute the root hash and assume it is valid. + // The caller checks the ultimate root later. + root := op.Proof.ComputeRootHash() + err := op.Proof.Verify(root) + if err != nil { + return nil, errors.Wrap(err, "computing root hash") + } + // XXX What is the encoding for keys? + // We should decode the key depending on whether it's a string or hex, + // maybe based on quotes and 0x prefix? 
+ err = op.Proof.VerifyItem(op.key, value) + if err != nil { + return nil, errors.Wrap(err, "verifying value") + } + return [][]byte{root}, nil +} + +func (op ValueOp) GetKey() []byte { + return op.key +} diff --git a/sei-iavl/proof_ics23.go b/sei-iavl/proof_ics23.go new file mode 100644 index 0000000000..991359a790 --- /dev/null +++ b/sei-iavl/proof_ics23.go @@ -0,0 +1,168 @@ +package iavl + +import ( + "encoding/binary" + "fmt" + + ics23 "github.com/confio/ics23/go" +) + +/* +GetMembershipProof will produce a CommitmentProof that the given key (and queries value) exists in the iavl tree. +If the key doesn't exist in the tree, this will return an error. +*/ +func (t *ImmutableTree) GetMembershipProof(key []byte) (*ics23.CommitmentProof, error) { + exist, err := createExistenceProof(t, key) + if err != nil { + return nil, err + } + proof := &ics23.CommitmentProof{ + Proof: &ics23.CommitmentProof_Exist{ + Exist: exist, + }, + } + return proof, nil +} + +/* +GetNonMembershipProof will produce a CommitmentProof that the given key doesn't exist in the iavl tree. +If the key exists in the tree, this will return an error. +*/ +func (t *ImmutableTree) GetNonMembershipProof(key []byte) (*ics23.CommitmentProof, error) { + // idx is one node right of what we want.... 
+ var err error + idx, val, err := t.GetWithIndex(key) + if err != nil { + return nil, err + } + + if val != nil { + return nil, fmt.Errorf("cannot create NonExistanceProof when Key in State") + } + + nonexist := &ics23.NonExistenceProof{ + Key: key, + } + + if idx >= 1 { + leftkey, _, err := t.GetByIndex(idx - 1) + if err != nil { + return nil, err + } + + nonexist.Left, err = createExistenceProof(t, leftkey) + if err != nil { + return nil, err + } + } + + // this will be nil if nothing right of the queried key + rightkey, _, err := t.GetByIndex(idx) + if err != nil { + return nil, err + } + + if rightkey != nil { + nonexist.Right, err = createExistenceProof(t, rightkey) + if err != nil { + return nil, err + } + } + + proof := &ics23.CommitmentProof{ + Proof: &ics23.CommitmentProof_Nonexist{ + Nonexist: nonexist, + }, + } + return proof, nil +} + +func createExistenceProof(tree *ImmutableTree, key []byte) (*ics23.ExistenceProof, error) { + value, proof, err := tree.GetWithProof(key) + if err != nil { + return nil, err + } + if value == nil { + return nil, fmt.Errorf("cannot create ExistanceProof when Key not in State") + } + return convertExistenceProof(proof, key, value) +} + +// convertExistenceProof will convert the given proof into a valid +// existence proof, if that's what it is. 
+// +// This is the simplest case of the range proof and we will focus on +// demoing compatibility here +func convertExistenceProof(p *RangeProof, key, value []byte) (*ics23.ExistenceProof, error) { + if len(p.Leaves) != 1 { + return nil, fmt.Errorf("existence proof requires RangeProof to have exactly one leaf") + } + return &ics23.ExistenceProof{ + Key: key, + Value: value, + Leaf: convertLeafOp(p.Leaves[0].Version), + Path: convertInnerOps(p.LeftPath), + }, nil +} + +func convertLeafOp(version int64) *ics23.LeafOp { + var varintBuf [binary.MaxVarintLen64]byte + // this is adapted from iavl/proof.go:proofLeafNode.Hash() + prefix := convertVarIntToBytes(0, varintBuf) + prefix = append(prefix, convertVarIntToBytes(1, varintBuf)...) + prefix = append(prefix, convertVarIntToBytes(version, varintBuf)...) + + return &ics23.LeafOp{ + Hash: ics23.HashOp_SHA256, + PrehashValue: ics23.HashOp_SHA256, + Length: ics23.LengthOp_VAR_PROTO, + Prefix: prefix, + } +} + +// we cannot get the proofInnerNode type, so we need to do the whole path in one function +func convertInnerOps(path PathToLeaf) []*ics23.InnerOp { + steps := make([]*ics23.InnerOp, 0, len(path)) + + // lengthByte is the length prefix prepended to each of the sha256 sub-hashes + var lengthByte byte = 0x20 + + var varintBuf [binary.MaxVarintLen64]byte + + // we need to go in reverse order, iavl starts from root to leaf, + // we want to go up from the leaf to the root + for i := len(path) - 1; i >= 0; i-- { + // this is adapted from iavl/proof.go:proofInnerNode.Hash() + prefix := convertVarIntToBytes(int64(path[i].Height), varintBuf) + prefix = append(prefix, convertVarIntToBytes(path[i].Size, varintBuf)...) + prefix = append(prefix, convertVarIntToBytes(path[i].Version, varintBuf)...) + + var suffix []byte + if len(path[i].Left) > 0 { + // length prefixed left side + prefix = append(prefix, lengthByte) + prefix = append(prefix, path[i].Left...) 
+ // prepend the length prefix for child + prefix = append(prefix, lengthByte) + } else { + // prepend the length prefix for child + prefix = append(prefix, lengthByte) + // length-prefixed right side + suffix = []byte{lengthByte} + suffix = append(suffix, path[i].Right...) + } + + op := &ics23.InnerOp{ + Hash: ics23.HashOp_SHA256, + Prefix: prefix, + Suffix: suffix, + } + steps = append(steps, op) + } + return steps +} + +func convertVarIntToBytes(orig int64, buf [binary.MaxVarintLen64]byte) []byte { + n := binary.PutVarint(buf[:], orig) + return buf[:n] +} diff --git a/sei-iavl/proof_ics23_test.go b/sei-iavl/proof_ics23_test.go new file mode 100644 index 0000000000..c375ab41c5 --- /dev/null +++ b/sei-iavl/proof_ics23_test.go @@ -0,0 +1,325 @@ +package iavl + +import ( + "bytes" + "fmt" + "math/rand" + "sort" + "testing" + + ics23 "github.com/confio/ics23/go" + "github.com/stretchr/testify/require" + + db "github.com/tendermint/tm-db" +) + +func TestConvertExistence(t *testing.T) { + proof, err := GenerateResult(200, Middle) + require.NoError(t, err) + + converted, err := convertExistenceProof(proof.Proof, proof.Key, proof.Value) + require.NoError(t, err) + + calc, err := converted.Calculate() + require.NoError(t, err) + + require.Equal(t, []byte(calc), proof.RootHash, "Calculated: %X\nExpected: %X", calc, proof.RootHash) +} + +func TestGetMembership(t *testing.T) { + cases := map[string]struct { + size int + loc Where + }{ + "small left": {size: 100, loc: Left}, + "small middle": {size: 100, loc: Middle}, + "small right": {size: 100, loc: Right}, + "big left": {size: 5431, loc: Left}, + "big middle": {size: 5431, loc: Middle}, + "big right": {size: 5431, loc: Right}, + } + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + tree, allkeys, err := BuildTree(tc.size, 0) + require.NoError(t, err, "Creating tree: %+v", err) + + key := GetKey(allkeys, tc.loc) + val, err := tree.Get(key) + require.NoError(t, err) + proof, err := 
tree.ImmutableTree().GetMembershipProof(key) + require.NoError(t, err, "Creating Proof: %+v", err) + + root, err := tree.Hash() + require.NoError(t, err) + valid := ics23.VerifyMembership(ics23.IavlSpec, root, proof, key, val) + if !valid { + require.NoError(t, err, "Membership Proof Invalid") + } + }) + } +} + +func TestGetNonMembership(t *testing.T) { + cases := map[string]struct { + size int + loc Where + }{ + "small left": {size: 100, loc: Left}, + "small middle": {size: 100, loc: Middle}, + "small right": {size: 100, loc: Right}, + "big left": {size: 5431, loc: Left}, + "big middle": {size: 5431, loc: Middle}, + "big right": {size: 5431, loc: Right}, + } + + performTest := func(tree *MutableTree, allKeys [][]byte, loc Where) { + key := GetNonKey(allKeys, loc) + + proof, err := tree.ImmutableTree().GetNonMembershipProof(key) + require.NoError(t, err, "Creating Proof: %+v", err) + + root, err := tree.Hash() + require.NoError(t, err) + valid := ics23.VerifyNonMembership(ics23.IavlSpec, root, proof, key) + if !valid { + require.NoError(t, err, "Non Membership Proof Invalid") + } + } + + for name, tc := range cases { + tc := tc + t.Run("fast-"+name, func(t *testing.T) { + tree, allkeys, err := BuildTree(tc.size, 0) + require.NoError(t, err, "Creating tree: %+v", err) + // Save version to enable fast cache + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + + performTest(tree, allkeys, tc.loc) + }) + + t.Run("regular-"+name, func(t *testing.T) { + tree, allkeys, err := BuildTree(tc.size, 0) + require.NoError(t, err, "Creating tree: %+v", err) + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(t, err) + require.False(t, isFastCacheEnabled) + + performTest(tree, allkeys, tc.loc) + }) + } +} + +func BenchmarkGetNonMembership(b *testing.B) { + cases := []struct { + size int + loc Where + 
}{ + {size: 100, loc: Left}, + {size: 100, loc: Middle}, + {size: 100, loc: Right}, + {size: 5431, loc: Left}, + {size: 5431, loc: Middle}, + {size: 5431, loc: Right}, + } + + performTest := func(tree *MutableTree, allKeys [][]byte, loc Where) { + key := GetNonKey(allKeys, loc) + + proof, err := tree.ImmutableTree().GetNonMembershipProof(key) + require.NoError(b, err, "Creating Proof: %+v", err) + + b.StopTimer() + root, err := tree.Hash() + require.NoError(b, err) + valid := ics23.VerifyNonMembership(ics23.IavlSpec, root, proof, key) + if !valid { + require.NoError(b, err, "Non Membership Proof Invalid") + } + b.StartTimer() + } + + b.Run("fast", func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + caseIdx := rand.Intn(len(cases)) + tc := cases[caseIdx] + + tree, allkeys, err := BuildTree(tc.size, 100000) + require.NoError(b, err, "Creating tree: %+v", err) + // Save version to enable fast cache + _, _, err = tree.SaveVersion() + require.NoError(b, err) + + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.True(b, isFastCacheEnabled) + b.StartTimer() + performTest(tree, allkeys, tc.loc) + } + }) + + b.Run("regular", func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + caseIdx := rand.Intn(len(cases)) + tc := cases[caseIdx] + + tree, allkeys, err := BuildTree(tc.size, 100000) + require.NoError(b, err, "Creating tree: %+v", err) + isFastCacheEnabled, err := tree.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.False(b, isFastCacheEnabled) + + b.StartTimer() + performTest(tree, allkeys, tc.loc) + } + }) +} + +// Test Helpers + +// Result is the result of one match +type Result struct { + Key []byte + Value []byte + Proof *RangeProof + RootHash []byte +} + +// GenerateResult makes a tree of size and returns a range proof for one random element +// +// returns a range proof and the root hash of the tree +func GenerateResult(size int, loc Where) (*Result, error) 
{ + tree, allkeys, err := BuildTree(size, 0) + if err != nil { + return nil, err + } + _, _, err = tree.SaveVersion() + if err != nil { + return nil, err + } + key := GetKey(allkeys, loc) + + value, proof, err := tree.ImmutableTree().GetWithProof(key) + if err != nil { + return nil, err + } + if value == nil { + return nil, fmt.Errorf("tree.GetWithProof returned nil value") + } + if len(proof.Leaves) != 1 { + return nil, fmt.Errorf("tree.GetWithProof returned %d leaves", len(proof.Leaves)) + } + root, err := tree.Hash() + if err != nil { + return nil, err + } + + res := &Result{ + Key: key, + Value: value, + Proof: proof, + RootHash: root, + } + return res, nil +} + +// Where selects a location for a key - Left, Right, or Middle +type Where int + +const ( + Left Where = iota + Right + Middle +) + +// GetKey this returns a key, on Left/Right/Middle +func GetKey(allkeys [][]byte, loc Where) []byte { + if loc == Left { + return allkeys[0] + } + if loc == Right { + return allkeys[len(allkeys)-1] + } + // select a random index between 1 and allkeys-2 + idx := rand.Int()%(len(allkeys)-2) + 1 + return allkeys[idx] +} + +// GetNonKey returns a missing key - Left of all, Right of all, or in the Middle +func GetNonKey(allkeys [][]byte, loc Where) []byte { + if loc == Left { + return []byte{0, 0, 0, 1} + } + if loc == Right { + return []byte{0xff, 0xff, 0xff, 0xff} + } + // otherwise, next to an existing key (copy before mod) + key := append([]byte{}, GetKey(allkeys, loc)...) 
+ key[len(key)-2] = 255 + key[len(key)-1] = 255 + return key +} + +// BuildTree creates random key/values and stores in tree +// returns a list of all keys in sorted order +func BuildTree(size int, cacheSize int) (itree *MutableTree, keys [][]byte, err error) { + tree, _ := NewMutableTree(db.NewMemDB(), cacheSize, false) + + // insert lots of info and store the bytes + keys = make([][]byte, size) + for i := 0; i < size; i++ { + key := make([]byte, 4) + // create random 4 byte key + rand.Read(key) + value := "value_for_key:" + string(key) + tree.Set(key, []byte(value)) + keys[i] = key + } + sort.Slice(keys, func(i, j int) bool { + return bytes.Compare(keys[i], keys[j]) < 0 + }) + + return tree, keys, nil +} + +// sink is kept as a global to ensure that value checks and assignments to it can't be +// optimized away, and this will help us ensure that benchmarks successfully run. +var sink interface{} + +func BenchmarkConvertLeafOp(b *testing.B) { + var versions = []int64{ + 0, + 1, + 100, + 127, + 128, + 1 << 29, + -0, + -1, + -100, + -127, + -128, + -1 << 29, + } + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + for _, version := range versions { + sink = convertLeafOp(version) + } + } + if sink == nil { + b.Fatal("Benchmark wasn't run") + } + sink = nil +} diff --git a/sei-iavl/proof_path.go b/sei-iavl/proof_path.go new file mode 100644 index 0000000000..690ce0ab32 --- /dev/null +++ b/sei-iavl/proof_path.go @@ -0,0 +1,119 @@ +package iavl + +import ( + "fmt" + "strings" +) + +// pathWithLeaf is a path to a leaf node and the leaf node itself. 
+type pathWithLeaf struct { + Path PathToLeaf `json:"path"` + Leaf ProofLeafNode `json:"leaf"` +} + +func (pwl pathWithLeaf) String() string { + return pwl.StringIndented("") +} + +func (pwl pathWithLeaf) StringIndented(indent string) string { + return fmt.Sprintf(`pathWithLeaf{ +%s Path: %v +%s Leaf: %v +%s}`, + indent, pwl.Path.stringIndented(indent+" "), + indent, pwl.Leaf.stringIndented(indent+" "), + indent) +} + +// `computeRootHash` computes the root hash with leaf node. +// Does not verify the root hash. +func (pwl pathWithLeaf) computeRootHash() ([]byte, error) { + leafHash, err := pwl.Leaf.Hash() + if err != nil { + return nil, err + } + return pwl.Path.computeRootHash(leafHash) +} + +//---------------------------------------- + +// PathToLeaf represents an inner path to a leaf node. +// Note that the nodes are ordered such that the last one is closest +// to the root of the tree. +type PathToLeaf []ProofInnerNode + +func (pl PathToLeaf) String() string { + return pl.stringIndented("") +} + +func (pl PathToLeaf) stringIndented(indent string) string { + if len(pl) == 0 { + return "empty-PathToLeaf" + } + strs := make([]string, 0, len(pl)) + for i, pin := range pl { + if i == 20 { + strs = append(strs, fmt.Sprintf("... (%v total)", len(pl))) + break + } + strs = append(strs, fmt.Sprintf("%v:%v", i, pin.stringIndented(indent+" "))) + } + return fmt.Sprintf(`PathToLeaf{ +%s %v +%s}`, + indent, strings.Join(strs, "\n"+indent+" "), + indent) +} + +// `computeRootHash` computes the root hash assuming some leaf hash. +// Does not verify the root hash. +// Contract: Caller must verify that the roothash is correct by calling `.verify()`. 
+func (pl PathToLeaf) computeRootHash(leafHash []byte) ([]byte, error) { + var err error + hash := leafHash + for i := len(pl) - 1; i >= 0; i-- { + pin := pl[i] + hash, err = pin.Hash(hash) + if err != nil { + return nil, err + } + } + return hash, nil +} + +func (pl PathToLeaf) isLeftmost() bool { + for _, node := range pl { + if len(node.Left) > 0 { + return false + } + } + return true +} + +func (pl PathToLeaf) isRightmost() bool { + for _, node := range pl { + if len(node.Right) > 0 { + return false + } + } + return true +} + +// returns -1 if invalid. +func (pl PathToLeaf) Index() (idx int64) { + for i, node := range pl { + switch { + case node.Left == nil: + continue + case node.Right == nil: + if i < len(pl)-1 { + idx += node.Size - pl[i+1].Size + } else { + idx += node.Size - 1 + } + default: + return -1 + } + } + return idx +} diff --git a/sei-iavl/proof_range.go b/sei-iavl/proof_range.go new file mode 100644 index 0000000000..1ea0798e2d --- /dev/null +++ b/sei-iavl/proof_range.go @@ -0,0 +1,584 @@ +package iavl + +import ( + "bytes" + "crypto/sha256" + "fmt" + "sort" + "strings" + + "github.com/pkg/errors" + + iavlproto "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +type RangeProof struct { + // You don't need the right path because + // it can be derived from what we have. + LeftPath PathToLeaf `json:"left_path"` + InnerNodes []PathToLeaf `json:"inner_nodes"` + Leaves []ProofLeafNode `json:"leaves"` + + // memoize + rootHash []byte // valid iff rootVerified is true + rootVerified bool + treeEnd bool // valid iff rootVerified is true +} + +// Keys returns all the keys in the RangeProof. NOTE: The keys here may +// include more keys than provided by tree.GetRangeWithProof or +// MutableTree.GetVersionedRangeWithProof. The keys returned there are only +// in the provided [startKey,endKey){limit} range. 
The keys returned here may +// include extra keys, such as: +// - the key before startKey if startKey is provided and doesn't exist; +// - the key after a queried key with tree.GetWithProof, when the key is absent. +func (proof *RangeProof) Keys() (keys [][]byte) { + if proof == nil { + return nil + } + for _, leaf := range proof.Leaves { + keys = append(keys, leaf.Key) + } + return keys +} + +// String returns a string representation of the proof. +func (proof *RangeProof) String() string { + if proof == nil { + return "" + } + return proof.StringIndented("") +} + +func (proof *RangeProof) StringIndented(indent string) string { + istrs := make([]string, 0, len(proof.InnerNodes)) + for _, ptl := range proof.InnerNodes { + istrs = append(istrs, ptl.stringIndented(indent+" ")) + } + lstrs := make([]string, 0, len(proof.Leaves)) + for _, leaf := range proof.Leaves { + lstrs = append(lstrs, leaf.stringIndented(indent+" ")) + } + return fmt.Sprintf(`RangeProof{ +%s LeftPath: %v +%s InnerNodes: +%s %v +%s Leaves: +%s %v +%s (rootVerified): %v +%s (rootHash): %X +%s (treeEnd): %v +%s}`, + indent, proof.LeftPath.stringIndented(indent+" "), + indent, + indent, strings.Join(istrs, "\n"+indent+" "), + indent, + indent, strings.Join(lstrs, "\n"+indent+" "), + indent, proof.rootVerified, + indent, proof.rootHash, + indent, proof.treeEnd, + indent) +} + +// The index of the first leaf (of the whole tree). +// Returns -1 if the proof is nil. +func (proof *RangeProof) LeftIndex() int64 { + if proof == nil { + return -1 + } + return proof.LeftPath.Index() +} + +// Also see LeftIndex(). +// Verify that a key has some value. +// Does not assume that the proof itself is valid, call Verify() first. 
+func (proof *RangeProof) VerifyItem(key, value []byte) error { + if proof == nil { + return errors.Wrap(ErrInvalidProof, "proof is nil") + } + + if !proof.rootVerified { + return errors.New("must call Verify(root) first") + } + + leaves := proof.Leaves + i := sort.Search(len(leaves), func(i int) bool { + return bytes.Compare(key, leaves[i].Key) <= 0 + }) + + if i >= len(leaves) || !bytes.Equal(leaves[i].Key, key) { + return errors.Wrap(ErrInvalidProof, "leaf key not found in proof") + } + + h := sha256.Sum256(value) + valueHash := h[:] + if !bytes.Equal(leaves[i].ValueHash, valueHash) { + return errors.Wrap(ErrInvalidProof, "leaf value hash not same") + } + + return nil +} + +// Verify that proof is valid absence proof for key. +// Does not assume that the proof itself is valid. +// For that, use Verify(root). +func (proof *RangeProof) VerifyAbsence(key []byte) error { + if proof == nil { + return errors.Wrap(ErrInvalidProof, "proof is nil") + } + if !proof.rootVerified { + return errors.New("must call Verify(root) first") + } + cmp := bytes.Compare(key, proof.Leaves[0].Key) + if cmp < 0 { + if proof.LeftPath.isLeftmost() { + return nil + } + return errors.New("absence not proved by left path") + + } else if cmp == 0 { + return errors.New("absence disproved via first item #0") + } + if len(proof.LeftPath) == 0 { + return nil // proof ok + } + if proof.LeftPath.isRightmost() { + return nil + } + + // See if any of the leaves are greater than key. + for i := 1; i < len(proof.Leaves); i++ { + leaf := proof.Leaves[i] + cmp := bytes.Compare(key, leaf.Key) + switch { + case cmp < 0: + return nil // proof ok + case cmp == 0: + return fmt.Errorf("absence disproved via item #%v", i) + default: + // if i == len(proof.Leaves)-1 { + // If last item, check whether + // it's the last item in the tree. + + // } + continue + } + } + + // It's still a valid proof if our last leaf is the rightmost child. + if proof.treeEnd { + return nil // OK! 
+ } + + // It's not a valid absence proof. + if len(proof.Leaves) < 2 { + return errors.New("absence not proved by right leaf (need another leaf?)") + } + return errors.New("absence not proved by right leaf") + +} + +// Verify that proof is valid. +func (proof *RangeProof) Verify(root []byte) error { + if proof == nil { + return errors.Wrap(ErrInvalidProof, "proof is nil") + } + err := proof.verify(root) + return err +} + +func (proof *RangeProof) verify(root []byte) (err error) { + rootHash := proof.rootHash + if rootHash == nil { + derivedHash, err := proof.computeRootHash() + if err != nil { + return err + } + rootHash = derivedHash + } + if !bytes.Equal(rootHash, root) { + return errors.Wrap(ErrInvalidRoot, "root hash doesn't match") + } + proof.rootVerified = true + return nil +} + +// ComputeRootHash computes the root hash with leaves. +// Returns nil if error or proof is nil. +// Does not verify the root hash. +func (proof *RangeProof) ComputeRootHash() []byte { + if proof == nil { + return nil + } + + rootHash, _ := proof.computeRootHash() + + return rootHash +} + +func (proof *RangeProof) computeRootHash() (rootHash []byte, err error) { + rootHash, treeEnd, err := proof._computeRootHash() + if err == nil { + proof.rootHash = rootHash // memoize + proof.treeEnd = treeEnd // memoize + } + return rootHash, err +} + +func (proof *RangeProof) _computeRootHash() (rootHash []byte, treeEnd bool, err error) { + if len(proof.Leaves) == 0 { + return nil, false, errors.Wrap(ErrInvalidProof, "no leaves") + } + if len(proof.InnerNodes)+1 != len(proof.Leaves) { + return nil, false, errors.Wrap(ErrInvalidProof, "InnerNodes vs Leaves length mismatch, leaves should be 1 more.") //nolint:revive + } + + // Start from the left path and prove each leaf. 
+ + // shared across recursive calls + var leaves = proof.Leaves + var innersq = proof.InnerNodes + var COMPUTEHASH func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error) + + // rightmost: is the root a rightmost child of the tree? + // treeEnd: true iff the last leaf is the last item of the tree. + // Returns the (possibly intermediate, possibly root) hash. + COMPUTEHASH = func(path PathToLeaf, rightmost bool) (hash []byte, treeEnd bool, done bool, err error) { + + // Pop next leaf. + nleaf, rleaves := leaves[0], leaves[1:] + leaves = rleaves + + // Compute hash. + hash, err = (pathWithLeaf{ + Path: path, + Leaf: nleaf, + }).computeRootHash() + + if err != nil { + return nil, treeEnd, false, err + } + + // If we don't have any leaves left, we're done. + if len(leaves) == 0 { + rightmost = rightmost && path.isRightmost() + return hash, rightmost, true, nil + } + + // Prove along path (until we run out of leaves). + for len(path) > 0 { + + // Drop the leaf-most (last-most) inner nodes from path + // until we encounter one with a left hash. + // We assume that the left side is already verified. + // rpath: rest of path + // lpath: last path item + rpath, lpath := path[:len(path)-1], path[len(path)-1] + path = rpath + if len(lpath.Right) == 0 { + continue + } + + // Pop next inners, a PathToLeaf (e.g. []ProofInnerNode). + inners, rinnersq := innersq[0], innersq[1:] + innersq = rinnersq + + // Recursively verify inners against remaining leaves. + derivedRoot, treeEnd, done, err := COMPUTEHASH(inners, rightmost && rpath.isRightmost()) + if err != nil { + return nil, treeEnd, false, errors.Wrap(err, "recursive COMPUTEHASH call") + } + + if !bytes.Equal(derivedRoot, lpath.Right) { + return nil, treeEnd, false, errors.Wrapf(ErrInvalidRoot, "intermediate root hash %X doesn't match, got %X", lpath.Right, derivedRoot) + } + + if done { + return hash, treeEnd, true, nil + } + } + + // We're not done yet (leaves left over). 
No error, not done either. + // Technically if rightmost, we know there's an error "left over leaves + // -- malformed proof", but we return that at the top level, below. + return hash, false, false, nil + } + + // Verify! + path := proof.LeftPath + rootHash, treeEnd, done, err := COMPUTEHASH(path, true) + if err != nil { + return nil, treeEnd, errors.Wrap(err, "root COMPUTEHASH call") + } else if !done { + return nil, treeEnd, errors.Wrap(ErrInvalidProof, "left over leaves -- malformed proof") + } + + // Ok! + return rootHash, treeEnd, nil +} + +// toProto converts the proof to a Protobuf representation, for use in ValueOp and AbsenceOp. +func (proof *RangeProof) ToProto() *iavlproto.RangeProof { + pb := &iavlproto.RangeProof{ + LeftPath: make([]*iavlproto.ProofInnerNode, 0, len(proof.LeftPath)), + InnerNodes: make([]*iavlproto.PathToLeaf, 0, len(proof.InnerNodes)), + Leaves: make([]*iavlproto.ProofLeafNode, 0, len(proof.Leaves)), + } + for _, inner := range proof.LeftPath { + pb.LeftPath = append(pb.LeftPath, inner.toProto()) + } + for _, path := range proof.InnerNodes { + pbPath := make([]*iavlproto.ProofInnerNode, 0, len(path)) + for _, inner := range path { + pbPath = append(pbPath, inner.toProto()) + } + pb.InnerNodes = append(pb.InnerNodes, &iavlproto.PathToLeaf{Inners: pbPath}) + } + for _, leaf := range proof.Leaves { + pb.Leaves = append(pb.Leaves, leaf.toProto()) + } + + return pb +} + +// rangeProofFromProto generates a RangeProof from a Protobuf RangeProof. 
+func RangeProofFromProto(pbProof *iavlproto.RangeProof) (RangeProof, error) { + proof := RangeProof{} + + for _, pbInner := range pbProof.LeftPath { + inner, err := proofInnerNodeFromProto(pbInner) + if err != nil { + return proof, err + } + proof.LeftPath = append(proof.LeftPath, inner) + } + + for _, pbPath := range pbProof.InnerNodes { + var path PathToLeaf // leave as nil unless populated, for Amino compatibility + if pbPath != nil { + for _, pbInner := range pbPath.Inners { + inner, err := proofInnerNodeFromProto(pbInner) + if err != nil { + return proof, err + } + path = append(path, inner) + } + } + proof.InnerNodes = append(proof.InnerNodes, path) + } + + for _, pbLeaf := range pbProof.Leaves { + leaf, err := proofLeafNodeFromProto(pbLeaf) + if err != nil { + return proof, err + } + proof.Leaves = append(proof.Leaves, leaf) + } + return proof, nil +} + +// keyStart is inclusive and keyEnd is exclusive. +// If keyStart or keyEnd don't exist, the leaf before keyStart +// or after keyEnd will also be included, but not be included in values. +// If keyEnd-1 exists, no later leaves will be included. +// If keyStart >= keyEnd and both not nil, errors out. +// Limit is never exceeded. + +func (t *ImmutableTree) getRangeProof(keyStart, keyEnd []byte, limit int) (proof *RangeProof, keys, values [][]byte, err error) { + if keyStart != nil && keyEnd != nil && bytes.Compare(keyStart, keyEnd) >= 0 { + return nil, nil, nil, fmt.Errorf("if keyStart and keyEnd are present, need keyStart < keyEnd") + } + if limit < 0 { + return nil, nil, nil, fmt.Errorf("limit must be greater or equal to 0 -- 0 means no limit") + } + if t.root == nil { + return nil, nil, nil, nil + } + + _, _, err = t.root.hashWithCount() // Ensure that all hashes are calculated. + if err != nil { + return nil, nil, nil, err + } + + // Get the first key/value pair proof, which provides us with the left key. 
+ path, left, err := t.root.PathToLeaf(t, keyStart) + if err != nil { + // Key doesn't exist, but instead we got the prev leaf (or the + // first or last leaf), which provides proof of absence). + _ = err + } + startOK := keyStart == nil || bytes.Compare(keyStart, left.GetNodeKey()) <= 0 + endOK := keyEnd == nil || bytes.Compare(left.GetNodeKey(), keyEnd) < 0 + // If left.key is in range, add it to key/values. + if startOK && endOK { + keys = append(keys, left.GetNodeKey()) // == keyStart + values = append(values, left.GetValue()) + } + + h := sha256.Sum256(left.GetValue()) + var leaves = []ProofLeafNode{ + { + Key: left.GetNodeKey(), + ValueHash: h[:], + Version: left.GetVersion(), + }, + } + + // 1: Special case if limit is 1. + // 2: Special case if keyEnd is left.key+1. + _stop := false + if limit == 1 { + _stop = true // case 1 + } else if keyEnd != nil && bytes.Compare(cpIncr(left.GetNodeKey()), keyEnd) >= 0 { + _stop = true // case 2 + } + if _stop { + return &RangeProof{ + LeftPath: path, + Leaves: leaves, + }, keys, values, nil + } + + // Get the key after left.key to iterate from. + afterLeft := cpIncr(left.GetNodeKey()) + + // Traverse starting from afterLeft, until keyEnd or the next leaf + // after keyEnd. + var allPathToLeafs = []PathToLeaf(nil) + var currentPathToLeaf = PathToLeaf(nil) + var leafCount = 1 // from left above. + var pathCount = 0 + + t.root.traverseInRange(t, afterLeft, nil, true, false, false, + func(node *Node) (stop bool) { + + // Track when we diverge from path, or when we've exhausted path, + // since the first allPathToLeafs shouldn't include it. + if pathCount != -1 { + if len(path) <= pathCount { + // We're done with path counting. + pathCount = -1 + } else { + pn := path[pathCount] + if pn.Height != node.GetHeight() || + pn.Left != nil && !bytes.Equal(pn.Left, node.GetLeftHash()) || + pn.Right != nil && !bytes.Equal(pn.Right, node.GetRightHash()) { + + // We've diverged, so start appending to allPathToLeaf. 
+ pathCount = -1 + } else { + pathCount++ + } + } + } + + if node.GetHeight() == 0 { // Leaf node + // Append all paths that we tracked so far to get to this leaf node. + allPathToLeafs = append(allPathToLeafs, currentPathToLeaf) + // Start a new one to track as we traverse the tree. + currentPathToLeaf = PathToLeaf(nil) + + h := sha256.Sum256(node.GetValue()) + leaves = append(leaves, ProofLeafNode{ + Key: node.GetNodeKey(), + ValueHash: h[:], + Version: node.GetVersion(), + }) + + leafCount++ + + // Maybe terminate because we found enough leaves. + if limit > 0 && limit <= leafCount { + return true + } + + // Terminate if we've found keyEnd or after. + if keyEnd != nil && bytes.Compare(node.GetNodeKey(), keyEnd) >= 0 { + return true + } + + // Value is in range, append to keys and values. + keys = append(keys, node.GetNodeKey()) + values = append(values, node.GetValue()) + + // Terminate if we've found keyEnd-1 or after. + // We don't want to fetch any leaves for it. + if keyEnd != nil && bytes.Compare(cpIncr(node.GetNodeKey()), keyEnd) >= 0 { + return true + } + + } else if pathCount < 0 { // Inner node. + // Only store if the node is not stored in currentPathToLeaf already. We track if we are + // still going through PathToLeaf using pathCount. When pathCount goes to -1, we + // start storing the other paths we took to get to the leaf nodes. Also we skip + // storing the left node, since we are traversing the tree starting from the left + // and don't need to store unnecessary info as we only need to go down the right + // path. 
+ currentPathToLeaf = append(currentPathToLeaf, ProofInnerNode{ + Height: node.GetHeight(), + Size: node.GetSize(), + Version: node.GetVersion(), + Left: nil, + Right: node.GetRightHash(), + }) + } + return false + }, + ) + + return &RangeProof{ + LeftPath: path, + InnerNodes: allPathToLeafs, + Leaves: leaves, + }, keys, values, nil +} + +//---------------------------------------- + +// GetWithProof gets the value under the key if it exists, or returns nil. +// A proof of existence or absence is returned alongside the value. +func (t *ImmutableTree) GetWithProof(key []byte) (value []byte, proof *RangeProof, err error) { + proof, _, values, err := t.getRangeProof(key, cpIncr(key), 2) + if err != nil { + return nil, nil, errors.Wrap(err, "constructing range proof") + } + if len(values) > 0 && bytes.Equal(proof.Leaves[0].Key, key) { + return values[0], proof, nil + } + return nil, proof, nil +} + +// GetRangeWithProof gets key/value pairs within the specified range and limit. +func (t *ImmutableTree) GetRangeWithProof(startKey []byte, endKey []byte, limit int) (keys, values [][]byte, proof *RangeProof, err error) { + proof, keys, values, err = t.getRangeProof(startKey, endKey, limit) + return +} + +// GetVersionedWithProof gets the value under the key at the specified version +// if it exists, or returns nil. +func (tree *MutableTree) GetVersionedWithProof(key []byte, version int64) ([]byte, *RangeProof, error) { + if tree.VersionExists(version) { + t, err := tree.GetImmutable(version) + if err != nil { + return nil, nil, err + } + + return t.GetWithProof(key) + } + return nil, nil, errors.Wrap(ErrVersionDoesNotExist, "") +} + +// GetVersionedRangeWithProof gets key/value pairs within the specified range +// and limit. 
+func (tree *MutableTree) GetVersionedRangeWithProof(startKey, endKey []byte, limit int, version int64) ( + keys, values [][]byte, proof *RangeProof, err error) { + + if tree.VersionExists(version) { + t, err := tree.GetImmutable(version) + if err != nil { + return nil, nil, nil, err + } + return t.GetRangeWithProof(startKey, endKey, limit) + } + return nil, nil, nil, errors.Wrap(ErrVersionDoesNotExist, "") +} diff --git a/sei-iavl/proof_test.go b/sei-iavl/proof_test.go new file mode 100644 index 0000000000..7af16b39fe --- /dev/null +++ b/sei-iavl/proof_test.go @@ -0,0 +1,324 @@ +// nolint: errcheck +package iavl + +import ( + "bytes" + "sort" + "testing" + + proto "github.com/gogo/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + iavlrand "github.com/sei-protocol/sei-chain/sei-iavl/internal/rand" + iavlproto "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +func TestTreeGetWithProof(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + require := require.New(t) + for _, ikey := range []byte{0x11, 0x32, 0x50, 0x72, 0x99} { + key := []byte{ikey} + tree.Set(key, []byte(iavlrand.RandStr(8))) + } + root, err := tree.WorkingHash() + require.NoError(err) + + key := []byte{0x32} + val, proof, err := tree.ImmutableTree().GetWithProof(key) + require.NoError(err) + require.NotEmpty(val) + require.NotNil(proof) + + err = proof.VerifyItem(key, val) + require.Error(err, "%+v", err) // Verifying item before calling Verify(root) + + err = proof.Verify(root) + require.NoError(err, "%+v", err) + + err = proof.VerifyItem(key, val) + require.NoError(err, "%+v", err) + + key = []byte{0x1} + val, proof, err = tree.ImmutableTree().GetWithProof(key) + require.NoError(err) + require.Empty(val) + require.NotNil(proof) + + err = proof.VerifyAbsence(key) + require.Error(err, "%+v", err) // Verifying absence before calling Verify(root) + + err = proof.Verify(root) + require.NoError(err, "%+v", err) + + err = 
proof.VerifyAbsence(key) + require.NoError(err, "%+v", err) +} + +func TestTreeKeyExistsProof(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + root, err := tree.WorkingHash() + require.NoError(t, err) + + // should get false for proof with nil root + proof, keys, values, err := tree.ImmutableTree().getRangeProof([]byte("foo"), nil, 1) + assert.Nil(t, proof) + assert.Error(t, proof.Verify(root)) + assert.Nil(t, keys) + assert.Nil(t, values) + assert.NoError(t, err) + + // insert lots of info and store the bytes + allkeys := make([][]byte, 200) + for i := 0; i < 200; i++ { + key := iavlrand.RandStr(20) + value := "value_for_" + key + tree.Set([]byte(key), []byte(value)) + allkeys[i] = []byte(key) + } + sortByteSlices(allkeys) // Sort all keys + root, err = tree.WorkingHash() + require.NoError(t, err) + + // query random key fails + proof, _, _, err = tree.ImmutableTree().getRangeProof([]byte("foo"), nil, 2) + assert.Nil(t, err) + assert.Nil(t, proof.Verify(root)) + assert.Nil(t, proof.VerifyAbsence([]byte("foo")), proof.String()) + + // query min key fails + proof, _, _, err = tree.ImmutableTree().getRangeProof([]byte{0x00}, []byte{0x01}, 2) + assert.Nil(t, err) + assert.Nil(t, proof.Verify(root)) + assert.Nil(t, proof.VerifyAbsence([]byte{0x00})) + + // valid proof for real keys + for i, key := range allkeys { + var keys, values [][]byte + proof, keys, values, err = tree.ImmutableTree().getRangeProof(key, nil, 2) + require.Nil(t, err) + + require.Equal(t, + append([]byte("value_for_"), key...), + values[0], + ) + require.Equal(t, key, keys[0]) + require.Nil(t, proof.Verify(root)) + require.Nil(t, proof.VerifyAbsence(cpIncr(key))) + require.Equal(t, 1, len(keys), proof.String()) + require.Equal(t, 1, len(values), proof.String()) + if i < len(allkeys)-1 { + if i < len(allkeys)-2 { + // No last item... not a proof of absence of large key. 
+ require.NotNil(t, proof.VerifyAbsence(bytes.Repeat([]byte{0xFF}, 20)), proof.String()) + } else { + // Last item is included. + require.Nil(t, proof.VerifyAbsence(bytes.Repeat([]byte{0xFF}, 20))) + } + } else { + // last item of tree... valid proof of absence of large key. + require.Nil(t, proof.VerifyAbsence(bytes.Repeat([]byte{0xFF}, 20))) + } + } + // TODO: Test with single value in tree. +} + +func TestTreeKeyInRangeProofs(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + require := require.New(t) + keys := []byte{0x0a, 0x11, 0x2e, 0x32, 0x50, 0x72, 0x99, 0xa1, 0xe4, 0xf7} // 10 total. + for _, ikey := range keys { + key := []byte{ikey} + tree.Set(key, key) + } + root, err := tree.WorkingHash() + require.NoError(err) + + // For spacing: + T := 10 + // disable: don't use underscores in Go names; var nil______ should be nil (golint) + // nolint + nil______ := []byte(nil) + + cases := []struct { // nolint:maligned + start byte + end byte + pkeys []byte // proof keys, one byte per key. + vals []byte // keys and values, one byte per key. + lidx int64 // proof left index (index of first proof key). 
+ err bool // does error + }{ + {start: 0x0a, end: 0xf7, pkeys: keys[0:T], vals: keys[0:9], lidx: 0}, // #0 + {start: 0x0a, end: 0xf8, pkeys: keys[0:T], vals: keys[0:T], lidx: 0}, // #1 + {start: 0x00, end: 0xff, pkeys: keys[0:T], vals: keys[0:T], lidx: 0}, // #2 + {start: 0x14, end: 0xe4, pkeys: keys[1:9], vals: keys[2:8], lidx: 1}, // #3 + {start: 0x14, end: 0xe5, pkeys: keys[1:9], vals: keys[2:9], lidx: 1}, // #4 + {start: 0x14, end: 0xe6, pkeys: keys[1:T], vals: keys[2:9], lidx: 1}, // #5 + {start: 0x14, end: 0xf1, pkeys: keys[1:T], vals: keys[2:9], lidx: 1}, // #6 + {start: 0x14, end: 0xf7, pkeys: keys[1:T], vals: keys[2:9], lidx: 1}, // #7 + {start: 0x14, end: 0xff, pkeys: keys[1:T], vals: keys[2:T], lidx: 1}, // #8 + {start: 0x2e, end: 0x31, pkeys: keys[2:4], vals: keys[2:3], lidx: 2}, // #9 + {start: 0x2e, end: 0x32, pkeys: keys[2:4], vals: keys[2:3], lidx: 2}, // #10 + {start: 0x2f, end: 0x32, pkeys: keys[2:4], vals: nil______, lidx: 2}, // #11 + {start: 0x2e, end: 0x31, pkeys: keys[2:4], vals: keys[2:3], lidx: 2}, // #12 + {start: 0x2e, end: 0x2f, pkeys: keys[2:3], vals: keys[2:3], lidx: 2}, // #13 + {start: 0x12, end: 0x31, pkeys: keys[1:4], vals: keys[2:3], lidx: 1}, // #14 + {start: 0xf8, end: 0xff, pkeys: keys[9:T], vals: nil______, lidx: 9}, // #15 + {start: 0x12, end: 0x20, pkeys: keys[1:3], vals: nil______, lidx: 1}, // #16 + {start: 0x00, end: 0x09, pkeys: keys[0:1], vals: nil______, lidx: 0}, // #17 + {start: 0xf7, end: 0x00, err: true}, // #18 + {start: 0xf8, end: 0x00, err: true}, // #19 + {start: 0x10, end: 0x10, err: true}, // #20 + {start: 0x12, end: 0x12, err: true}, // #21 + {start: 0xff, end: 0xf7, err: true}, // #22 + } + + // fmt.Println("PRINT TREE") + // printNode(tree.ndb, tree.root, 0) + // fmt.Println("PRINT TREE END") + + for i, c := range cases { + t.Logf("case %v", i) + start := []byte{c.start} + end := []byte{c.end} + + // Compute range proof. 
+ keys, values, proof, err := tree.ImmutableTree().GetRangeWithProof(start, end, 0) + + if c.err { + require.Error(err, "%+v", err) + } else { + require.NoError(err, "%+v", err) + require.Equal(c.pkeys, flatten(proof.Keys())) + require.Equal(c.vals, flatten(keys)) + require.Equal(c.vals, flatten(values)) + require.Equal(c.lidx, proof.LeftIndex()) + + // Verify that proof is valid. + err = proof.Verify(root) + require.NoError(err, "%+v", err) + verifyProof(t, proof, root) + + // Verify each value of pkeys. + for _, key := range c.pkeys { + err := proof.VerifyItem([]byte{key}, []byte{key}) + require.NoError(err) + } + + // Verify each value of vals. + for _, key := range c.vals { + err := proof.VerifyItem([]byte{key}, []byte{key}) + require.NoError(err) + } + } + + } +} + +func encodeProof(proof *RangeProof) ([]byte, error) { + return proto.Marshal(proof.ToProto()) +} + +func decodeProof(bz []byte) (*RangeProof, error) { + proofOp := &iavlproto.RangeProof{} + err := proto.Unmarshal(bz, proofOp) + if err != nil { + return nil, err + } + proof, err := RangeProofFromProto(proofOp) + return &proof, err +} + +func verifyProof(t *testing.T, proof *RangeProof, root []byte) { + // Proof must verify. + require.NoError(t, proof.Verify(root)) + + // Write/Read then verify. + proofBytes, err := encodeProof(proof) + require.NoError(t, err) + _, err = decodeProof(proofBytes) + require.NoError(t, err) + + // Random mutations must not verify + for i := 0; i < 1e4; i++ { + badProofBytes := MutateByteSlice(proofBytes) + badProof, err := decodeProof(badProofBytes) + if err != nil { + continue // couldn't even decode. + } + // re-encode to make sure it's actually different. + badProofBytes2, err := encodeProof(badProof) + if bytes.Equal(proofBytes, badProofBytes2) { + continue // didn't mutate successfully. + } + // may be invalid... 
errors are okay + if err == nil { + assert.Errorf(t, badProof.Verify(root), + "Proof was still valid after a random mutation:\n%X\n%X", + proofBytes, badProofBytes) + } + } +} + +//---------------------------------------- + +func flatten(bzz [][]byte) (res []byte) { + for _, bz := range bzz { + res = append(res, bz...) + } + return res +} + +// Contract: !bytes.Equal(input, output) && len(input) >= len(output) +func MutateByteSlice(bytez []byte) []byte { + // If bytez is empty, panic + if len(bytez) == 0 { + panic("Cannot mutate an empty bytez") + } + + // Copy bytez + mBytez := make([]byte, len(bytez)) + copy(mBytez, bytez) + bytez = mBytez + + // Try a random mutation + switch iavlrand.RandInt() % 2 { + case 0: // Mutate a single byte + bytez[iavlrand.RandInt()%len(bytez)] += byte(iavlrand.RandInt()%255 + 1) + case 1: // Remove an arbitrary byte + pos := iavlrand.RandInt() % len(bytez) + bytez = append(bytez[:pos], bytez[pos+1:]...) + } + return bytez +} + +func sortByteSlices(src [][]byte) [][]byte { + bzz := byteslices(src) + sort.Sort(bzz) + return bzz +} + +type byteslices [][]byte + +func (bz byteslices) Len() int { + return len(bz) +} + +func (bz byteslices) Less(i, j int) bool { + switch bytes.Compare(bz[i], bz[j]) { + case -1: + return true + case 0, 1: + return false + default: + panic("should not happen") + } +} + +//nolint:unused +func (bz byteslices) Swap(i, j int) { + bz[j], bz[i] = bz[i], bz[j] +} diff --git a/sei-iavl/proto/changeset.pb.go b/sei-iavl/proto/changeset.pb.go new file mode 100644 index 0000000000..3c65935a85 --- /dev/null +++ b/sei-iavl/proto/changeset.pb.go @@ -0,0 +1,595 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: iavl/changeset.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type KVPair struct { + Delete bool `protobuf:"varint,1,opt,name=delete,proto3" json:"delete,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { + return fileDescriptor_21609c3776972f61, []int{0} +} +func (m *KVPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KVPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KVPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KVPair.Merge(m, src) +} +func (m *KVPair) XXX_Size() int { + return m.Size() +} +func (m *KVPair) XXX_DiscardUnknown() { + xxx_messageInfo_KVPair.DiscardUnknown(m) +} + +var xxx_messageInfo_KVPair proto.InternalMessageInfo + +func (m *KVPair) GetDelete() bool { + if m != nil { + return m.Delete + } + return false +} + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type ChangeSet struct { + Pairs []*KVPair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs,omitempty"` +} + +func (m *ChangeSet) Reset() { *m = 
ChangeSet{} } +func (m *ChangeSet) String() string { return proto.CompactTextString(m) } +func (*ChangeSet) ProtoMessage() {} +func (*ChangeSet) Descriptor() ([]byte, []int) { + return fileDescriptor_21609c3776972f61, []int{1} +} +func (m *ChangeSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChangeSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ChangeSet.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ChangeSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChangeSet.Merge(m, src) +} +func (m *ChangeSet) XXX_Size() int { + return m.Size() +} +func (m *ChangeSet) XXX_DiscardUnknown() { + xxx_messageInfo_ChangeSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ChangeSet proto.InternalMessageInfo + +func (m *ChangeSet) GetPairs() []*KVPair { + if m != nil { + return m.Pairs + } + return nil +} + +func init() { + proto.RegisterType((*KVPair)(nil), "iavl.KVPair") + proto.RegisterType((*ChangeSet)(nil), "iavl.ChangeSet") +} + +func init() { proto.RegisterFile("iavl/changeset.proto", fileDescriptor_21609c3776972f61) } + +var fileDescriptor_21609c3776972f61 = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0x4c, 0x2c, 0xcb, + 0xd1, 0x4f, 0xce, 0x48, 0xcc, 0x4b, 0x4f, 0x2d, 0x4e, 0x2d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x62, 0x01, 0x89, 0x2a, 0x79, 0x70, 0xb1, 0x79, 0x87, 0x05, 0x24, 0x66, 0x16, 0x09, 0x89, + 0x71, 0xb1, 0xa5, 0xa4, 0xe6, 0xa4, 0x96, 0xa4, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x04, 0x41, + 0x79, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x20, + 0xa6, 0x90, 0x08, 0x17, 0x6b, 0x59, 0x62, 0x4e, 0x69, 0xaa, 0x04, 0x33, 0x58, 0x0c, 0xc2, 0x51, + 0xd2, 0xe7, 0xe2, 0x74, 0x06, 0x5b, 0x11, 0x9c, 0x5a, 0x22, 0xa4, 0xc4, 
0xc5, 0x5a, 0x90, 0x98, + 0x59, 0x54, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xc4, 0xa3, 0x07, 0xb2, 0x4c, 0x0f, 0x62, + 0x53, 0x10, 0x44, 0xca, 0xc9, 0xeb, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, + 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, + 0x0c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x8b, 0x53, 0x33, 0x75, + 0xc1, 0x0e, 0x4e, 0xce, 0xcf, 0x01, 0x73, 0x92, 0x33, 0x12, 0x33, 0xf3, 0xc0, 0x2c, 0xb0, 0xb7, + 0xc0, 0x72, 0x49, 0x6c, 0x60, 0xca, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xbd, 0x58, 0xe8, + 0xeb, 0x00, 0x00, 0x00, +} + +func (m *KVPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KVPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KVPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintChangeset(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintChangeset(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + } + if m.Delete { + i-- + if m.Delete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ChangeSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChangeSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ChangeSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + 
_ = l + if len(m.Pairs) > 0 { + for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintChangeset(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintChangeset(dAtA []byte, offset int, v uint64) int { + offset -= sovChangeset(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *KVPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Delete { + n += 2 + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovChangeset(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovChangeset(uint64(l)) + } + return n +} + +func (m *ChangeSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pairs) > 0 { + for _, e := range m.Pairs { + l = e.Size() + n += 1 + l + sovChangeset(uint64(l)) + } + } + return n +} + +func sovChangeset(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozChangeset(x uint64) (n int) { + return sovChangeset(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *KVPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KVPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KVPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Delete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Delete = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthChangeset + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthChangeset + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthChangeset + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthChangeset + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChangeset(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChangeset + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ChangeSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ChangeSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ChangeSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowChangeset + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthChangeset + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthChangeset + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pairs = append(m.Pairs, &KVPair{}) + if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipChangeset(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthChangeset 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipChangeset(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChangeset + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChangeset + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowChangeset + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthChangeset + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupChangeset + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthChangeset + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthChangeset = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowChangeset = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupChangeset = fmt.Errorf("proto: unexpected end of group") +) diff --git a/sei-iavl/proto/iavl/changeset.proto b/sei-iavl/proto/iavl/changeset.proto new file mode 100644 index 0000000000..56479c698c --- /dev/null +++ b/sei-iavl/proto/iavl/changeset.proto @@ -0,0 +1,14 @@ 
+syntax = "proto3"; +package iavl; + +option go_package = "github.com/sei-protocol/sei-chain/sei-iavl/proto"; + +message KVPair { + bool delete = 1; + bytes key = 2; + bytes value = 3; +} + +message ChangeSet { + repeated KVPair pairs = 1; +} diff --git a/sei-iavl/proto/iavl/proof.proto b/sei-iavl/proto/iavl/proof.proto new file mode 100644 index 0000000000..9b5e28ba3a --- /dev/null +++ b/sei-iavl/proto/iavl/proof.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package iavl; + +option go_package = "github.com/sei-protocol/sei-chain/sei-iavl/proto"; + +// ValueOp is a Protobuf representation of iavl.ValueOp. +message ValueOp { + RangeProof proof = 1; +} + +// AbsenceOp is a Protobuf representation of iavl.AbsenceOp. +message AbsenceOp { + RangeProof proof = 1; +} + +// RangeProof is a Protobuf representation of iavl.RangeProof. +message RangeProof { + repeated ProofInnerNode left_path = 1; + repeated PathToLeaf inner_nodes = 2; + repeated ProofLeafNode leaves = 3; +} + +// PathToLeaf is a Protobuf representation of iavl.PathToLeaf. +message PathToLeaf { + repeated ProofInnerNode inners = 1; +} + +// ProofInnerNode is a Protobuf representation of iavl.ProofInnerNode. +message ProofInnerNode { + sint32 height = 1; + int64 size = 2; + int64 version = 3; + bytes left = 4; + bytes right = 5; +} + +// ProofLeafNode is a Protobuf representation of iavl.ProofLeafNode. +message ProofLeafNode { + bytes key = 1; + bytes value_hash = 2; + int64 version = 3; +} diff --git a/sei-iavl/proto/proof.pb.go b/sei-iavl/proto/proof.pb.go new file mode 100644 index 0000000000..24c5e0164d --- /dev/null +++ b/sei-iavl/proto/proof.pb.go @@ -0,0 +1,1601 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: iavl/proof.proto + +package proto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ValueOp is a Protobuf representation of iavl.ValueOp. +type ValueOp struct { + Proof *RangeProof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *ValueOp) Reset() { *m = ValueOp{} } +func (m *ValueOp) String() string { return proto.CompactTextString(m) } +func (*ValueOp) ProtoMessage() {} +func (*ValueOp) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{0} +} +func (m *ValueOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValueOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValueOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValueOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValueOp.Merge(m, src) +} +func (m *ValueOp) XXX_Size() int { + return m.Size() +} +func (m *ValueOp) XXX_DiscardUnknown() { + xxx_messageInfo_ValueOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ValueOp proto.InternalMessageInfo + +func (m *ValueOp) GetProof() *RangeProof { + if m != nil { + return m.Proof + } + return nil +} + +// AbsenceOp is a Protobuf representation of iavl.AbsenceOp. 
+type AbsenceOp struct { + Proof *RangeProof `protobuf:"bytes,1,opt,name=proof,proto3" json:"proof,omitempty"` +} + +func (m *AbsenceOp) Reset() { *m = AbsenceOp{} } +func (m *AbsenceOp) String() string { return proto.CompactTextString(m) } +func (*AbsenceOp) ProtoMessage() {} +func (*AbsenceOp) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{1} +} +func (m *AbsenceOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AbsenceOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AbsenceOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AbsenceOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_AbsenceOp.Merge(m, src) +} +func (m *AbsenceOp) XXX_Size() int { + return m.Size() +} +func (m *AbsenceOp) XXX_DiscardUnknown() { + xxx_messageInfo_AbsenceOp.DiscardUnknown(m) +} + +var xxx_messageInfo_AbsenceOp proto.InternalMessageInfo + +func (m *AbsenceOp) GetProof() *RangeProof { + if m != nil { + return m.Proof + } + return nil +} + +// RangeProof is a Protobuf representation of iavl.RangeProof. 
+type RangeProof struct { + LeftPath []*ProofInnerNode `protobuf:"bytes,1,rep,name=left_path,json=leftPath,proto3" json:"left_path,omitempty"` + InnerNodes []*PathToLeaf `protobuf:"bytes,2,rep,name=inner_nodes,json=innerNodes,proto3" json:"inner_nodes,omitempty"` + Leaves []*ProofLeafNode `protobuf:"bytes,3,rep,name=leaves,proto3" json:"leaves,omitempty"` +} + +func (m *RangeProof) Reset() { *m = RangeProof{} } +func (m *RangeProof) String() string { return proto.CompactTextString(m) } +func (*RangeProof) ProtoMessage() {} +func (*RangeProof) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{2} +} +func (m *RangeProof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeProof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RangeProof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RangeProof) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeProof.Merge(m, src) +} +func (m *RangeProof) XXX_Size() int { + return m.Size() +} +func (m *RangeProof) XXX_DiscardUnknown() { + xxx_messageInfo_RangeProof.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeProof proto.InternalMessageInfo + +func (m *RangeProof) GetLeftPath() []*ProofInnerNode { + if m != nil { + return m.LeftPath + } + return nil +} + +func (m *RangeProof) GetInnerNodes() []*PathToLeaf { + if m != nil { + return m.InnerNodes + } + return nil +} + +func (m *RangeProof) GetLeaves() []*ProofLeafNode { + if m != nil { + return m.Leaves + } + return nil +} + +// PathToLeaf is a Protobuf representation of iavl.PathToLeaf. 
+type PathToLeaf struct { + Inners []*ProofInnerNode `protobuf:"bytes,1,rep,name=inners,proto3" json:"inners,omitempty"` +} + +func (m *PathToLeaf) Reset() { *m = PathToLeaf{} } +func (m *PathToLeaf) String() string { return proto.CompactTextString(m) } +func (*PathToLeaf) ProtoMessage() {} +func (*PathToLeaf) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{3} +} +func (m *PathToLeaf) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PathToLeaf) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PathToLeaf.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PathToLeaf) XXX_Merge(src proto.Message) { + xxx_messageInfo_PathToLeaf.Merge(m, src) +} +func (m *PathToLeaf) XXX_Size() int { + return m.Size() +} +func (m *PathToLeaf) XXX_DiscardUnknown() { + xxx_messageInfo_PathToLeaf.DiscardUnknown(m) +} + +var xxx_messageInfo_PathToLeaf proto.InternalMessageInfo + +func (m *PathToLeaf) GetInners() []*ProofInnerNode { + if m != nil { + return m.Inners + } + return nil +} + +// ProofInnerNode is a Protobuf representation of iavl.ProofInnerNode. 
+type ProofInnerNode struct { + Height int32 `protobuf:"zigzag32,1,opt,name=height,proto3" json:"height,omitempty"` + Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + Left []byte `protobuf:"bytes,4,opt,name=left,proto3" json:"left,omitempty"` + Right []byte `protobuf:"bytes,5,opt,name=right,proto3" json:"right,omitempty"` +} + +func (m *ProofInnerNode) Reset() { *m = ProofInnerNode{} } +func (m *ProofInnerNode) String() string { return proto.CompactTextString(m) } +func (*ProofInnerNode) ProtoMessage() {} +func (*ProofInnerNode) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{4} +} +func (m *ProofInnerNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofInnerNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofInnerNode.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofInnerNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofInnerNode.Merge(m, src) +} +func (m *ProofInnerNode) XXX_Size() int { + return m.Size() +} +func (m *ProofInnerNode) XXX_DiscardUnknown() { + xxx_messageInfo_ProofInnerNode.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofInnerNode proto.InternalMessageInfo + +func (m *ProofInnerNode) GetHeight() int32 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ProofInnerNode) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *ProofInnerNode) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ProofInnerNode) GetLeft() []byte { + if m != nil { + return m.Left + } + return nil +} + +func (m *ProofInnerNode) GetRight() []byte { + if m != nil { + return m.Right + } + return nil +} + +// ProofLeafNode is a 
Protobuf representation of iavl.ProofInnerNode. +type ProofLeafNode struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + ValueHash []byte `protobuf:"bytes,2,opt,name=value_hash,json=valueHash,proto3" json:"value_hash,omitempty"` + Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *ProofLeafNode) Reset() { *m = ProofLeafNode{} } +func (m *ProofLeafNode) String() string { return proto.CompactTextString(m) } +func (*ProofLeafNode) ProtoMessage() {} +func (*ProofLeafNode) Descriptor() ([]byte, []int) { + return fileDescriptor_92b2514a05d2a2db, []int{5} +} +func (m *ProofLeafNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofLeafNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofLeafNode.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProofLeafNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofLeafNode.Merge(m, src) +} +func (m *ProofLeafNode) XXX_Size() int { + return m.Size() +} +func (m *ProofLeafNode) XXX_DiscardUnknown() { + xxx_messageInfo_ProofLeafNode.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofLeafNode proto.InternalMessageInfo + +func (m *ProofLeafNode) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ProofLeafNode) GetValueHash() []byte { + if m != nil { + return m.ValueHash + } + return nil +} + +func (m *ProofLeafNode) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func init() { + proto.RegisterType((*ValueOp)(nil), "iavl.ValueOp") + proto.RegisterType((*AbsenceOp)(nil), "iavl.AbsenceOp") + proto.RegisterType((*RangeProof)(nil), "iavl.RangeProof") + proto.RegisterType((*PathToLeaf)(nil), "iavl.PathToLeaf") + proto.RegisterType((*ProofInnerNode)(nil), "iavl.ProofInnerNode") + 
proto.RegisterType((*ProofLeafNode)(nil), "iavl.ProofLeafNode") +} + +func init() { proto.RegisterFile("iavl/proof.proto", fileDescriptor_92b2514a05d2a2db) } + +var fileDescriptor_92b2514a05d2a2db = []byte{ + // 395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0xee, 0xd2, 0x40, + 0x10, 0xc6, 0xd9, 0x7f, 0xa1, 0xc8, 0x80, 0x06, 0x57, 0x62, 0xf6, 0x62, 0xd3, 0xf4, 0x60, 0x9a, + 0xa8, 0x45, 0xe0, 0xe6, 0x4d, 0x4f, 0x6a, 0x8c, 0x92, 0x8d, 0xf1, 0xc0, 0x85, 0x2c, 0x65, 0x61, + 0x37, 0xd6, 0x6e, 0xd3, 0x2d, 0x4d, 0xf4, 0xe4, 0x23, 0xf8, 0x06, 0xbe, 0x8e, 0x47, 0x8e, 0x1e, + 0x0d, 0xbc, 0x88, 0xd9, 0xa1, 0x04, 0x39, 0x68, 0xe2, 0xa9, 0x33, 0xdf, 0xfc, 0x66, 0xbe, 0xe9, + 0xee, 0xc2, 0x50, 0x8b, 0x3a, 0x1b, 0x17, 0xa5, 0x31, 0x9b, 0xa4, 0x28, 0x4d, 0x65, 0x68, 0xdb, + 0x29, 0xd1, 0x04, 0xba, 0x1f, 0x44, 0xb6, 0x93, 0xef, 0x0a, 0xfa, 0x10, 0x3a, 0x58, 0x67, 0x24, + 0x24, 0x71, 0x7f, 0x3a, 0x4c, 0x1c, 0x90, 0x70, 0x91, 0x6f, 0xe5, 0xdc, 0xe9, 0xfc, 0x54, 0x8e, + 0x66, 0xd0, 0x7b, 0xbe, 0xb2, 0x32, 0x4f, 0xff, 0xa7, 0xe9, 0x3b, 0x01, 0xb8, 0xa8, 0x74, 0x02, + 0xbd, 0x4c, 0x6e, 0xaa, 0x65, 0x21, 0x2a, 0xc5, 0x48, 0xe8, 0xc5, 0xfd, 0xe9, 0xe8, 0xd4, 0x8a, + 0xf5, 0x57, 0x79, 0x2e, 0xcb, 0xb7, 0x66, 0x2d, 0xf9, 0x2d, 0x87, 0xcd, 0x45, 0xa5, 0xe8, 0x04, + 0xfa, 0xda, 0xc9, 0xcb, 0xdc, 0xac, 0xa5, 0x65, 0x37, 0xd8, 0xd4, 0xf8, 0x39, 0xe0, 0xbd, 0x79, + 0x23, 0xc5, 0x86, 0x83, 0x3e, 0xf7, 0x5a, 0xfa, 0x08, 0xfc, 0x4c, 0x8a, 0x5a, 0x5a, 0xe6, 0x21, + 0x7d, 0xef, 0x0f, 0x0b, 0x07, 0xa3, 0x43, 0x83, 0x44, 0xcf, 0x00, 0x2e, 0x63, 0xe8, 0x63, 0xf0, + 0x71, 0x90, 0xfd, 0xe7, 0x76, 0x0d, 0x13, 0x7d, 0x25, 0x70, 0xe7, 0xba, 0x44, 0xef, 0x83, 0xaf, + 0xa4, 0xde, 0xaa, 0x0a, 0x4f, 0xe6, 0x2e, 0x6f, 0x32, 0x4a, 0xa1, 0x6d, 0xf5, 0x17, 0xc9, 0x6e, + 0x42, 0x12, 0x7b, 0x1c, 0x63, 0xca, 0xa0, 0x5b, 0xcb, 0xd2, 0x6a, 0x93, 0x33, 0x0f, 0xe5, 0x73, + 0xea, 0x68, 0x77, 0x00, 0xac, 0x1d, 0x92, 0x78, 0xc0, 0x31, 0xa6, 0x23, 
0xe8, 0x94, 0x38, 0xb8, + 0x83, 0xe2, 0x29, 0x89, 0x16, 0x70, 0xfb, 0xea, 0xbf, 0xe8, 0x10, 0xbc, 0x8f, 0xf2, 0x33, 0xba, + 0x0f, 0xb8, 0x0b, 0xe9, 0x03, 0x80, 0xda, 0xdd, 0xf5, 0x52, 0x09, 0xab, 0x70, 0x81, 0x01, 0xef, + 0xa1, 0xf2, 0x52, 0x58, 0xf5, 0xf7, 0x2d, 0x5e, 0xbc, 0xfe, 0x71, 0x08, 0xc8, 0xfe, 0x10, 0x90, + 0x5f, 0x87, 0x80, 0x7c, 0x3b, 0x06, 0xad, 0xfd, 0x31, 0x68, 0xfd, 0x3c, 0x06, 0xad, 0xc5, 0xd3, + 0xad, 0xae, 0xd4, 0x6e, 0x95, 0xa4, 0xe6, 0xd3, 0xd8, 0x4a, 0xfd, 0x04, 0x9f, 0x56, 0x6a, 0x32, + 0x4c, 0x52, 0x25, 0x74, 0x8e, 0xd1, 0xf9, 0xf1, 0x55, 0x66, 0xe5, 0xe3, 0x67, 0xf6, 0x3b, 0x00, + 0x00, 0xff, 0xff, 0x6a, 0xab, 0xb7, 0xd6, 0x91, 0x02, 0x00, 0x00, +} + +func (m *ValueOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValueOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValueOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AbsenceOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AbsenceOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AbsenceOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Proof != nil { + { + size, err := m.Proof.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RangeProof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeProof) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeProof) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Leaves) > 0 { + for iNdEx := len(m.Leaves) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leaves[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.InnerNodes) > 0 { + for iNdEx := len(m.InnerNodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InnerNodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.LeftPath) > 0 { + for iNdEx := len(m.LeftPath) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LeftPath[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PathToLeaf) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PathToLeaf) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PathToLeaf) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Inners) > 0 { + for iNdEx := len(m.Inners) - 1; iNdEx >= 0; 
iNdEx-- { + { + size, err := m.Inners[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProof(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProofInnerNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofInnerNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofInnerNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Right) > 0 { + i -= len(m.Right) + copy(dAtA[i:], m.Right) + i = encodeVarintProof(dAtA, i, uint64(len(m.Right))) + i-- + dAtA[i] = 0x2a + } + if len(m.Left) > 0 { + i -= len(m.Left) + copy(dAtA[i:], m.Left) + i = encodeVarintProof(dAtA, i, uint64(len(m.Left))) + i-- + dAtA[i] = 0x22 + } + if m.Version != 0 { + i = encodeVarintProof(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x18 + } + if m.Size_ != 0 { + i = encodeVarintProof(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintProof(dAtA, i, uint64((uint32(m.Height)<<1)^uint32((m.Height>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ProofLeafNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofLeafNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProofLeafNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != 0 { + i = encodeVarintProof(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x18 + } + if len(m.ValueHash) > 0 { + i -= 
len(m.ValueHash) + copy(dAtA[i:], m.ValueHash) + i = encodeVarintProof(dAtA, i, uint64(len(m.ValueHash))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintProof(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProof(dAtA []byte, offset int, v uint64) int { + offset -= sovProof(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValueOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovProof(uint64(l)) + } + return n +} + +func (m *AbsenceOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Proof != nil { + l = m.Proof.Size() + n += 1 + l + sovProof(uint64(l)) + } + return n +} + +func (m *RangeProof) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.LeftPath) > 0 { + for _, e := range m.LeftPath { + l = e.Size() + n += 1 + l + sovProof(uint64(l)) + } + } + if len(m.InnerNodes) > 0 { + for _, e := range m.InnerNodes { + l = e.Size() + n += 1 + l + sovProof(uint64(l)) + } + } + if len(m.Leaves) > 0 { + for _, e := range m.Leaves { + l = e.Size() + n += 1 + l + sovProof(uint64(l)) + } + } + return n +} + +func (m *PathToLeaf) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Inners) > 0 { + for _, e := range m.Inners { + l = e.Size() + n += 1 + l + sovProof(uint64(l)) + } + } + return n +} + +func (m *ProofInnerNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sozProof(uint64(m.Height)) + } + if m.Size_ != 0 { + n += 1 + sovProof(uint64(m.Size_)) + } + if m.Version != 0 { + n += 1 + sovProof(uint64(m.Version)) + } + l = len(m.Left) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + l = len(m.Right) + if l > 0 { + n += 1 + l + 
sovProof(uint64(l)) + } + return n +} + +func (m *ProofLeafNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + l = len(m.ValueHash) + if l > 0 { + n += 1 + l + sovProof(uint64(l)) + } + if m.Version != 0 { + n += 1 + sovProof(uint64(m.Version)) + } + return n +} + +func sovProof(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProof(x uint64) (n int) { + return sovProof(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValueOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &RangeProof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { 
+ return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AbsenceOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AbsenceOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AbsenceOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Proof == nil { + m.Proof = &RangeProof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeProof) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeProof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeProof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeftPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeftPath = append(m.LeftPath, &ProofInnerNode{}) + if err := m.LeftPath[len(m.LeftPath)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InnerNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InnerNodes = append(m.InnerNodes, 
&PathToLeaf{}) + if err := m.InnerNodes[len(m.InnerNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaves", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leaves = append(m.Leaves, &ProofLeafNode{}) + if err := m.Leaves[len(m.Leaves)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PathToLeaf) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PathToLeaf: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PathToLeaf: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inners", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inners = append(m.Inners, &ProofInnerNode{}) + if err := m.Inners[len(m.Inners)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofInnerNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofInnerNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofInnerNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = 
int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Height = v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Left = append(m.Left[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Left == nil { + m.Left = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Right = append(m.Right[:0], dAtA[iNdEx:postIndex]...) + if m.Right == nil { + m.Right = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProofLeafNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofLeafNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofLeafNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthProof + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthProof + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValueHash = append(m.ValueHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ValueHash == nil { + m.ValueHash = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProof + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipProof(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProof + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProof(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProof + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProof + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProof + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if 
iNdEx < 0 { + return 0, ErrInvalidLengthProof + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProof = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProof = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProof = fmt.Errorf("proto: unexpected end of group") +) diff --git a/sei-iavl/repair.go b/sei-iavl/repair.go new file mode 100644 index 0000000000..c0ec6f17f4 --- /dev/null +++ b/sei-iavl/repair.go @@ -0,0 +1,69 @@ +package iavl + +import ( + "math" + + "github.com/pkg/errors" + dbm "github.com/tendermint/tm-db" +) + +// Repair013Orphans repairs incorrect orphan entries written by IAVL 0.13 pruning. To use it, close +// a database using IAVL 0.13, make a backup copy, and then run this function before opening the +// database with IAVL 0.14 or later. It returns the number of faulty orphan entries removed. If the +// 0.13 database was written with KeepEvery:1 (the default) or the last version _ever_ saved to the +// tree was a multiple of `KeepEvery` and thus saved to disk, this repair is not necessary. +// +// Note that this cannot be used directly on Cosmos SDK databases, since they store multiple IAVL +// trees in the same underlying database via a prefix scheme. +// +// The pruning functionality enabled with Options.KeepEvery > 1 would write orphans entries to disk +// for versions that should only have been saved in memory, and these orphan entries were clamped +// to the last version persisted to disk instead of the version that generated them (so a delete at +// version 749 might generate an orphan entry ending at version 700 for KeepEvery:100). If the +// database is reopened at the last persisted version and this version is later deleted, the +// orphaned nodes can be deleted prematurely or incorrectly, causing data loss and database +// corruption. 
+// +// This function removes these incorrect orphan entries by deleting all orphan entries that have a +// to-version equal to or greater than the latest persisted version. Correct orphans will never +// have this, since they must have been deleted in a future (non-existent) version for that to be +// the case. +func Repair013Orphans(db dbm.DB) (uint64, error) { + ndb := newNodeDB(db, 0, &Options{Sync: true}) + version, err := ndb.getLatestVersion() + if err != nil { + return 0, err + } + if version == 0 { + return 0, errors.New("no versions found") + } + + var repaired uint64 + batch := db.NewBatch() + defer func() { _ = batch.Close() }() + err = ndb.traverseRange(orphanKeyFormat.Key(version), orphanKeyFormat.Key(int64(math.MaxInt64)), func(k, v []byte) error { + // Sanity check so we don't remove stuff we shouldn't + var toVersion int64 + orphanKeyFormat.Scan(k, &toVersion) + if toVersion < version { + err = errors.Errorf("Found unexpected orphan with toVersion=%v, lesser than latest version %v", + toVersion, version) + return err + } + repaired++ + err = batch.Delete(k) + if err != nil { + return err + } + return nil + }) + if err != nil { + return 0, err + } + err = batch.WriteSync() + if err != nil { + return 0, err + } + + return repaired, nil +} diff --git a/sei-iavl/testdata/0.13-orphans-v6.db/000001.log b/sei-iavl/testdata/0.13-orphans-v6.db/000001.log new file mode 100644 index 0000000000..13bc49ab48 Binary files /dev/null and b/sei-iavl/testdata/0.13-orphans-v6.db/000001.log differ diff --git a/sei-iavl/testdata/0.13-orphans-v6.db/CURRENT b/sei-iavl/testdata/0.13-orphans-v6.db/CURRENT new file mode 100644 index 0000000000..feda7d6b24 --- /dev/null +++ b/sei-iavl/testdata/0.13-orphans-v6.db/CURRENT @@ -0,0 +1 @@ +MANIFEST-000000 diff --git a/sei-iavl/testdata/0.13-orphans-v6.db/LOCK b/sei-iavl/testdata/0.13-orphans-v6.db/LOCK new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sei-iavl/testdata/0.13-orphans-v6.db/LOG 
b/sei-iavl/testdata/0.13-orphans-v6.db/LOG new file mode 100644 index 0000000000..f890e80b8f --- /dev/null +++ b/sei-iavl/testdata/0.13-orphans-v6.db/LOG @@ -0,0 +1,6 @@ +=============== Jun 25, 2020 (CEST) =============== +14:30:10.673317 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed +14:30:10.688689 db@open opening +14:30:10.689548 version@stat F·[] S·0B[] Sc·[] +14:30:10.702481 db@janitor F·2 G·0 +14:30:10.702564 db@open done T·13.82376ms diff --git a/sei-iavl/testdata/0.13-orphans-v6.db/MANIFEST-000000 b/sei-iavl/testdata/0.13-orphans-v6.db/MANIFEST-000000 new file mode 100644 index 0000000000..9d54f6733b Binary files /dev/null and b/sei-iavl/testdata/0.13-orphans-v6.db/MANIFEST-000000 differ diff --git a/sei-iavl/testdata/0.13-orphans.db/000001.log b/sei-iavl/testdata/0.13-orphans.db/000001.log new file mode 100644 index 0000000000..95ef16dcaf Binary files /dev/null and b/sei-iavl/testdata/0.13-orphans.db/000001.log differ diff --git a/sei-iavl/testdata/0.13-orphans.db/CURRENT b/sei-iavl/testdata/0.13-orphans.db/CURRENT new file mode 100644 index 0000000000..feda7d6b24 --- /dev/null +++ b/sei-iavl/testdata/0.13-orphans.db/CURRENT @@ -0,0 +1 @@ +MANIFEST-000000 diff --git a/sei-iavl/testdata/0.13-orphans.db/LOCK b/sei-iavl/testdata/0.13-orphans.db/LOCK new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sei-iavl/testdata/0.13-orphans.db/LOG b/sei-iavl/testdata/0.13-orphans.db/LOG new file mode 100644 index 0000000000..711c5a08ff --- /dev/null +++ b/sei-iavl/testdata/0.13-orphans.db/LOG @@ -0,0 +1,6 @@ +=============== Jun 25, 2020 (CEST) =============== +13:31:22.162368 log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed +13:31:22.173177 db@open opening +13:31:22.173961 version@stat F·[] S·0B[] Sc·[] +13:31:22.189072 db@janitor F·2 G·0 +13:31:22.189117 db@open done T·15.875399ms diff --git 
a/sei-iavl/testdata/0.13-orphans.db/MANIFEST-000000 b/sei-iavl/testdata/0.13-orphans.db/MANIFEST-000000 new file mode 100644 index 0000000000..9d54f6733b Binary files /dev/null and b/sei-iavl/testdata/0.13-orphans.db/MANIFEST-000000 differ diff --git a/sei-iavl/testutils_test.go b/sei-iavl/testutils_test.go new file mode 100644 index 0000000000..d0354a7d3a --- /dev/null +++ b/sei-iavl/testutils_test.go @@ -0,0 +1,366 @@ +// nolint:errcheck +package iavl + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "testing" + + "math/rand" + + "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" + + "github.com/sei-protocol/sei-chain/sei-iavl/internal/encoding" + iavlrand "github.com/sei-protocol/sei-chain/sei-iavl/internal/rand" +) + +type iteratorTestConfig struct { + startIterate, endIterate []byte + startByteToSet, endByteToSet byte + ascending bool +} + +func randstr(length int) string { + return iavlrand.RandStr(length) +} + +func i2b(i int) []byte { + buf := new(bytes.Buffer) + encoding.EncodeVarint(buf, int64(i)) + return buf.Bytes() +} + +func b2i(bz []byte) int { + i, _, err := encoding.DecodeVarint(bz) + if err != nil { + panic(err) + } + return int(i) +} + +// Construct a MutableTree +func getTestTree(cacheSize int) (*MutableTree, error) { + return NewMutableTreeWithOpts(db.NewMemDB(), cacheSize, nil, false) +} + +// Convenience for a new node +func N(l, r interface{}) *Node { + var left, right *Node + if _, ok := l.(*Node); ok { + left = l.(*Node) + } else { + left = NewNode(i2b(l.(int)), nil, 0) + } + if _, ok := r.(*Node); ok { + right = r.(*Node) + } else { + right = NewNode(i2b(r.(int)), nil, 0) + } + + n := &Node{ + key: right.lmd(nil).key, + value: nil, + leftNode: left, + rightNode: right, + } + n.calcHeightAndSize(nil) + return n +} + +// Setup a deep node +func T(n *Node) (*MutableTree, error) { + t, _ := getTestTree(0) + + _, _, err := n.hashWithCount() + if err != nil { + return nil, err + } + t.ImmutableTree().root = n + return 
t, nil +} + +// Convenience for simple printing of keys & tree structure +func P(n *Node) string { + if n.height == 0 { + return fmt.Sprintf("%v", b2i(n.key)) + } + return fmt.Sprintf("(%v %v)", P(n.leftNode), P(n.rightNode)) +} + +func randBytes(length int) []byte { + return iavlrand.RandBytes(length) +} + +type traverser struct { + first string + last string + count int +} + +func (t *traverser) view(key, value []byte) bool { + if t.first == "" { + t.first = string(key) + } + t.last = string(key) + t.count++ + return false +} + +func expectTraverse(t *testing.T, trav traverser, start, end string, count int) { + if trav.first != start { + t.Error("Bad start", start, trav.first) + } + if trav.last != end { + t.Error("Bad end", end, trav.last) + } + if trav.count != count { + t.Error("Bad count", count, trav.count) + } +} + +func assertMutableMirrorIterate(t *testing.T, tree *MutableTree, mirror map[string]string) { + sortedMirrorKeys := make([]string, 0, len(mirror)) + for k := range mirror { + sortedMirrorKeys = append(sortedMirrorKeys, k) + } + sort.Strings(sortedMirrorKeys) + + curKeyIdx := 0 + tree.Iterate(func(k, v []byte) bool { + nextMirrorKey := sortedMirrorKeys[curKeyIdx] + nextMirrorValue := mirror[nextMirrorKey] + + require.Equal(t, []byte(nextMirrorKey), k) + require.Equal(t, []byte(nextMirrorValue), v) + + curKeyIdx++ + return false + }) +} + +func assertImmutableMirrorIterate(t *testing.T, tree *ImmutableTree, mirror map[string]string) { + sortedMirrorKeys := getSortedMirrorKeys(mirror) + + curKeyIdx := 0 + tree.Iterate(func(k, v []byte) bool { + nextMirrorKey := sortedMirrorKeys[curKeyIdx] + nextMirrorValue := mirror[nextMirrorKey] + + require.Equal(t, []byte(nextMirrorKey), k) + require.Equal(t, []byte(nextMirrorValue), v) + + curKeyIdx++ + return false + }) +} + +func getSortedMirrorKeys(mirror map[string]string) []string { + sortedMirrorKeys := make([]string, 0, len(mirror)) + for k := range mirror { + sortedMirrorKeys = append(sortedMirrorKeys, 
k) + } + sort.Strings(sortedMirrorKeys) + return sortedMirrorKeys +} + +func getRandomizedTreeAndMirror(t *testing.T) (*MutableTree, map[string]string) { + const cacheSize = 100 + + tree, err := getTestTree(cacheSize) + require.NoError(t, err) + + mirror := make(map[string]string) + + randomizeTreeAndMirror(t, tree, mirror) + return tree, mirror +} + +func randomizeTreeAndMirror(t *testing.T, tree *MutableTree, mirror map[string]string) { + if mirror == nil { + mirror = make(map[string]string) + } + const keyValLength = 5 + + numberOfSets := 1000 + numberOfUpdates := numberOfSets / 4 + numberOfRemovals := numberOfSets / 4 + + for numberOfSets > numberOfRemovals*3 { + key := randBytes(keyValLength) + value := randBytes(keyValLength) + + isUpdated, err := tree.Set(key, value) + require.NoError(t, err) + require.False(t, isUpdated) + mirror[string(key)] = string(value) + + numberOfSets-- + } + + for numberOfSets+numberOfRemovals+numberOfUpdates > 0 { + randOp := rand.Intn(3) + + switch randOp { + case 0: + if numberOfSets == 0 { + continue + } + + numberOfSets-- + + key := randBytes(keyValLength) + value := randBytes(keyValLength) + + isUpdated, err := tree.Set(key, value) + require.NoError(t, err) + require.False(t, isUpdated) + mirror[string(key)] = string(value) + case 1: + + if numberOfUpdates == 0 { + continue + } + numberOfUpdates-- + + key := getRandomKeyFrom(mirror) + value := randBytes(keyValLength) + + isUpdated, err := tree.Set([]byte(key), value) + require.NoError(t, err) + require.True(t, isUpdated) + mirror[key] = string(value) + case 2: + if numberOfRemovals == 0 { + continue + } + numberOfRemovals-- + + key := getRandomKeyFrom(mirror) + + val, isRemoved, err := tree.Remove([]byte(key)) + require.NoError(t, err) + require.True(t, isRemoved) + require.NotNil(t, val) + delete(mirror, key) + default: + t.Error("Invalid randOp", randOp) + } + } +} + +func getRandomKeyFrom(mirror map[string]string) string { + for k := range mirror { + return k + } + 
panic("no keys in mirror") +} + +func setupMirrorForIterator(t *testing.T, config *iteratorTestConfig, tree *MutableTree) [][]string { + var mirror [][]string + + startByteToSet := config.startByteToSet + endByteToSet := config.endByteToSet + + if !config.ascending { + startByteToSet, endByteToSet = endByteToSet, startByteToSet + } + + curByte := startByteToSet + for curByte != endByteToSet { + value := randBytes(5) + + if (config.startIterate == nil || curByte >= config.startIterate[0]) && (config.endIterate == nil || curByte < config.endIterate[0]) { + mirror = append(mirror, []string{string(curByte), string(value)}) + } + + isUpdated, err := tree.Set([]byte{curByte}, value) + require.NoError(t, err) + require.False(t, isUpdated) + + if config.ascending { + curByte++ + } else { + curByte-- + } + } + return mirror +} + +// assertIterator confirms that the iterator returns the expected values desribed by mirror in the same order. +// mirror is a slice containing slices of the form [key, value]. In other words, key at index 0 and value at index 1. 
+func assertIterator(t *testing.T, itr db.Iterator, mirror [][]string, ascending bool) { + startIdx, endIdx := 0, len(mirror)-1 + increment := 1 + mirrorIdx := startIdx + + // flip the iteration order over mirror if descending + if !ascending { + startIdx = endIdx - 1 + endIdx = -1 + increment *= -1 + } + + for startIdx != endIdx { + nextExpectedPair := mirror[mirrorIdx] + + require.True(t, itr.Valid()) + require.Equal(t, []byte(nextExpectedPair[0]), itr.Key()) + require.Equal(t, []byte(nextExpectedPair[1]), itr.Value()) + itr.Next() + require.NoError(t, itr.Error()) + + startIdx += increment + mirrorIdx++ + } +} + +func BenchmarkImmutableAvlTreeMemDB(b *testing.B) { + db, err := db.NewDB("test", db.MemDBBackend, "") + require.NoError(b, err) + benchmarkImmutableAvlTreeWithDB(b, db) +} + +func benchmarkImmutableAvlTreeWithDB(b *testing.B, db db.DB) { + defer func() { _ = db.Close() }() + + b.StopTimer() + + t, err := NewMutableTree(db, 100000, false) + require.NoError(b, err) + + value := []byte{} + for i := 0; i < 1000000; i++ { + t.Set(i2b(int(iavlrand.RandInt31())), value) + if i > 990000 && i%1000 == 999 { + t.SaveVersion() + } + } + b.ReportAllocs() + t.SaveVersion() + + runtime.GC() + + b.StartTimer() + for i := 0; i < b.N; i++ { + ri := i2b(int(iavlrand.RandInt31())) + t.Set(ri, value) + t.Remove(ri) + if i%100 == 99 { + t.SaveVersion() + } + } +} + +func (node *Node) lmd(t *ImmutableTree) *Node { + if node.isLeaf() { + return node + } + + // TODO: Should handle this error? 
+ leftNode, _ := node.getLeftNode(t) + + return leftNode.lmd(t) +} diff --git a/sei-iavl/tree_dotgraph.go b/sei-iavl/tree_dotgraph.go new file mode 100644 index 0000000000..c9bd2300e2 --- /dev/null +++ b/sei-iavl/tree_dotgraph.go @@ -0,0 +1,107 @@ +package iavl + +import ( + "bytes" + "fmt" + "io" + "text/template" +) + +type graphEdge struct { + From, To string +} + +type graphNode struct { + Hash string + Label string + Value string + Attrs map[string]string +} + +type graphContext struct { + Edges []*graphEdge + Nodes []*graphNode +} + +var graphTemplate = ` +strict graph { + {{- range $i, $edge := $.Edges}} + "{{ $edge.From }}" -- "{{ $edge.To }}"; + {{- end}} + + {{range $i, $node := $.Nodes}} + "{{ $node.Hash }}" [label=<{{ $node.Label }}>,{{ range $k, $v := $node.Attrs }}{{ $k }}={{ $v }},{{end}}]; + {{- end}} +} +` + +var tpl = template.Must(template.New("iavl").Parse(graphTemplate)) + +var defaultGraphNodeAttrs = map[string]string{ + "shape": "circle", +} + +func WriteDOTGraph(w io.Writer, tree *ImmutableTree, paths []PathToLeaf) { + ctx := &graphContext{} + + if _, _, err := tree.root.hashWithCount(); err != nil { + panic(err) + } + tree.root.traverse(tree, true, func(node *Node) bool { + graphNode := &graphNode{ + Attrs: map[string]string{}, + Hash: fmt.Sprintf("%x", node.GetHash()), + } + for k, v := range defaultGraphNodeAttrs { + graphNode.Attrs[k] = v + } + shortHash := graphNode.Hash[:7] + + graphNode.Label = mkLabel(unsafeToStr(node.GetNodeKey()), 16, "sans-serif") + graphNode.Label += mkLabel(shortHash, 10, "monospace") + graphNode.Label += mkLabel(fmt.Sprintf("version=%d", node.GetVersion()), 10, "monospace") + + if node.GetValue() != nil { + graphNode.Label += mkLabel(unsafeToStr(node.GetValue()), 10, "sans-serif") + } + + if node.GetHeight() == 0 { + graphNode.Attrs["fillcolor"] = "lightgrey" + graphNode.Attrs["style"] = "filled" + } + + for _, path := range paths { + for _, n := range path { + if bytes.Equal(n.Left, node.GetHash()) || 
bytes.Equal(n.Right, node.GetHash()) { + graphNode.Attrs["peripheries"] = "2" + graphNode.Attrs["style"] = "filled" + graphNode.Attrs["fillcolor"] = "lightblue" + break + } + } + } + ctx.Nodes = append(ctx.Nodes, graphNode) + + if node.GetLeftNode() != nil { + ctx.Edges = append(ctx.Edges, &graphEdge{ + From: graphNode.Hash, + To: fmt.Sprintf("%x", node.GetLeftNode().GetHash()), + }) + } + if node.GetRightNode() != nil { + ctx.Edges = append(ctx.Edges, &graphEdge{ + From: graphNode.Hash, + To: fmt.Sprintf("%x", node.GetRightNode().GetHash()), + }) + } + return false + }) + + if err := tpl.Execute(w, ctx); err != nil { + panic(err) + } +} + +func mkLabel(label string, pt int, face string) string { + return fmt.Sprintf("%s
", face, pt, label) +} diff --git a/sei-iavl/tree_dotgraph_test.go b/sei-iavl/tree_dotgraph_test.go new file mode 100644 index 0000000000..a4bad9cdf7 --- /dev/null +++ b/sei-iavl/tree_dotgraph_test.go @@ -0,0 +1,20 @@ +package iavl + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWriteDOTGraph(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + for _, ikey := range []byte{ + 0x0a, 0x11, 0x2e, 0x32, 0x50, 0x72, 0x99, 0xa1, 0xe4, 0xf7, + } { + key := []byte{ikey} + tree.Set(key, key) + } + WriteDOTGraph(ioutil.Discard, tree.ImmutableTree(), []PathToLeaf{}) +} diff --git a/sei-iavl/tree_fuzz_test.go b/sei-iavl/tree_fuzz_test.go new file mode 100644 index 0000000000..b3219a8f78 --- /dev/null +++ b/sei-iavl/tree_fuzz_test.go @@ -0,0 +1,128 @@ +// nolint:errcheck +package iavl + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + iavlrand "github.com/sei-protocol/sei-chain/sei-iavl/internal/rand" +) + +// This file implement fuzz testing by generating programs and then running +// them. If an error occurs, the program that had the error is printed. + +// A program is a list of instructions. 
+type program struct { + instructions []instruction +} + +func (p *program) Execute(tree *MutableTree) (err error) { + var errLine int + + defer func() { + if r := recover(); r != nil { + var str string + + for i, instr := range p.instructions { + prefix := " " + if i == errLine { + prefix = ">> " + } + str += prefix + instr.String() + "\n" + } + err = fmt.Errorf("program panicked with: %s\n%s", r, str) + } + }() + + for i, instr := range p.instructions { + errLine = i + instr.Execute(tree) + } + return +} + +func (p *program) addInstruction(i instruction) { + p.instructions = append(p.instructions, i) +} + +func (p *program) size() int { + return len(p.instructions) +} + +type instruction struct { + op string + k, v []byte + version int64 +} + +func (i instruction) Execute(tree *MutableTree) { + switch i.op { + case "SET": + tree.Set(i.k, i.v) + case "REMOVE": + tree.Remove(i.k) + case "SAVE": + tree.SaveVersion() + case "DELETE": + tree.DeleteVersion(i.version) + default: + panic("Unrecognized op: " + i.op) + } +} + +func (i instruction) String() string { + if i.version > 0 { + return fmt.Sprintf("%-8s %-8s %-8s %-8d", i.op, i.k, i.v, i.version) + } + return fmt.Sprintf("%-8s %-8s %-8s", i.op, i.k, i.v) +} + +// Generate a random program of the given size. +func genRandomProgram(size int) *program { + p := &program{} + nextVersion := 1 + + for p.size() < size { + k, v := []byte(iavlrand.RandStr(1)), []byte(iavlrand.RandStr(1)) + + switch rand.Int() % 7 { + case 0, 1, 2: + p.addInstruction(instruction{op: "SET", k: k, v: v}) + case 3, 4: + p.addInstruction(instruction{op: "REMOVE", k: k}) + case 5: + p.addInstruction(instruction{op: "SAVE", version: int64(nextVersion)}) + nextVersion++ + case 6: + if rv := rand.Int() % nextVersion; rv < nextVersion && rv > 0 { + p.addInstruction(instruction{op: "DELETE", version: int64(rv)}) + } + } + } + return p +} + +// Generate many programs and run them. 
+func TestMutableTreeFuzz(t *testing.T) { + maxIterations := testFuzzIterations + progsPerIteration := 100000 + iterations := 0 + + for size := 5; iterations < maxIterations; size++ { + for i := 0; i < progsPerIteration/size; i++ { + tree, err := getTestTree(0) + require.NoError(t, err) + program := genRandomProgram(size) + err = program.Execute(tree) + if err != nil { + str, err := tree.String() + require.Nil(t, err) + t.Fatalf("Error after %d iterations (size %d): %s\n%s", iterations, size, err.Error(), str) + } + iterations++ + } + } +} diff --git a/sei-iavl/tree_random_test.go b/sei-iavl/tree_random_test.go new file mode 100644 index 0000000000..4ea0086f7e --- /dev/null +++ b/sei-iavl/tree_random_test.go @@ -0,0 +1,485 @@ +package iavl + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "math/rand" + "os" + "sort" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + db "github.com/tendermint/tm-db" +) + +func TestRandomOperations(t *testing.T) { + // In short mode (specifically, when running in CI with the race detector), + // we only run the first couple of seeds. + seeds := []int64{ + 498727689, + 756509998, + 480459882, + 324736440, + 581827344, + 470870060, + 390970079, + 846023066, + 518638291, + 957382170, + } + + for i, seed := range seeds { + i, seed := i, seed + t.Run(fmt.Sprintf("Seed %v", seed), func(t *testing.T) { + if testing.Short() && i >= 2 { + t.Skip("Skipping seed in short mode") + } + t.Parallel() // comment out to disable parallel tests, or use -parallel 1 + testRandomOperations(t, seed) + }) + } +} + +// Randomized test that runs all sorts of random operations, mirrors them in a known-good +// map, and verifies the state of the tree against the map. 
+func testRandomOperations(t *testing.T, randSeed int64) { + const ( + keySize = 16 // before base64-encoding + valueSize = 16 // before base64-encoding + + versions = 32 // number of final versions to generate + reloadChance = 0.1 // chance of tree reload after save + deleteChance = 0.2 // chance of random version deletion after save + deleteRangeChance = 0.3 // chance of deleting a version range (DeleteVersionsRange) + deleteMultiChance = 0.3 // chance of deleting multiple versions (DeleteVersions) + deleteMax = 5 // max number of versions to delete + revertChance = 0.05 // chance to revert tree to random version with LoadVersionForOverwriting + syncChance = 0.2 // chance of enabling sync writes on tree load + cacheChance = 0.4 // chance of enabling caching + cacheSizeMax = 256 // maximum size of cache (will be random from 1) + + versionOps = 64 // number of operations (create/update/delete) per version + updateRatio = 0.4 // ratio of updates out of all operations + deleteRatio = 0.2 // ratio of deletes out of all operations + ) + + r := rand.New(rand.NewSource(randSeed)) + + // loadTree loads the last persisted version of a tree with random pruning settings. + loadTree := func(levelDB db.DB) (tree *MutableTree, version int64, options *Options) { + var err error + options = &Options{ + Sync: r.Float64() < syncChance, + } + // set the cache size regardless of whether caching is enabled. This ensures we always + // call the RNG the same number of times, such that changing settings does not affect + // the RNG sequence. 
+ cacheSize := int(r.Int63n(cacheSizeMax + 1)) + if !(r.Float64() < cacheChance) { + cacheSize = 0 + } + tree, err = NewMutableTreeWithOpts(levelDB, cacheSize, options, false) + require.NoError(t, err) + version, err = tree.Load() + require.NoError(t, err) + t.Logf("Loaded version %v (sync=%v cache=%v)", version, options.Sync, cacheSize) + return + } + + // generates random keys and values + randString := func(size int) string { + buf := make([]byte, size) + r.Read(buf) + return base64.StdEncoding.EncodeToString(buf) + } + + // Use the same on-disk database for the entire run. + tempdir, err := ioutil.TempDir("", "iavl") + require.NoError(t, err) + defer os.RemoveAll(tempdir) + + levelDB, err := db.NewGoLevelDB("leveldb", tempdir) + require.NoError(t, err) + + tree, version, _ := loadTree(levelDB) + + // Set up a mirror of the current IAVL state, as well as the history of saved mirrors + // on disk and in memory. Since pruning was removed we currently persist all versions, + // thus memMirrors is never used, but it is left here for the future when it is re-introduces. + mirror := make(map[string]string, versionOps) + mirrorKeys := make([]string, 0, versionOps) + diskMirrors := make(map[int64]map[string]string) + memMirrors := make(map[int64]map[string]string) + + for version < versions { + for i := 0; i < versionOps; i++ { + switch { + case len(mirror) > 0 && r.Float64() < deleteRatio: + index := r.Intn(len(mirrorKeys)) + key := mirrorKeys[index] + mirrorKeys = append(mirrorKeys[:index], mirrorKeys[index+1:]...) 
+ _, removed, err := tree.Remove([]byte(key)) + require.NoError(t, err) + require.True(t, removed) + delete(mirror, key) + + case len(mirror) > 0 && r.Float64() < updateRatio: + key := mirrorKeys[r.Intn(len(mirrorKeys))] + value := randString(valueSize) + updated, err := tree.Set([]byte(key), []byte(value)) + require.NoError(t, err) + require.True(t, updated) + mirror[key] = value + + default: + key := randString(keySize) + value := randString(valueSize) + for has, err := tree.Has([]byte(key)); has && err == nil; { + key = randString(keySize) + } + updated, err := tree.Set([]byte(key), []byte(value)) + require.NoError(t, err) + require.False(t, updated) + mirror[key] = value + mirrorKeys = append(mirrorKeys, key) + } + } + _, version, err = tree.SaveVersion() + require.NoError(t, err) + + t.Logf("Saved tree at version %v wit %v versions: %v", + version, len(tree.AvailableVersions()), tree.AvailableVersions()) + + // Verify that the version matches the mirror. + assertMirror(t, tree, mirror, 0) + + // Save the mirror as a disk mirror, since we currently persist all versions. + diskMirrors[version] = copyMirror(mirror) + + // Delete random versions if requested, but never the latest version. 
+ if r.Float64() < deleteChance { + versions := getMirrorVersions(diskMirrors, memMirrors) + switch { + case len(versions) < 2: + + case r.Float64() < deleteRangeChance: + indexFrom := r.Intn(len(versions) - 1) + from := versions[indexFrom] + batch := r.Intn(deleteMax) + if batch > len(versions[indexFrom:])-2 { + batch = len(versions[indexFrom:]) - 2 + } + to := versions[indexFrom+batch] + 1 + t.Logf("Deleting versions %v-%v", from, to-1) + err = tree.DeleteVersionsRange(int64(from), int64(to)) + t.Logf("Available versions %v", tree.AvailableVersions()) + require.NoError(t, err) + for version := from; version < to; version++ { + delete(diskMirrors, int64(version)) + delete(memMirrors, int64(version)) + } + + // adjust probability to take into account probability of range delete not happening + case r.Float64() < deleteMultiChance/(1.0-deleteRangeChance): + deleteVersions := []int64{} + desc := "" + batchSize := 1 + r.Intn(deleteMax) + if batchSize > len(versions)-1 { + batchSize = len(versions) - 1 + } + for i := 0; i < batchSize; i++ { + deleteVersions = append(deleteVersions, int64(versions[i])) + delete(diskMirrors, int64(versions[i])) + delete(memMirrors, int64(versions[i])) + if len(desc) > 0 { + desc += "," + } + desc += fmt.Sprintf("%v", versions[i]) + } + t.Logf("Deleting versions %v", deleteVersions) + err = tree.DeleteVersions(deleteVersions...) + t.Logf("Available versions %v", tree.AvailableVersions()) + require.NoError(t, err) + + default: + } + } + + // Reload tree from last persisted version if requested, checking that it matches the + // latest disk mirror version and discarding memory mirrors. + if r.Float64() < reloadChance { + tree, version, _ = loadTree(levelDB) + assertMaxVersion(t, tree, version, diskMirrors) + memMirrors = make(map[int64]map[string]string) + mirror = copyMirror(diskMirrors[version]) + mirrorKeys = getMirrorKeys(mirror) + } + + // Revert tree to historical version if requested, deleting all subsequent versions. 
+ if r.Float64() < revertChance { + versions := getMirrorVersions(diskMirrors, memMirrors) + if len(versions) > 1 { + version = int64(versions[r.Intn(len(versions)-1)]) + t.Logf("Reverting to version %v", version) + _, err = tree.LoadVersionForOverwriting(version) + require.NoError(t, err, "Failed to revert to version %v", version) + if m, ok := diskMirrors[version]; ok { + mirror = copyMirror(m) + } else if m, ok := memMirrors[version]; ok { + mirror = copyMirror(m) + } else { + t.Fatalf("Mirror not found for revert target %v", version) + } + mirrorKeys = getMirrorKeys(mirror) + for v := range diskMirrors { + if v > version { + delete(diskMirrors, v) + } + } + for v := range memMirrors { + if v > version { + delete(memMirrors, v) + } + } + } + } + + for diskVersion, diskMirror := range diskMirrors { + assertMirror(t, tree, diskMirror, diskVersion) + } + + for memVersion, memMirror := range memMirrors { + assertMirror(t, tree, memMirror, memVersion) + } + } + + // Once we're done, delete all prior versions in random order, make sure all orphans have been + // removed, and check that the latest versions matches the mirror. 
+ remaining := tree.AvailableVersions() + remaining = remaining[:len(remaining)-1] + + switch { + case len(remaining) == 0: + t.Logf("No remaining versions") + + case r.Float64() < deleteRangeChance: + t.Logf("Deleting all remaining versions %v-%v", remaining[0], remaining[len(remaining)-1]) + err = tree.DeleteVersionsRange(int64(remaining[0]), int64(remaining[len(remaining)-1]+1)) + require.NoError(t, err) + + // adjust probability to take into account probability of range delete not happening + case r.Float64() < deleteMultiChance/(1.0-deleteRangeChance): + deleteVersions := []int64{} + desc := "" + for _, i := range r.Perm(len(remaining)) { + deleteVersions = append(deleteVersions, int64(remaining[i])) + if len(desc) > 0 { + desc += "," + } + desc += fmt.Sprintf("%v", remaining[i]) + } + t.Logf("Deleting all remaining versions %v", deleteVersions) + err = tree.DeleteVersions(deleteVersions...) + require.NoError(t, err) + + default: + err = tree.DeleteVersionsRange(int64(remaining[0]), int64(remaining[len(remaining)-1])+1) + require.NoError(t, err) + } + + require.EqualValues(t, []int{int(version)}, tree.AvailableVersions()) + assertMirror(t, tree, mirror, version) + assertMirror(t, tree, mirror, 0) + assertOrphans(t, tree, 0) + t.Logf("Final version %v is correct, with no stray orphans", version) + + // Now, let's delete all remaining key/value pairs, and make sure no stray + // data is left behind in the database. 
+ prevVersion := tree.Version() + keys := [][]byte{} + tree.Iterate(func(key, value []byte) bool { + keys = append(keys, key) + return false + }) + for _, key := range keys { + _, removed, err := tree.Remove(key) + require.NoError(t, err) + require.True(t, removed) + } + _, _, err = tree.SaveVersion() + require.NoError(t, err) + err = tree.DeleteVersion(prevVersion) + require.NoError(t, err) + assertEmptyDatabase(t, tree) + t.Logf("Final version %v deleted, no stray database entries", prevVersion) +} + +// Checks that the database is empty, only containing a single root entry +// at the given version. +func assertEmptyDatabase(t *testing.T, tree *MutableTree) { + version := tree.Version() + iter, err := tree.ndb.db.Iterator(nil, nil) + require.NoError(t, err) + + var ( + foundKeys []string + ) + for ; iter.Valid(); iter.Next() { + foundKeys = append(foundKeys, string(iter.Key())) + } + require.NoError(t, iter.Error()) + require.EqualValues(t, 2, len(foundKeys), "Found %v database entries, expected 1", len(foundKeys)) // 1 for storage version and 1 for root + + firstKey := foundKeys[0] + secondKey := foundKeys[1] + + require.True(t, strings.HasPrefix(firstKey, metadataKeyFormat.Prefix())) + require.True(t, strings.HasPrefix(secondKey, rootKeyFormat.Prefix())) + + require.Equal(t, string(metadataKeyFormat.KeyBytes([]byte(storageVersionKey))), firstKey, "Unexpected storage version key") + + storageVersionValue, err := tree.ndb.db.Get([]byte(firstKey)) + require.NoError(t, err) + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + require.Equal(t, fastStorageVersionValue+fastStorageVersionDelimiter+strconv.Itoa(int(latestVersion)), string(storageVersionValue)) + + var foundVersion int64 + rootKeyFormat.Scan([]byte(secondKey), &foundVersion) + require.Equal(t, version, foundVersion, "Unexpected root version") +} + +// Checks that the tree has the given number of orphan nodes. 
+func assertOrphans(t *testing.T, tree *MutableTree, expected int) { + count := 0 + err := tree.ndb.traverseOrphans(func(k, v []byte) error { + count++ + return nil + }) + require.Nil(t, err) + require.EqualValues(t, expected, count, "Expected %v orphans, got %v", expected, count) +} + +// Checks that a version is the maximum mirrored version. +func assertMaxVersion(t *testing.T, tree *MutableTree, version int64, mirrors map[int64]map[string]string) { + max := int64(0) + for v := range mirrors { + if v > max { + max = v + } + } + require.Equal(t, max, version) +} + +// Checks that a mirror, optionally for a given version, matches the tree contents. +func assertMirror(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) { + var err error + itree := tree.ImmutableTree() + if version > 0 { + itree, err = tree.GetImmutable(version) + require.NoError(t, err, "loading version %v", version) + } + // We check both ways: first check that iterated keys match the mirror, then iterate over the + // mirror and check with get. This is to exercise both the iteration and Get() code paths. + iterated := 0 + itree.Iterate(func(key, value []byte) bool { + require.Equal(t, string(value), mirror[string(key)], "Invalid value for key %q", key) + iterated++ + return false + }) + require.EqualValues(t, len(mirror), itree.Size()) + require.EqualValues(t, len(mirror), iterated) + for key, value := range mirror { + actualFast, err := itree.Get([]byte(key)) + require.NoError(t, err) + require.Equal(t, value, string(actualFast)) + _, actual, err := itree.GetWithIndex([]byte(key)) + require.NoError(t, err) + require.Equal(t, value, string(actual)) + } + + assertFastNodeCacheIsLive(t, tree, mirror, version) + assertFastNodeDiskIsLive(t, tree, mirror, version) +} + +// Checks that fast node cache matches live state. 
+func assertFastNodeCacheIsLive(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) { + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + if latestVersion != version { + // The fast node cache check should only be done to the latest version + return + } + + require.Equal(t, len(mirror), tree.ndb.fastNodeCache.Len()) + for k, v := range mirror { + require.True(t, tree.ndb.fastNodeCache.Has([]byte(k)), "cached fast node must be in live tree") + mirrorNode := tree.ndb.fastNodeCache.Get([]byte(k)) + require.Equal(t, []byte(v), mirrorNode.(*FastNode).value, "cached fast node's value must be equal to live state value") + } +} + +// Checks that fast nodes on disk match live state. +func assertFastNodeDiskIsLive(t *testing.T, tree *MutableTree, mirror map[string]string, version int64) { + latestVersion, err := tree.ndb.getLatestVersion() + require.NoError(t, err) + if latestVersion != version { + // The fast node disk check should only be done to the latest version + return + } + + count := 0 + err = tree.ndb.traverseFastNodes(func(keyWithPrefix, v []byte) error { + key := keyWithPrefix[1:] + count++ + fastNode, err := DeserializeFastNode(key, v) + require.Nil(t, err) + + mirrorVal := mirror[string(fastNode.key)] + + require.NotNil(t, mirrorVal) + require.Equal(t, []byte(mirrorVal), fastNode.value) + return nil + }) + require.NoError(t, err) + require.Equal(t, len(mirror), count) +} + +// copyMirror copies a mirror map. +func copyMirror(mirror map[string]string) map[string]string { + c := make(map[string]string, len(mirror)) + for k, v := range mirror { + c[k] = v + } + return c +} + +// getMirrorKeys returns the keys of a mirror, unsorted. +func getMirrorKeys(mirror map[string]string) []string { + keys := make([]string, 0, len(mirror)) + for key := range mirror { + keys = append(keys, key) + } + return keys +} + +// getMirrorVersions returns the versions of the given mirrors, sorted. 
Returns []int to +// match tree.AvailableVersions(). +func getMirrorVersions(mirrors ...map[int64]map[string]string) []int { + versionMap := make(map[int]bool) + for _, m := range mirrors { + for version := range m { + versionMap[int(version)] = true + } + } + versions := make([]int, 0, len(versionMap)) + for version := range versionMap { + versions = append(versions, version) + } + sort.Ints(versions) + return versions +} diff --git a/sei-iavl/tree_test.go b/sei-iavl/tree_test.go new file mode 100644 index 0000000000..8ac75a0871 --- /dev/null +++ b/sei-iavl/tree_test.go @@ -0,0 +1,2042 @@ +// nolint:errcheck +package iavl + +import ( + "bytes" + "encoding/hex" + "flag" + "fmt" + "math/rand" + "os" + "runtime" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" + + iavlrand "github.com/sei-protocol/sei-chain/sei-iavl/internal/rand" +) + +var testLevelDB bool +var testFuzzIterations int +var random *iavlrand.Rand + +func SetupTest() { + random = iavlrand.NewRand() + random.Seed(0) // for determinism + flag.BoolVar(&testLevelDB, "test.leveldb", false, "test leveldb backend") + flag.IntVar(&testFuzzIterations, "test.fuzz-iterations", 100000, "number of fuzz testing iterations") + flag.Parse() +} + +func getTestDB() (db.DB, func()) { + if testLevelDB { + d, err := db.NewGoLevelDB("test", ".") + if err != nil { + panic(err) + } + return d, func() { + d.Close() + os.RemoveAll("./test.db") + } + } + return db.NewMemDB(), func() {} +} + +func TestVersionedRandomTree(t *testing.T) { + require := require.New(t) + SetupTest() + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 100, false) + require.NoError(err) + versions := 50 + keysPerVersion := 30 + + // Create a tree of size 1000 with 100 versions. 
+ for i := 1; i <= versions; i++ { + for j := 0; j < keysPerVersion; j++ { + k := []byte(iavlrand.RandStr(8)) + v := []byte(iavlrand.RandStr(8)) + tree.Set(k, v) + } + tree.SaveVersion() + } + roots, err := tree.ndb.getRoots() + require.NoError(err) + require.Equal(versions, len(roots), "wrong number of roots") + + leafNodes, err := tree.ndb.leafNodes() + require.Nil(err) + require.Equal(versions*keysPerVersion, len(leafNodes), "wrong number of nodes") + + // Before deleting old versions, we should have equal or more nodes in the + // db than in the current tree version. + nodes, err := tree.ndb.nodes() + require.Nil(err) + require.True(len(nodes) >= tree.ImmutableTree().nodeSize()) + + // Ensure it returns all versions in sorted order + available := tree.AvailableVersions() + assert.Equal(t, versions, len(available)) + assert.Equal(t, 1, available[0]) + assert.Equal(t, versions, available[len(available)-1]) + + for i := 1; i < versions; i++ { + tree.DeleteVersion(int64(i)) + } + + tr, err := tree.GetImmutable(int64(versions)) + require.NoError(err, "GetImmutable should not error for version %d", versions) + require.Equal(tr.root, tree.ImmutableTree().root) + + // we should only have one available version now + available = tree.AvailableVersions() + assert.Equal(t, 1, len(available)) + assert.Equal(t, versions, available[0]) + + // After cleaning up all previous versions, we should have as many nodes + // in the db as in the current tree version. 
+ leafNodes, err = tree.ndb.leafNodes() + require.Nil(err) + require.Len(leafNodes, int(tree.ImmutableTree().Size())) + + nodes, err = tree.ndb.nodes() + require.Nil(err) + require.Equal(tree.ImmutableTree().nodeSize(), len(nodes)) +} + +// nolint: dupl +func TestTreeHash(t *testing.T) { + const ( + randSeed = 49872768940 // For deterministic tests + keySize = 16 + valueSize = 16 + + versions = 4 // number of versions to generate + versionOps = 4096 // number of operations (create/update/delete) per version + updateRatio = 0.4 // ratio of updates out of all operations + deleteRatio = 0.2 // ratio of deletes out of all operations + ) + + // expected hashes for each version + expectHashes := []string{ + "58ec30fa27f338057e5964ed9ec3367e59b2b54bec4c194f10fde7fed16c2a1c", + "91ad3ace227372f0064b2d63e8493ce8f4bdcbd16c7a8e4f4d54029c9db9570c", + "92c25dce822c5968c228cfe7e686129ea281f79273d4a8fcf6f9130a47aa5421", + "e44d170925554f42e00263155c19574837a38e3efed8910daccc7fa12f560fa0", + } + require.Len(t, expectHashes, versions, "must have expected hashes for all versions") + + r := rand.New(rand.NewSource(randSeed)) + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + keys := make([][]byte, 0, versionOps) + for i := 0; i < versions; i++ { + for j := 0; j < versionOps; j++ { + key := make([]byte, keySize) + value := make([]byte, valueSize) + + // The performance of this is likely to be terrible, but that's fine for small tests + switch { + case len(keys) > 0 && r.Float64() <= deleteRatio: + index := r.Intn(len(keys)) + key = keys[index] + keys = append(keys[:index], keys[index+1:]...) 
+ _, removed, err := tree.Remove(key) + require.NoError(t, err) + require.True(t, removed) + + case len(keys) > 0 && r.Float64() <= updateRatio: + key = keys[r.Intn(len(keys))] + r.Read(value) + updated, err := tree.Set(key, value) + require.NoError(t, err) + require.True(t, updated) + + default: + r.Read(key) + r.Read(value) + // If we get an update, set again + for updated, err := tree.Set(key, value); err == nil && updated; { + key = make([]byte, keySize) + r.Read(key) + } + keys = append(keys, key) + } + } + hash, version, err := tree.SaveVersion() + require.NoError(t, err) + require.EqualValues(t, i+1, version) + require.Equal(t, expectHashes[i], hex.EncodeToString(hash)) + } + + require.EqualValues(t, versions, tree.Version()) +} + +func TestVersionedRandomTreeSmallKeys(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 100, false) + require.NoError(err) + singleVersionTree, err := getTestTree(0) + require.NoError(err) + versions := 20 + keysPerVersion := 50 + + for i := 1; i <= versions; i++ { + for j := 0; j < keysPerVersion; j++ { + // Keys of size one are likely to be overwritten. + k := []byte(iavlrand.RandStr(1)) + v := []byte(iavlrand.RandStr(8)) + tree.Set(k, v) + singleVersionTree.Set(k, v) + } + tree.SaveVersion() + } + singleVersionTree.SaveVersion() + + for i := 1; i < versions; i++ { + tree.DeleteVersion(int64(i)) + } + + // After cleaning up all previous versions, we should have as many nodes + // in the db as in the current tree version. The simple tree must be equal + // too. + leafNodes, err := tree.ndb.leafNodes() + require.Nil(err) + + nodes, err := tree.ndb.nodes() + require.Nil(err) + + require.Len(leafNodes, int(tree.ImmutableTree().Size())) + require.Len(nodes, tree.ImmutableTree().nodeSize()) + require.Len(nodes, singleVersionTree.ImmutableTree().nodeSize()) + + // Try getting random keys. 
+ for i := 0; i < keysPerVersion; i++ { + val, err := tree.Get([]byte(iavlrand.RandStr(1))) + require.NoError(err) + require.NotNil(val) + require.NotEmpty(val) + } +} + +func TestVersionedRandomTreeSmallKeysRandomDeletes(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 100, false) + require.NoError(err) + singleVersionTree, err := getTestTree(0) + require.NoError(err) + versions := 30 + keysPerVersion := 50 + + for i := 1; i <= versions; i++ { + for j := 0; j < keysPerVersion; j++ { + // Keys of size one are likely to be overwritten. + k := []byte(iavlrand.RandStr(1)) + v := []byte(iavlrand.RandStr(8)) + tree.Set(k, v) + singleVersionTree.Set(k, v) + } + tree.SaveVersion() + } + singleVersionTree.SaveVersion() + + for _, i := range iavlrand.RandPerm(versions - 1) { + tree.DeleteVersion(int64(i + 1)) + } + + // After cleaning up all previous versions, we should have as many nodes + // in the db as in the current tree version. The simple tree must be equal + // too. + leafNodes, err := tree.ndb.leafNodes() + require.Nil(err) + + nodes, err := tree.ndb.nodes() + require.Nil(err) + + require.Len(leafNodes, int(tree.ImmutableTree().Size())) + require.Len(nodes, tree.ImmutableTree().nodeSize()) + require.Len(nodes, singleVersionTree.ImmutableTree().nodeSize()) + + // Try getting random keys. 
+ for i := 0; i < keysPerVersion; i++ { + val, err := tree.Get([]byte(iavlrand.RandStr(1))) + require.NoError(err) + require.NotNil(val) + require.NotEmpty(val) + } +} + +func TestVersionedTreeSpecial1(t *testing.T) { + tree, err := getTestTree(100) + require.NoError(t, err) + + tree.Set([]byte("C"), []byte("so43QQFN")) + tree.SaveVersion() + + tree.Set([]byte("A"), []byte("ut7sTTAO")) + tree.SaveVersion() + + tree.Set([]byte("X"), []byte("AoWWC1kN")) + tree.SaveVersion() + + tree.Set([]byte("T"), []byte("MhkWjkVy")) + tree.SaveVersion() + + tree.DeleteVersion(1) + tree.DeleteVersion(2) + tree.DeleteVersion(3) + + nodes, err := tree.ndb.nodes() + require.Nil(t, err) + require.Equal(t, tree.ImmutableTree().nodeSize(), len(nodes)) +} + +func TestVersionedRandomTreeSpecial2(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(100) + require.NoError(err) + + tree.Set([]byte("OFMe2Yvm"), []byte("ez2OtQtE")) + tree.Set([]byte("WEN4iN7Y"), []byte("kQNyUalI")) + tree.SaveVersion() + + tree.Set([]byte("1yY3pXHr"), []byte("udYznpII")) + tree.Set([]byte("7OSHNE7k"), []byte("ff181M2d")) + tree.SaveVersion() + + tree.DeleteVersion(1) + + nodes, err := tree.ndb.nodes() + require.NoError(err) + require.Len(nodes, tree.ImmutableTree().nodeSize()) +} + +func TestVersionedEmptyTree(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 0, false) + require.NoError(err) + + hash, v, err := tree.SaveVersion() + require.NoError(err) + require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + require.EqualValues(1, v) + + hash, v, err = tree.SaveVersion() + require.NoError(err) + require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + require.EqualValues(2, v) + + hash, v, err = tree.SaveVersion() + require.NoError(err) + 
require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + require.EqualValues(3, v) + + hash, v, err = tree.SaveVersion() + require.NoError(err) + require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + require.EqualValues(4, v) + + require.EqualValues(4, tree.Version()) + + require.True(tree.VersionExists(1)) + require.True(tree.VersionExists(3)) + + require.NoError(tree.DeleteVersion(1)) + require.NoError(tree.DeleteVersion(3)) + + require.False(tree.VersionExists(1)) + require.False(tree.VersionExists(3)) + + tree.Set([]byte("k"), []byte("v")) + require.EqualValues(5, tree.ImmutableTree().root.version) + + // Now reload the tree. + + tree, err = NewMutableTree(d, 0, false) + require.NoError(err) + tree.Load() + + require.False(tree.VersionExists(1)) + require.True(tree.VersionExists(2)) + require.False(tree.VersionExists(3)) + + t2, err := tree.GetImmutable(2) + require.NoError(err, "GetImmutable should not fail for version 2") + + require.Empty(t2.root) +} + +func TestVersionedTree(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 0, false) + require.NoError(err) + + // We start with empty database. + require.Equal(0, tree.ndb.size()) + require.True(tree.IsEmpty()) + require.False(tree.ImmutableTree().IsFastCacheEnabled()) + + // version 0 + + tree.Set([]byte("key1"), []byte("val0")) + tree.Set([]byte("key2"), []byte("val0")) + + // Still zero keys, since we haven't written them. + nodes, err := tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes, 0) + require.False(tree.IsEmpty()) + + // Now let's write the keys to storage. 
+ hash1, v, err := tree.SaveVersion() + require.NoError(err) + require.False(tree.IsEmpty()) + require.EqualValues(1, v) + + // -----1----- + // key1 = val0 version=1 + // key2 = val0 version=1 + // key2 (root) version=1 + // ----------- + + nodes1, err := tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes1, 2, "db should have a size of 2") + + // version 1 + + tree.Set([]byte("key1"), []byte("val1")) + tree.Set([]byte("key2"), []byte("val1")) + tree.Set([]byte("key3"), []byte("val1")) + nodes, err = tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes, len(nodes1)) + + hash2, v2, err := tree.SaveVersion() + require.NoError(err) + require.False(bytes.Equal(hash1, hash2)) + require.EqualValues(v+1, v2) + + // Recreate a new tree and load it, to make sure it works in this + // scenario. + tree, err = NewMutableTree(d, 100, false) + require.NoError(err) + _, err = tree.Load() + require.NoError(err) + + require.EqualValues(v2, tree.Version()) + + // -----1----- + // key1 = val0 + // key2 = val0 + // -----2----- + // key1 = val1 + // key2 = val1 + // key3 = val1 + // ----------- + + nodes2, err := tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes2, 5, "db should have grown in size") + orphans, err := tree.ndb.orphans() + require.NoError(err) + require.Len(orphans, 3, "db should have three orphans") + + // Create three more orphans. 
+ tree.Remove([]byte("key1")) // orphans both leaf node and inner node containing "key1" and "key2" + tree.Set([]byte("key2"), []byte("val2")) + + hash3, v3, _ := tree.SaveVersion() + require.EqualValues(3, v3) + + // -----1----- + // key1 = val0 (replaced) + // key2 = val0 (replaced) + // -----2----- + // key1 = val1 (removed) + // key2 = val1 (replaced) + // key3 = val1 + // -----3----- + // key2 = val2 + // ----------- + + nodes3, err := tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes3, 6, "wrong number of nodes") + + orphans, err = tree.ndb.orphans() + require.NoError(err) + require.Len(orphans, 7, "wrong number of orphans") + + hash4, _, _ := tree.SaveVersion() + require.EqualValues(hash3, hash4) + require.NotNil(hash4) + + tree, err = NewMutableTree(d, 100, false) + require.NoError(err) + _, err = tree.Load() + require.NoError(err) + + // ------------ + // DB UNCHANGED + // ------------ + + nodes4, err := tree.ndb.leafNodes() + require.NoError(err) + require.Len(nodes4, len(nodes3), "db should not have changed in size") + + tree.Set([]byte("key1"), []byte("val0")) + + // "key2" + val, err := tree.GetVersioned([]byte("key2"), 0) + require.NoError(err) + require.Nil(val) + + val, err = tree.GetVersioned([]byte("key2"), 1) + require.NoError(err) + require.Equal("val0", string(val)) + + val, err = tree.GetVersioned([]byte("key2"), 2) + require.NoError(err) + require.Equal("val1", string(val)) + + val, err = tree.Get([]byte("key2")) + require.NoError(err) + require.Equal("val2", string(val)) + + // "key1" + val, err = tree.GetVersioned([]byte("key1"), 1) + require.NoError(err) + require.Equal("val0", string(val)) + + val, err = tree.GetVersioned([]byte("key1"), 2) + require.NoError(err) + require.Equal("val1", string(val)) + + val, err = tree.GetVersioned([]byte("key1"), 3) + require.NoError(err) + require.Nil(val) + + val, err = tree.GetVersioned([]byte("key1"), 4) + require.NoError(err) + require.Nil(val) + + val, err = tree.Get([]byte("key1")) + 
require.NoError(err) + require.Equal("val0", string(val)) + + // "key3" + val, err = tree.GetVersioned([]byte("key3"), 0) + require.NoError(err) + require.Nil(val) + + val, err = tree.GetVersioned([]byte("key3"), 2) + require.NoError(err) + require.Equal("val1", string(val)) + + val, err = tree.GetVersioned([]byte("key3"), 3) + require.NoError(err) + require.Equal("val1", string(val)) + + // Delete a version. After this the keys in that version should not be found. + + tree.DeleteVersion(2) + + // -----1----- + // key1 = val0 + // key2 = val0 + // -----2----- + // key3 = val1 + // -----3----- + // key2 = val2 + // ----------- + + nodes5, err := tree.ndb.leafNodes() + require.NoError(err) + + require.True(len(nodes5) < len(nodes4), "db should have shrunk after delete %d !< %d", len(nodes5), len(nodes4)) + + val, err = tree.GetVersioned([]byte("key2"), 2) + require.Nil(val) + + val, err = tree.GetVersioned([]byte("key3"), 2) + require.Nil(val) + + // But they should still exist in the latest version. + + val, err = tree.Get([]byte("key2")) + require.NoError(err) + require.Equal("val2", string(val)) + + val, err = tree.Get([]byte("key3")) + require.NoError(err) + require.Equal("val1", string(val)) + + // Version 1 should still be available. 
+ + val, err = tree.GetVersioned([]byte("key1"), 1) + require.NoError(err) + require.Equal("val0", string(val)) + + val, err = tree.GetVersioned([]byte("key2"), 1) + require.NoError(err) + require.Equal("val0", string(val)) +} + +func TestVersionedTreeVersionDeletingEfficiency(t *testing.T) { + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 0, false) + require.NoError(t, err) + + tree.Set([]byte("key0"), []byte("val0")) + tree.Set([]byte("key1"), []byte("val0")) + tree.Set([]byte("key2"), []byte("val0")) + tree.SaveVersion() + + leafNodes, err := tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 3) + + tree.Set([]byte("key1"), []byte("val1")) + tree.Set([]byte("key2"), []byte("val1")) + tree.Set([]byte("key3"), []byte("val1")) + tree.SaveVersion() + + leafNodes, err = tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 6) + + tree.Set([]byte("key0"), []byte("val2")) + tree.Remove([]byte("key1")) + tree.Set([]byte("key2"), []byte("val2")) + tree.SaveVersion() + + leafNodes, err = tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 8) + + tree.DeleteVersion(2) + + leafNodes, err = tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 6) + + tree.DeleteVersion(1) + + leafNodes, err = tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 3) + + tree2, err := getTestTree(0) + require.NoError(t, err) + tree2.Set([]byte("key0"), []byte("val2")) + tree2.Set([]byte("key2"), []byte("val2")) + tree2.Set([]byte("key3"), []byte("val1")) + tree2.SaveVersion() + + require.Equal(t, tree2.ImmutableTree().nodeSize(), tree.ImmutableTree().nodeSize()) +} + +func TestVersionedTreeOrphanDeleting(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + tree.Set([]byte("key0"), []byte("val0")) + tree.Set([]byte("key1"), []byte("val0")) + tree.Set([]byte("key2"), []byte("val0")) + tree.SaveVersion() + + tree.Set([]byte("key1"), []byte("val1")) + 
tree.Set([]byte("key2"), []byte("val1")) + tree.Set([]byte("key3"), []byte("val1")) + tree.SaveVersion() + + tree.Set([]byte("key0"), []byte("val2")) + tree.Remove([]byte("key1")) + tree.Set([]byte("key2"), []byte("val2")) + tree.SaveVersion() + + tree.DeleteVersion(2) + + val, err := tree.Get([]byte("key0")) + require.NoError(t, err) + require.Equal(t, val, []byte("val2")) + + val, err = tree.Get([]byte("key1")) + require.NoError(t, err) + require.Nil(t, val) + + val, err = tree.Get([]byte("key2")) + require.NoError(t, err) + require.Equal(t, val, []byte("val2")) + + val, err = tree.Get([]byte("key3")) + require.NoError(t, err) + require.Equal(t, val, []byte("val1")) + + tree.DeleteVersion(1) + + leafNodes, err := tree.ndb.leafNodes() + require.Nil(t, err) + require.Len(t, leafNodes, 3) +} + +func TestVersionedTreeSpecialCase(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 0, false) + require.NoError(err) + + tree.Set([]byte("key1"), []byte("val0")) + tree.Set([]byte("key2"), []byte("val0")) + tree.SaveVersion() + + tree.Set([]byte("key1"), []byte("val1")) + tree.Set([]byte("key2"), []byte("val1")) + tree.SaveVersion() + + tree.Set([]byte("key2"), []byte("val2")) + tree.SaveVersion() + + tree.DeleteVersion(2) + + val, err := tree.GetVersioned([]byte("key2"), 1) + require.NoError(err) + require.Equal("val0", string(val)) +} + +func TestVersionedTreeSpecialCase2(t *testing.T) { + require := require.New(t) + + d := db.NewMemDB() + tree, err := NewMutableTree(d, 100, false) + require.NoError(err) + + tree.Set([]byte("key1"), []byte("val0")) + tree.Set([]byte("key2"), []byte("val0")) + tree.SaveVersion() + + tree.Set([]byte("key1"), []byte("val1")) + tree.Set([]byte("key2"), []byte("val1")) + tree.SaveVersion() + + tree.Set([]byte("key2"), []byte("val2")) + tree.SaveVersion() + + tree, err = NewMutableTree(d, 100, false) + require.NoError(err) + _, err = tree.Load() + require.NoError(err) + + 
require.NoError(tree.DeleteVersion(2)) + + val, err := tree.GetVersioned([]byte("key2"), 1) + require.NoError(err) + require.Equal("val0", string(val)) +} + +func TestVersionedTreeSpecialCase3(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(0) + require.NoError(err) + + tree.Set([]byte("m"), []byte("liWT0U6G")) + tree.Set([]byte("G"), []byte("7PxRXwUA")) + tree.SaveVersion() + + tree.Set([]byte("7"), []byte("XRLXgf8C")) + tree.SaveVersion() + + tree.Set([]byte("r"), []byte("bBEmIXBU")) + tree.SaveVersion() + + tree.Set([]byte("i"), []byte("kkIS35te")) + tree.SaveVersion() + + tree.Set([]byte("k"), []byte("CpEnpzKJ")) + tree.SaveVersion() + + tree.DeleteVersion(1) + tree.DeleteVersion(2) + tree.DeleteVersion(3) + tree.DeleteVersion(4) + + nodes, err := tree.ndb.nodes() + require.NoError(err) + require.Equal(tree.ImmutableTree().nodeSize(), len(nodes)) +} + +func TestVersionedTreeSaveAndLoad(t *testing.T) { + require := require.New(t) + d := db.NewMemDB() + tree, err := NewMutableTree(d, 0, false) + require.NoError(err) + + // Loading with an empty root is a no-op. + tree.Load() + + tree.Set([]byte("C"), []byte("so43QQFN")) + tree.SaveVersion() + + tree.Set([]byte("A"), []byte("ut7sTTAO")) + tree.SaveVersion() + + tree.Set([]byte("X"), []byte("AoWWC1kN")) + tree.SaveVersion() + + tree.SaveVersion() + tree.SaveVersion() + tree.SaveVersion() + + preHash, err := tree.Hash() + require.NoError(err) + require.NotNil(preHash) + + require.Equal(int64(6), tree.Version()) + + // Reload the tree, to test that roots and orphans are properly loaded. 
+ ntree, err := NewMutableTree(d, 0, false) + require.NoError(err) + ntree.Load() + + require.False(ntree.IsEmpty()) + require.Equal(int64(6), ntree.Version()) + + postHash, err := ntree.Hash() + require.NoError(err) + require.Equal(preHash, postHash) + + ntree.Set([]byte("T"), []byte("MhkWjkVy")) + ntree.SaveVersion() + + ntree.DeleteVersion(6) + ntree.DeleteVersion(5) + ntree.DeleteVersion(1) + ntree.DeleteVersion(2) + ntree.DeleteVersion(4) + ntree.DeleteVersion(3) + + require.False(ntree.IsEmpty()) + require.Equal(int64(4), ntree.ImmutableTree().Size()) + nodes, err := tree.ndb.nodes() + require.NoError(err) + require.Len(nodes, ntree.ImmutableTree().nodeSize()) +} + +func TestVersionedTreeErrors(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(100) + require.NoError(err) + + // Can't delete non-existent versions. + require.Error(tree.DeleteVersion(1)) + require.Error(tree.DeleteVersion(99)) + + tree.Set([]byte("key"), []byte("val")) + + // Saving with content is ok. + _, _, err = tree.SaveVersion() + require.NoError(err) + + // Can't delete current version. + require.Error(tree.DeleteVersion(1)) + + // Trying to get a key from a version which doesn't exist. + val, err := tree.GetVersioned([]byte("key"), 404) + require.NoError(err) + require.Nil(val) + + // Same thing with proof. We get an error because a proof couldn't be + // constructed. 
+ val, proof, err := tree.GetVersionedWithProof([]byte("key"), 404) + require.Nil(val) + require.Empty(proof) + require.Error(err) +} + +func TestVersionedCheckpoints(t *testing.T) { + require := require.New(t) + d, closeDB := getTestDB() + defer closeDB() + + tree, err := NewMutableTree(d, 100, false) + require.NoError(err) + versions := 50 + keysPerVersion := 10 + versionsPerCheckpoint := 5 + keys := map[int64]([][]byte){} + + for i := 1; i <= versions; i++ { + for j := 0; j < keysPerVersion; j++ { + k := []byte(iavlrand.RandStr(1)) + v := []byte(iavlrand.RandStr(8)) + keys[int64(i)] = append(keys[int64(i)], k) + tree.Set(k, v) + } + _, _, err = tree.SaveVersion() + require.NoError(err, "failed to save version") + } + + for i := 1; i <= versions; i++ { + if i%versionsPerCheckpoint != 0 { + err = tree.DeleteVersion(int64(i)) + require.NoError(err, "failed to delete") + } + } + + // Make sure all keys exist at least once. + for _, ks := range keys { + for _, k := range ks { + val, err := tree.Get(k) + require.NoError(err) + require.NotEmpty(val) + } + } + + // Make sure all keys from deleted versions aren't present. + for i := 1; i <= versions; i++ { + if i%versionsPerCheckpoint != 0 { + for _, k := range keys[int64(i)] { + val, err := tree.GetVersioned(k, int64(i)) + require.NoError(err) + require.Nil(val) + } + } + } + + // Make sure all keys exist at all checkpoints. + for i := 1; i <= versions; i++ { + for _, k := range keys[int64(i)] { + if i%versionsPerCheckpoint == 0 { + val, err := tree.GetVersioned(k, int64(i)) + require.NoError(err) + require.NotEmpty(val) + } + } + } +} + +func TestVersionedCheckpointsSpecialCase(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(0) + require.NoError(err) + key := []byte("k") + + tree.Set(key, []byte("val1")) + + tree.SaveVersion() + // ... + tree.SaveVersion() + // ... + tree.SaveVersion() + // ... + // This orphans "k" at version 1. 
+ tree.Set(key, []byte("val2")) + tree.SaveVersion() + + // When version 1 is deleted, the orphans should move to the next + // checkpoint, which is version 10. + tree.DeleteVersion(1) + + val, err := tree.GetVersioned(key, 2) + require.NotEmpty(val) + require.Equal([]byte("val1"), val) +} + +func TestVersionedCheckpointsSpecialCase2(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + tree.Set([]byte("U"), []byte("XamDUtiJ")) + tree.Set([]byte("A"), []byte("UkZBuYIU")) + tree.Set([]byte("H"), []byte("7a9En4uw")) + tree.Set([]byte("V"), []byte("5HXU3pSI")) + tree.SaveVersion() + + tree.Set([]byte("U"), []byte("Replaced")) + tree.Set([]byte("A"), []byte("Replaced")) + tree.SaveVersion() + + tree.Set([]byte("X"), []byte("New")) + tree.SaveVersion() + + tree.DeleteVersion(1) + tree.DeleteVersion(2) +} + +func TestVersionedCheckpointsSpecialCase3(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + tree.Set([]byte("n"), []byte("2wUCUs8q")) + tree.Set([]byte("l"), []byte("WQ7mvMbc")) + tree.SaveVersion() + + tree.Set([]byte("N"), []byte("ved29IqU")) + tree.Set([]byte("v"), []byte("01jquVXU")) + tree.SaveVersion() + + tree.Set([]byte("l"), []byte("bhIpltPM")) + tree.Set([]byte("B"), []byte("rj97IKZh")) + tree.SaveVersion() + + tree.DeleteVersion(2) + + tree.GetVersioned([]byte("m"), 1) +} + +func TestVersionedCheckpointsSpecialCase4(t *testing.T) { + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(t, err) + + tree.Set([]byte("U"), []byte("XamDUtiJ")) + tree.Set([]byte("A"), []byte("UkZBuYIU")) + tree.Set([]byte("H"), []byte("7a9En4uw")) + tree.Set([]byte("V"), []byte("5HXU3pSI")) + tree.SaveVersion() + + tree.Remove([]byte("U")) + tree.Remove([]byte("A")) + tree.SaveVersion() + + tree.Set([]byte("X"), []byte("New")) + tree.SaveVersion() + + val, err := tree.GetVersioned([]byte("A"), 2) + require.Nil(t, val) + + val, err = tree.GetVersioned([]byte("A"), 1) + require.NotEmpty(t, val) + + 
tree.DeleteVersion(1) + tree.DeleteVersion(2) + + val, err = tree.GetVersioned([]byte("A"), 2) + require.Nil(t, val) + + val, err = tree.GetVersioned([]byte("A"), 1) + require.Nil(t, val) +} + +func TestVersionedCheckpointsSpecialCase5(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + tree.Set([]byte("R"), []byte("ygZlIzeW")) + tree.SaveVersion() + + tree.Set([]byte("j"), []byte("ZgmCWyo2")) + tree.SaveVersion() + + tree.Set([]byte("R"), []byte("vQDaoz6Z")) + tree.SaveVersion() + + tree.DeleteVersion(1) + + tree.GetVersioned([]byte("R"), 2) +} + +func TestVersionedCheckpointsSpecialCase6(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + + tree.Set([]byte("Y"), []byte("MW79JQeV")) + tree.Set([]byte("7"), []byte("Kp0ToUJB")) + tree.Set([]byte("Z"), []byte("I26B1jPG")) + tree.Set([]byte("6"), []byte("ZG0iXq3h")) + tree.Set([]byte("2"), []byte("WOR27LdW")) + tree.Set([]byte("4"), []byte("MKMvc6cn")) + tree.SaveVersion() + + tree.Set([]byte("1"), []byte("208dOu40")) + tree.Set([]byte("G"), []byte("7isI9OQH")) + tree.Set([]byte("8"), []byte("zMC1YwpH")) + tree.SaveVersion() + + tree.Set([]byte("7"), []byte("bn62vWbq")) + tree.Set([]byte("5"), []byte("wZuLGDkZ")) + tree.SaveVersion() + + tree.DeleteVersion(1) + tree.DeleteVersion(2) + + tree.GetVersioned([]byte("Y"), 1) + tree.GetVersioned([]byte("7"), 1) + tree.GetVersioned([]byte("Z"), 1) + tree.GetVersioned([]byte("6"), 1) + tree.GetVersioned([]byte("s"), 1) + tree.GetVersioned([]byte("2"), 1) + tree.GetVersioned([]byte("4"), 1) +} + +func TestVersionedCheckpointsSpecialCase7(t *testing.T) { + tree, err := getTestTree(100) + require.NoError(t, err) + + tree.Set([]byte("n"), []byte("OtqD3nyn")) + tree.Set([]byte("W"), []byte("kMdhJjF5")) + tree.Set([]byte("A"), []byte("BM3BnrIb")) + tree.Set([]byte("I"), []byte("QvtCH970")) + tree.Set([]byte("L"), []byte("txKgOTqD")) + tree.Set([]byte("Y"), []byte("NAl7PC5L")) + tree.SaveVersion() + + tree.Set([]byte("7"), 
[]byte("qWcEAlyX")) + tree.SaveVersion() + + tree.Set([]byte("M"), []byte("HdQwzA64")) + tree.Set([]byte("3"), []byte("2Naa77fo")) + tree.Set([]byte("A"), []byte("SRuwKOTm")) + tree.Set([]byte("I"), []byte("oMX4aAOy")) + tree.Set([]byte("4"), []byte("dKfvbEOc")) + tree.SaveVersion() + + tree.Set([]byte("D"), []byte("3U4QbXCC")) + tree.Set([]byte("B"), []byte("FxExhiDq")) + tree.SaveVersion() + + tree.Set([]byte("A"), []byte("tWQgbFCY")) + tree.SaveVersion() + + tree.DeleteVersion(4) + + tree.GetVersioned([]byte("A"), 3) +} + +func TestVersionedTreeEfficiency(t *testing.T) { + require := require.New(t) + tree, err := NewMutableTree(db.NewMemDB(), 0, false) + require.NoError(err) + versions := 20 + keysPerVersion := 100 + keysAddedPerVersion := map[int]int{} + + keysAdded := 0 + for i := 1; i <= versions; i++ { + for j := 0; j < keysPerVersion; j++ { + // Keys of size one are likely to be overwritten. + tree.Set([]byte(iavlrand.RandStr(1)), []byte(iavlrand.RandStr(8))) + } + nodes, err := tree.ndb.nodes() + require.NoError(err) + sizeBefore := len(nodes) + tree.SaveVersion() + _, err = tree.ndb.nodes() + require.NoError(err) + nodes, err = tree.ndb.nodes() + require.NoError(err) + sizeAfter := len(nodes) + change := sizeAfter - sizeBefore + keysAddedPerVersion[i] = change + keysAdded += change + } + + keysDeleted := 0 + for i := 1; i < versions; i++ { + if tree.VersionExists(int64(i)) { + nodes, err := tree.ndb.nodes() + require.NoError(err) + sizeBefore := len(nodes) + tree.DeleteVersion(int64(i)) + nodes, err = tree.ndb.nodes() + require.NoError(err) + sizeAfter := len(nodes) + + change := sizeBefore - sizeAfter + keysDeleted += change + + require.InDelta(change, keysAddedPerVersion[i], float64(keysPerVersion)/5) + } + } + require.Equal(keysAdded-tree.ImmutableTree().nodeSize(), keysDeleted) +} + +func TestVersionedTreeProofs(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(0) + require.NoError(err) + + tree.Set([]byte("k1"), []byte("v1")) + 
tree.Set([]byte("k2"), []byte("v1")) + tree.Set([]byte("k3"), []byte("v1")) + _, _, err = tree.SaveVersion() + require.NoError(err) + + // fmt.Println("TREE VERSION 1") + // printNode(tree.ndb, tree.root, 0) + // fmt.Println("TREE VERSION 1 END") + + root1, err := tree.Hash() + require.NoError(err) + + tree.Set([]byte("k2"), []byte("v2")) + tree.Set([]byte("k4"), []byte("v2")) + _, _, err = tree.SaveVersion() + require.NoError(err) + + // fmt.Println("TREE VERSION 2") + // printNode(tree.ndb, tree.root, 0) + // fmt.Println("TREE VERSION END") + + root2, err := tree.Hash() + require.NoError(err) + require.NotEqual(root1, root2) + + tree.Remove([]byte("k2")) + _, _, err = tree.SaveVersion() + require.NoError(err) + + root3, err := tree.Hash() + require.NoError(err) + require.NotEqual(root2, root3) + + val, proof, err := tree.GetVersionedWithProof([]byte("k2"), 1) + require.NoError(err) + require.EqualValues(val, []byte("v1")) + require.NoError(proof.Verify(root1), proof.String()) + require.NoError(proof.VerifyItem([]byte("k2"), val)) + + val, proof, err = tree.GetVersionedWithProof([]byte("k4"), 1) + require.NoError(err) + require.Nil(val) + require.NoError(proof.Verify(root1)) + require.NoError(proof.VerifyAbsence([]byte("k4"))) + + val, proof, err = tree.GetVersionedWithProof([]byte("k2"), 2) + require.NoError(err) + require.EqualValues(val, []byte("v2")) + require.NoError(proof.Verify(root2), proof.String()) + require.NoError(proof.VerifyItem([]byte("k2"), val)) + + val, proof, err = tree.GetVersionedWithProof([]byte("k1"), 2) + require.NoError(err) + require.EqualValues(val, []byte("v1")) + require.NoError(proof.Verify(root2)) + require.NoError(proof.VerifyItem([]byte("k1"), val)) + + val, proof, err = tree.GetVersionedWithProof([]byte("k2"), 3) + + require.NoError(err) + require.Nil(val) + require.NoError(proof.Verify(root3)) + require.NoError(proof.VerifyAbsence([]byte("k2"))) + require.Error(proof.Verify(root1)) + require.Error(proof.Verify(root2)) +} + +func 
TestOrphans(t *testing.T) { + // If you create a sequence of saved versions + // Then randomly delete versions other than the first and last until only those two remain + // Any remaining orphan nodes should either have fromVersion == firstVersion || toVersion == lastVersion + require := require.New(t) + tree, err := NewMutableTree(db.NewMemDB(), 100, false) + require.NoError(err) + + NUMVERSIONS := 100 + NUMUPDATES := 100 + + for i := 0; i < NUMVERSIONS; i++ { + for j := 1; j < NUMUPDATES; j++ { + tree.Set(randBytes(2), randBytes(2)) + } + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not error") + } + + idx := iavlrand.RandPerm(NUMVERSIONS - 2) + for _, v := range idx { + err = tree.DeleteVersion(int64(v + 1)) + require.NoError(err, "DeleteVersion should not error") + } + + err = tree.ndb.traverseOrphans(func(k, v []byte) error { + var fromVersion, toVersion int64 + orphanKeyFormat.Scan(k, &toVersion, &fromVersion) + require.True(fromVersion == int64(1) || toVersion == int64(99), fmt.Sprintf(`Unexpected orphan key exists: %v with fromVersion = %d and toVersion = %d.\n + Any orphan remaining in db should have either fromVersion == 1 or toVersion == 99. 
Since Version 1 and 99 are only versions in db`, k, fromVersion, toVersion)) + return nil + }) + require.Nil(err) +} + +func TestVersionedTreeHash(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(0) + require.NoError(err) + + hash, err := tree.Hash() + require.NoError(err) + require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + tree.Set([]byte("I"), []byte("D")) + hash, err = tree.Hash() + require.NoError(err) + require.Equal("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hex.EncodeToString(hash)) + + hash1, _, err := tree.SaveVersion() + require.NoError(err) + + tree.Set([]byte("I"), []byte("F")) + hash, err = tree.Hash() + require.NoError(err) + require.EqualValues(hash1, hash) + + hash2, _, err := tree.SaveVersion() + require.NoError(err) + + val, proof, err := tree.GetVersionedWithProof([]byte("I"), 2) + require.NoError(err) + require.EqualValues([]byte("F"), val) + require.NoError(proof.Verify(hash2)) + require.NoError(proof.VerifyItem([]byte("I"), val)) +} + +func TestNilValueSemantics(t *testing.T) { + require := require.New(t) + tree, err := getTestTree(0) + require.NoError(err) + + _, err = tree.Set([]byte("k"), nil) + require.Error(err) +} + +func TestCopyValueSemantics(t *testing.T) { + require := require.New(t) + + tree, err := getTestTree(0) + require.NoError(err) + + val := []byte("v1") + + tree.Set([]byte("k"), val) + v, err := tree.Get([]byte("k")) + require.NoError(err) + require.Equal([]byte("v1"), v) + + val[1] = '2' + + val, err = tree.Get([]byte("k")) + require.Equal([]byte("v2"), val) +} + +func TestRollback(t *testing.T) { + require := require.New(t) + + tree, err := getTestTree(0) + require.NoError(err) + + tree.Set([]byte("k"), []byte("v")) + tree.SaveVersion() + + tree.Set([]byte("r"), []byte("v")) + tree.Set([]byte("s"), []byte("v")) + + tree.Rollback() + + tree.Set([]byte("t"), []byte("v")) + + tree.SaveVersion() + + require.Equal(int64(2), 
tree.ImmutableTree().Size()) + + val, err := tree.Get([]byte("r")) + require.Nil(val) + + val, err = tree.Get([]byte("s")) + require.Nil(val) + + val, err = tree.Get([]byte("t")) + require.Equal([]byte("v"), val) +} + +func TestLazyLoadVersion(t *testing.T) { + tree, err := getTestTree(0) + require.NoError(t, err) + maxVersions := 10 + + version, err := tree.LazyLoadVersion(0) + require.NoError(t, err, "unexpected error") + require.Equal(t, version, int64(0), "expected latest version to be zero") + + for i := 0; i < maxVersions; i++ { + tree.Set([]byte(fmt.Sprintf("key_%d", i+1)), []byte(fmt.Sprintf("value_%d", i+1))) + + _, _, err = tree.SaveVersion() + require.NoError(t, err, "SaveVersion should not fail") + } + + // require the ability to lazy load the latest version + version, err = tree.LazyLoadVersion(int64(maxVersions)) + require.NoError(t, err, "unexpected error when lazy loading version") + require.Equal(t, version, int64(maxVersions)) + + value, err := tree.Get([]byte(fmt.Sprintf("key_%d", maxVersions))) + require.NoError(t, err) + require.Equal(t, value, []byte(fmt.Sprintf("value_%d", maxVersions)), "unexpected value") + + // require the ability to lazy load an older version + version, err = tree.LazyLoadVersion(int64(maxVersions - 1)) + require.NoError(t, err, "unexpected error when lazy loading version") + require.Equal(t, version, int64(maxVersions-1)) + + value, err = tree.Get([]byte(fmt.Sprintf("key_%d", maxVersions-1))) + require.NoError(t, err) + require.Equal(t, value, []byte(fmt.Sprintf("value_%d", maxVersions-1)), "unexpected value") + + // require the inability to lazy load a non-valid version + version, err = tree.LazyLoadVersion(int64(maxVersions + 1)) + require.Error(t, err, "expected error when lazy loading version") + require.Equal(t, version, int64(maxVersions)) +} + +func TestOverwrite(t *testing.T) { + require := require.New(t) + + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + // Set one 
kv pair and save version 1 + tree.Set([]byte("key1"), []byte("value1")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + + // Set another kv pair and save version 2 + tree.Set([]byte("key2"), []byte("value2")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + + // Reload tree at version 1 + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + _, err = tree.LoadVersion(int64(1)) + require.NoError(err, "LoadVersion should not fail") + + // Attempt to put a different kv pair into the tree and save + tree.Set([]byte("key2"), []byte("different value 2")) + _, _, err = tree.SaveVersion() + require.Error(err, "SaveVersion should fail because of changed value") + + // Replay the original transition from version 1 to version 2 and attempt to save + tree.Set([]byte("key2"), []byte("value2")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail, overwrite was idempotent") +} + +func TestOverwriteEmpty(t *testing.T) { + require := require.New(t) + + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + // Save empty version 1 + _, _, err = tree.SaveVersion() + require.NoError(err) + + // Save empty version 2 + _, _, err = tree.SaveVersion() + require.NoError(err) + + // Save a key in version 3 + tree.Set([]byte("key"), []byte("value")) + _, _, err = tree.SaveVersion() + require.NoError(err) + + // Load version 1 and attempt to save a different key + _, err = tree.LoadVersion(1) + require.NoError(err) + tree.Set([]byte("foo"), []byte("bar")) + _, _, err = tree.SaveVersion() + require.Error(err) + + // However, deleting the key and saving an empty version should work, + // since it's the same as the existing version. 
+ tree.Remove([]byte("foo")) + _, version, err := tree.SaveVersion() + require.NoError(err) + require.EqualValues(2, version) +} + +func TestLoadVersionForOverwriting(t *testing.T) { + require := require.New(t) + + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + maxLength := 100 + for count := 1; count <= maxLength; count++ { + countStr := strconv.Itoa(count) + // Set one kv pair and save version + tree.Set([]byte("key"+countStr), []byte("value"+countStr)) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + } + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + targetVersion, _ := tree.LoadVersionForOverwriting(int64(maxLength * 2)) + require.Equal(targetVersion, int64(maxLength), "targetVersion shouldn't larger than the actual tree latest version") + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + _, err = tree.LoadVersionForOverwriting(int64(maxLength / 2)) + require.NoError(err, "LoadVersion should not fail") + + for version := 1; version <= maxLength/2; version++ { + exist := tree.VersionExists(int64(version)) + require.True(exist, "versions no more than 50 should exist") + } + + for version := (maxLength / 2) + 1; version <= maxLength; version++ { + exist := tree.VersionExists(int64(version)) + require.False(exist, "versions more than 50 should have been deleted") + } + + tree.Set([]byte("key49"), []byte("value49 different")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail, overwrite was allowed") + + tree.Set([]byte("key50"), []byte("value50 different")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail, overwrite was allowed") + + // Reload tree at version 50, the latest tree version is 52 + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + _, err = tree.LoadVersion(int64(maxLength / 2)) + require.NoError(err, "LoadVersion should not fail") + + 
tree.Set([]byte("key49"), []byte("value49 different")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail, write the same value") + + tree.Set([]byte("key50"), []byte("value50 different different")) + _, _, err = tree.SaveVersion() + require.Error(err, "SaveVersion should fail, overwrite was not allowed") + + tree.Set([]byte("key50"), []byte("value50 different")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail, write the same value") + + // The tree version now is 52 which is equal to latest version. + // Now any key value can be written into the tree + tree.Set([]byte("key any value"), []byte("value any value")) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail.") +} + +func TestDeleteVersionsCompare(t *testing.T) { + require := require.New(t) + + var databaseSizeDeleteVersionsRange, databaseSizeDeleteVersion, databaseSizeDeleteVersions string + + const maxLength = 100 + const fromLength = 5 + { + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + versions := make([]int64, 0, maxLength) + for count := 1; count <= maxLength; count++ { + versions = append(versions, int64(count)) + countStr := strconv.Itoa(count) + // Set kv pair and save version + tree.Set([]byte("aaa"), []byte("bbb")) + tree.Set([]byte("key"+countStr), []byte("value"+countStr)) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + } + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + targetVersion, err := tree.LoadVersion(int64(maxLength)) + require.NoError(err) + require.Equal(targetVersion, int64(maxLength), "targetVersion shouldn't larger than the actual tree latest version") + + err = tree.DeleteVersionsRange(versions[fromLength], versions[int64(maxLength/2)]) + require.NoError(err, "DeleteVersionsRange should not fail") + + databaseSizeDeleteVersionsRange = mdb.Stats()["database.size"] + } + { + 
mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + versions := make([]int64, 0, maxLength) + for count := 1; count <= maxLength; count++ { + versions = append(versions, int64(count)) + countStr := strconv.Itoa(count) + // Set kv pair and save version + tree.Set([]byte("aaa"), []byte("bbb")) + tree.Set([]byte("key"+countStr), []byte("value"+countStr)) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + } + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + targetVersion, err := tree.LoadVersion(int64(maxLength)) + require.NoError(err) + require.Equal(targetVersion, int64(maxLength), "targetVersion shouldn't larger than the actual tree latest version") + + for _, version := range versions[fromLength:int64(maxLength/2)] { + err = tree.DeleteVersion(version) + require.NoError(err, "DeleteVersion should not fail for %v", version) + } + + databaseSizeDeleteVersion = mdb.Stats()["database.size"] + } + { + mdb := db.NewMemDB() + tree, err := NewMutableTree(mdb, 0, false) + require.NoError(err) + + versions := make([]int64, 0, maxLength) + for count := 1; count <= maxLength; count++ { + versions = append(versions, int64(count)) + countStr := strconv.Itoa(count) + // Set kv pair and save version + tree.Set([]byte("aaa"), []byte("bbb")) + tree.Set([]byte("key"+countStr), []byte("value"+countStr)) + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + } + + tree, err = NewMutableTree(mdb, 0, false) + require.NoError(err) + targetVersion, err := tree.LoadVersion(int64(maxLength)) + require.NoError(err) + require.Equal(targetVersion, int64(maxLength), "targetVersion shouldn't larger than the actual tree latest version") + + err = tree.DeleteVersions(versions[fromLength:int64(maxLength/2)]...) 
+ require.NoError(err, "DeleteVersions should not fail") + + databaseSizeDeleteVersions = mdb.Stats()["database.size"] + } + + require.Equal(databaseSizeDeleteVersion, databaseSizeDeleteVersionsRange) + require.Equal(databaseSizeDeleteVersion, databaseSizeDeleteVersions) +} + +// BENCHMARKS + +func BenchmarkTreeLoadAndDelete(b *testing.B) { + numVersions := 5000 + numKeysPerVersion := 10 + + d, err := db.NewGoLevelDB("bench", ".") + if err != nil { + panic(err) + } + defer func() { _ = d.Close() }() + defer os.RemoveAll("./bench.db") + + tree, err := NewMutableTree(d, 0, false) + require.NoError(b, err) + for v := 1; v < numVersions; v++ { + for i := 0; i < numKeysPerVersion; i++ { + tree.Set([]byte(iavlrand.RandStr(16)), iavlrand.RandBytes(32)) + } + tree.SaveVersion() + } + + b.Run("LoadAndDelete", func(b *testing.B) { + for n := 0; n < b.N; n++ { + b.StopTimer() + tree, err = NewMutableTree(d, 0, false) + require.NoError(b, err) + runtime.GC() + b.StartTimer() + + // Load the tree from disk. + tree.Load() + + // Delete about 10% of the versions randomly. + // The trade-off is usually between load efficiency and delete + // efficiency, which is why we do both in this benchmark. + // If we can load quickly into a data-structure that allows for + // efficient deletes, we are golden. 
+ for v := 0; v < numVersions/10; v++ { + version := (iavlrand.RandInt() % numVersions) + 1 + tree.DeleteVersion(int64(version)) + } + } + }) +} + +func TestLoadVersionForOverwritingCase2(t *testing.T) { + require := require.New(t) + + tree, _ := NewMutableTreeWithOpts(db.NewMemDB(), 0, nil, false) + + for i := byte(0); i < 20; i++ { + tree.Set([]byte{i}, []byte{i}) + } + + _, _, err := tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + + for i := byte(0); i < 20; i++ { + tree.Set([]byte{i}, []byte{i + 1}) + } + + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail with the same key") + + for i := byte(0); i < 20; i++ { + tree.Set([]byte{i}, []byte{i + 2}) + } + tree.SaveVersion() + + removedNodes := []*Node{} + + nodes, err := tree.ndb.nodes() + require.NoError(err) + for _, n := range nodes { + if n.version > 1 { + removedNodes = append(removedNodes, n) + } + } + + _, err = tree.LoadVersionForOverwriting(1) + require.NoError(err, "LoadVersionForOverwriting should not fail") + + for i := byte(0); i < 20; i++ { + v, err := tree.Get([]byte{i}) + require.NoError(err) + require.Equal([]byte{i}, v) + } + + for _, n := range removedNodes { + has, _ := tree.ndb.Has(n.hash) + require.False(has, "LoadVersionForOverwriting should remove useless nodes") + } + + tree.Set([]byte{0x2}, []byte{0x3}) + + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") + + err = tree.DeleteVersion(1) + require.NoError(err, "DeleteVersion should not fail") + + tree.Set([]byte{0x1}, []byte{0x3}) + + _, _, err = tree.SaveVersion() + require.NoError(err, "SaveVersion should not fail") +} + +func TestLoadVersionForOverwritingCase3(t *testing.T) { + require := require.New(t) + + tree, err := NewMutableTreeWithOpts(db.NewMemDB(), 0, nil, false) + require.NoError(err) + + for i := byte(0); i < 20; i++ { + tree.Set([]byte{i}, []byte{i}) + } + _, _, err = tree.SaveVersion() + require.NoError(err) + + for i := 
byte(0); i < 20; i++ { + tree.Set([]byte{i}, []byte{i + 1}) + } + _, _, err = tree.SaveVersion() + require.NoError(err) + + removedNodes := []*Node{} + + nodes, err := tree.ndb.nodes() + require.NoError(err) + for _, n := range nodes { + if n.version > 1 { + removedNodes = append(removedNodes, n) + } + } + + for i := byte(0); i < 20; i++ { + tree.Remove([]byte{i}) + } + _, _, err = tree.SaveVersion() + require.NoError(err) + + _, err = tree.LoadVersionForOverwriting(1) + require.NoError(err) + for _, n := range removedNodes { + has, err := tree.ndb.Has(n.hash) + require.NoError(err) + require.False(has, "LoadVersionForOverwriting should remove useless nodes") + } + + for i := byte(0); i < 20; i++ { + v, err := tree.Get([]byte{i}) + require.NoError(err) + require.Equal([]byte{i}, v) + } +} + +func TestIterate_ImmutableTree_Version1(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + immutableTree, err := tree.GetImmutable(1) + require.NoError(t, err) + + assertImmutableMirrorIterate(t, immutableTree, mirror) +} + +func TestIterate_ImmutableTree_Version2(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + randomizeTreeAndMirror(t, tree, mirror) + + _, _, err = tree.SaveVersion() + require.NoError(t, err) + + immutableTree, err := tree.GetImmutable(2) + require.NoError(t, err) + + assertImmutableMirrorIterate(t, immutableTree, mirror) +} + +func TestGetByIndex_ImmutableTree(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + mirrorKeys := getSortedMirrorKeys(mirror) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + immutableTree, err := tree.GetImmutable(1) + require.NoError(t, err) + + isFastCacheEnabled, err := immutableTree.IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + + for index, expectedKey := range mirrorKeys { + expectedValue := 
mirror[expectedKey] + + actualKey, actualValue, err := immutableTree.GetByIndex(int64(index)) + require.NoError(t, err) + + require.Equal(t, expectedKey, string(actualKey)) + require.Equal(t, expectedValue, string(actualValue)) + } +} + +func TestGetWithIndex_ImmutableTree(t *testing.T) { + tree, mirror := getRandomizedTreeAndMirror(t) + mirrorKeys := getSortedMirrorKeys(mirror) + + _, _, err := tree.SaveVersion() + require.NoError(t, err) + + immutableTree, err := tree.GetImmutable(1) + require.NoError(t, err) + + isFastCacheEnabled, err := immutableTree.IsFastCacheEnabled() + require.NoError(t, err) + require.True(t, isFastCacheEnabled) + + for expectedIndex, key := range mirrorKeys { + expectedValue := mirror[key] + + actualIndex, actualValue, err := immutableTree.GetWithIndex([]byte(key)) + require.NoError(t, err) + + require.Equal(t, expectedValue, string(actualValue)) + require.Equal(t, int64(expectedIndex), actualIndex) + } +} + +func Benchmark_GetWithIndex(b *testing.B) { + db, err := db.NewDB("test", db.MemDBBackend, "") + require.NoError(b, err) + + const numKeyVals = 100000 + + t, err := NewMutableTree(db, numKeyVals, false) + require.NoError(b, err) + + keys := make([][]byte, 0, numKeyVals) + + for i := 0; i < numKeyVals; i++ { + key := randBytes(10) + keys = append(keys, key) + t.Set(key, randBytes(10)) + } + _, _, err = t.SaveVersion() + require.NoError(b, err) + + b.ReportAllocs() + runtime.GC() + + b.Run("fast", func(sub *testing.B) { + isFastCacheEnabled, err := t.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.True(b, isFastCacheEnabled) + b.ResetTimer() + for i := 0; i < sub.N; i++ { + randKey := rand.Intn(numKeyVals) + t.ImmutableTree().GetWithIndex(keys[randKey]) + } + }) + + b.Run("regular", func(sub *testing.B) { + // get non-latest version to force regular storage + _, latestVersion, err := t.SaveVersion() + require.NoError(b, err) + + itree, err := t.GetImmutable(latestVersion - 1) + require.NoError(b, err) + + 
isFastCacheEnabled, err := itree.IsFastCacheEnabled() + require.NoError(b, err) + require.False(b, isFastCacheEnabled) + b.ResetTimer() + for i := 0; i < sub.N; i++ { + randKey := rand.Intn(numKeyVals) + itree.GetWithIndex(keys[randKey]) + } + }) +} + +func Benchmark_GetByIndex(b *testing.B) { + db, err := db.NewDB("test", db.MemDBBackend, "") + require.NoError(b, err) + + const numKeyVals = 100000 + + t, err := NewMutableTree(db, numKeyVals, false) + require.NoError(b, err) + + for i := 0; i < numKeyVals; i++ { + key := randBytes(10) + t.Set(key, randBytes(10)) + } + _, _, err = t.SaveVersion() + require.NoError(b, err) + + b.ReportAllocs() + runtime.GC() + + b.Run("fast", func(sub *testing.B) { + isFastCacheEnabled, err := t.ImmutableTree().IsFastCacheEnabled() + require.NoError(b, err) + require.True(b, isFastCacheEnabled) + b.ResetTimer() + for i := 0; i < sub.N; i++ { + randIdx := rand.Intn(numKeyVals) + t.ImmutableTree().GetByIndex(int64(randIdx)) + } + }) + + b.Run("regular", func(sub *testing.B) { + // get non-latest version to force regular storage + _, latestVersion, err := t.SaveVersion() + require.NoError(b, err) + + itree, err := t.GetImmutable(latestVersion - 1) + require.NoError(b, err) + + isFastCacheEnabled, err := itree.IsFastCacheEnabled() + require.NoError(b, err) + require.False(b, isFastCacheEnabled) + + b.ResetTimer() + for i := 0; i < sub.N; i++ { + randIdx := rand.Intn(numKeyVals) + itree.GetByIndex(int64(randIdx)) + } + }) +} + +func TestNodeCacheStatisic(t *testing.T) { + const numKeyVals = 100000 + testcases := map[string]struct { + cacheSize int + expectFastCacheHitCnt int + expectFastCacheMissCnt int + expectCacheHitCnt int + expectCacheMissCnt int + }{ + "with_cache": { + cacheSize: numKeyVals, + expectFastCacheHitCnt: numKeyVals, + expectFastCacheMissCnt: 0, + expectCacheHitCnt: 1, + expectCacheMissCnt: 0, + }, + "without_cache": { + cacheSize: 0, + expectFastCacheHitCnt: 100000, // this value is hardcoded in nodedb for fast cache. 
+ expectFastCacheMissCnt: 0, + expectCacheHitCnt: 0, + expectCacheMissCnt: 1, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(sub *testing.T) { + stat := &Statistics{} + opts := &Options{Stat: stat} + db, err := db.NewDB("test", db.MemDBBackend, "") + require.NoError(t, err) + mt, err := NewMutableTreeWithOpts(db, tc.cacheSize, opts, false) + require.NoError(t, err) + + for i := 0; i < numKeyVals; i++ { + key := []byte(strconv.Itoa(i)) + _, err := mt.Set(key, randBytes(10)) + require.NoError(t, err) + } + _, ver, _ := mt.SaveVersion() + it, err := mt.GetImmutable(ver) + require.NoError(t, err) + + for i := 0; i < numKeyVals; i++ { + key := []byte(strconv.Itoa(i)) + val, err := it.Get(key) + require.NoError(t, err) + require.NotNil(t, val) + require.NotEmpty(t, val) + } + require.Equal(t, tc.expectFastCacheHitCnt, int(opts.Stat.GetFastCacheHitCnt())) + require.Equal(t, tc.expectFastCacheMissCnt, int(opts.Stat.GetFastCacheMissCnt())) + require.Equal(t, tc.expectCacheHitCnt, int(opts.Stat.GetCacheHitCnt())) + require.Equal(t, tc.expectCacheMissCnt, int(opts.Stat.GetCacheMissCnt())) + }) + } + +} diff --git a/sei-iavl/unsafe.go b/sei-iavl/unsafe.go new file mode 100644 index 0000000000..a5fbbe8b79 --- /dev/null +++ b/sei-iavl/unsafe.go @@ -0,0 +1,8 @@ +package iavl + +import ibytes "github.com/sei-protocol/sei-chain/sei-iavl/internal/bytes" + +var ( + unsafeToStr = ibytes.UnsafeBytesToStr + unsafeToBz = ibytes.UnsafeStrToBytes +) diff --git a/sei-iavl/unsaved_fast_iterator.go b/sei-iavl/unsaved_fast_iterator.go new file mode 100644 index 0000000000..cbbff85fe3 --- /dev/null +++ b/sei-iavl/unsaved_fast_iterator.go @@ -0,0 +1,218 @@ +package iavl + +import ( + "bytes" + "errors" + "sort" + + dbm "github.com/tendermint/tm-db" +) + +var ( + errUnsavedFastIteratorNilAdditionsGiven = errors.New("unsaved fast iterator must be created with unsaved additions but they were nil") + + errUnsavedFastIteratorNilRemovalsGiven = errors.New("unsaved fast 
iterator must be created with unsaved removals but they were nil") +) + +// UnsavedFastIterator is a dbm.Iterator for ImmutableTree +// it iterates over the latest state via fast nodes, +// taking advantage of keys being located in sequence in the underlying database. +type UnsavedFastIterator struct { + start, end []byte + valid bool + ascending bool + err error + ndb *nodeDB + nextKey []byte + nextVal []byte + fastIterator dbm.Iterator + + nextUnsavedNodeIdx int + unsavedFastNodeAdditions map[string]*FastNode + unsavedFastNodeRemovals map[string]interface{} + unsavedFastNodesToSort []string +} + +var _ dbm.Iterator = (*UnsavedFastIterator)(nil) + +func NewUnsavedFastIterator(start, end []byte, ascending bool, ndb *nodeDB, unsavedFastNodeAdditions map[string]*FastNode, unsavedFastNodeRemovals map[string]interface{}) *UnsavedFastIterator { + iter := &UnsavedFastIterator{ + start: start, + end: end, + ascending: ascending, + ndb: ndb, + unsavedFastNodeAdditions: unsavedFastNodeAdditions, + unsavedFastNodeRemovals: unsavedFastNodeRemovals, + nextKey: nil, + nextVal: nil, + nextUnsavedNodeIdx: 0, + fastIterator: NewFastIterator(start, end, ascending, ndb), + } + + // We need to ensure that we iterate over saved and unsaved state in order. + // The strategy is to sort unsaved nodes, the fast node on disk are already sorted. + // Then, we keep a pointer to both the unsaved and saved nodes, and iterate over them in order efficiently. 
+ for _, fastNode := range unsavedFastNodeAdditions { + if start != nil && bytes.Compare(fastNode.key, start) < 0 { + continue + } + + if end != nil && bytes.Compare(fastNode.key, end) >= 0 { + continue + } + + iter.unsavedFastNodesToSort = append(iter.unsavedFastNodesToSort, unsafeToStr(fastNode.key)) + } + + sort.Slice(iter.unsavedFastNodesToSort, func(i, j int) bool { + if ascending { + return iter.unsavedFastNodesToSort[i] < iter.unsavedFastNodesToSort[j] + } + return iter.unsavedFastNodesToSort[i] > iter.unsavedFastNodesToSort[j] + }) + + if iter.ndb == nil { + iter.err = errFastIteratorNilNdbGiven + iter.valid = false + return iter + } + + if iter.unsavedFastNodeAdditions == nil { + iter.err = errUnsavedFastIteratorNilAdditionsGiven + iter.valid = false + return iter + } + + if iter.unsavedFastNodeRemovals == nil { + iter.err = errUnsavedFastIteratorNilRemovalsGiven + iter.valid = false + return iter + } + + // Move to the first element + iter.Next() + + return iter +} + +// Domain implements dbm.Iterator. +// Maps the underlying nodedb iterator domain, to the 'logical' keys involved. +func (iter *UnsavedFastIterator) Domain() ([]byte, []byte) { + return iter.start, iter.end +} + +// Valid implements dbm.Iterator. 
+func (iter *UnsavedFastIterator) Valid() bool { + if iter.start != nil && iter.end != nil { + if bytes.Compare(iter.end, iter.start) != 1 { + return false + } + } + + return iter.fastIterator.Valid() || iter.nextUnsavedNodeIdx < len(iter.unsavedFastNodesToSort) || (iter.nextKey != nil && iter.nextVal != nil) +} + +// Key implements dbm.Iterator +func (iter *UnsavedFastIterator) Key() []byte { + return iter.nextKey +} + +// Value implements dbm.Iterator +func (iter *UnsavedFastIterator) Value() []byte { + return iter.nextVal +} + +// Next implements dbm.Iterator +// It's effectively running the constant space overhead algorithm for streaming through sorted lists: +// the sorted lists being underlying fast nodes & unsavedFastNodeChanges +func (iter *UnsavedFastIterator) Next() { + if iter.ndb == nil { + iter.err = errFastIteratorNilNdbGiven + iter.valid = false + return + } + + diskKeyStr := unsafeToStr(iter.fastIterator.Key()) + if iter.fastIterator.Valid() && iter.nextUnsavedNodeIdx < len(iter.unsavedFastNodesToSort) { + + if iter.unsavedFastNodeRemovals[diskKeyStr] != nil { + // If next fast node from disk is to be removed, skip it. 
+ iter.fastIterator.Next() + iter.Next() + return + } + + nextUnsavedKey := iter.unsavedFastNodesToSort[iter.nextUnsavedNodeIdx] + nextUnsavedNode := iter.unsavedFastNodeAdditions[nextUnsavedKey] + + var isUnsavedNext bool + if iter.ascending { + isUnsavedNext = diskKeyStr >= nextUnsavedKey + } else { + isUnsavedNext = diskKeyStr <= nextUnsavedKey + } + + if isUnsavedNext { + // Unsaved node is next + + if diskKeyStr == nextUnsavedKey { + // Unsaved update prevails over saved copy so we skip the copy from disk + iter.fastIterator.Next() + } + + iter.nextKey = nextUnsavedNode.key + iter.nextVal = nextUnsavedNode.value + + iter.nextUnsavedNodeIdx++ + return + } + // Disk node is next + iter.nextKey = iter.fastIterator.Key() + iter.nextVal = iter.fastIterator.Value() + + iter.fastIterator.Next() + return + } + + // if only nodes on disk are left, we return them + if iter.fastIterator.Valid() { + if iter.unsavedFastNodeRemovals[diskKeyStr] != nil { + // If next fast node from disk is to be removed, skip it. 
+ iter.fastIterator.Next() + iter.Next() + return + } + + iter.nextKey = iter.fastIterator.Key() + iter.nextVal = iter.fastIterator.Value() + + iter.fastIterator.Next() + return + } + + // if only unsaved nodes are left, we can just iterate + if iter.nextUnsavedNodeIdx < len(iter.unsavedFastNodesToSort) { + nextUnsavedKey := iter.unsavedFastNodesToSort[iter.nextUnsavedNodeIdx] + nextUnsavedNode := iter.unsavedFastNodeAdditions[nextUnsavedKey] + + iter.nextKey = nextUnsavedNode.key + iter.nextVal = nextUnsavedNode.value + + iter.nextUnsavedNodeIdx++ + return + } + + iter.nextKey = nil + iter.nextVal = nil +} + +// Close implements dbm.Iterator +func (iter *UnsavedFastIterator) Close() error { + iter.valid = false + return iter.fastIterator.Close() +} + +// Error implements dbm.Iterator +func (iter *UnsavedFastIterator) Error() error { + return iter.err +} diff --git a/sei-iavl/util.go b/sei-iavl/util.go new file mode 100644 index 0000000000..51590a0e1f --- /dev/null +++ b/sei-iavl/util.go @@ -0,0 +1,103 @@ +package iavl + +import ( + "fmt" + "os" + "strings" +) + +func maxInt8(a, b int8) int8 { + if a > b { + return a + } + return b +} + +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret +} + +// Returns a slice of the same length (big endian) +// except incremented by one. +// Appends 0x00 if bz is all 0xFF. 
+// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i]++ + return + } + ret[i] = byte(0x00) + if i == 0 { + // here, the original bz is all 0xFF, so we keep the original and append 0x00 + // instead of returning all 0x00 + ret = cp(bz) + return append(ret, 0x00) + } + } + return []byte{0x00} +} + +// Colors: ------------------------------------------------ + +const ( + ANSIReset = "\x1b[0m" + ANSIBright = "\x1b[1m" + + ANSIFgGreen = "\x1b[32m" + ANSIFgBlue = "\x1b[34m" + ANSIFgCyan = "\x1b[36m" +) + +// color the string s with color 'color' +// unless s is already colored +func treat(s string, color string) string { + if len(s) > 2 && s[:2] == "\x1b[" { + return s + } + return color + s + ANSIReset +} + +func treatAll(color string, args ...interface{}) string { + parts := make([]string, 0, len(args)) + for _, arg := range args { + parts = append(parts, treat(fmt.Sprintf("%v", arg), color)) + } + return strings.Join(parts, "") +} + +func Green(args ...interface{}) string { + return treatAll(ANSIFgGreen, args...) +} + +func Blue(args ...interface{}) string { + return treatAll(ANSIFgBlue, args...) +} + +func Cyan(args ...interface{}) string { + return treatAll(ANSIFgCyan, args...) +} + +// ColoredBytes takes in the byte that you would like to show as a string and byte +// and will display them in a human readable format. +// If the environment variable TENDERMINT_IAVL_COLORS_ON is set to a non-empty string then different colors will be used for bytes and strings. 
+func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string) string { + colors := os.Getenv("TENDERMINT_IAVL_COLORS_ON") + if colors == "" { + for _, b := range data { + return string(b) + } + } + s := "" + for _, b := range data { + if 0x21 <= b && b < 0x7F { + s += textColor(string(b)) + } else { + s += bytesColor(fmt.Sprintf("%02X", b)) + } + } + return s +} diff --git a/sei-iavl/version.go b/sei-iavl/version.go new file mode 100644 index 0000000000..9c6627b4b9 --- /dev/null +++ b/sei-iavl/version.go @@ -0,0 +1,37 @@ +package iavl + +import ( + "fmt" + "runtime" +) + +// Version of iavl. Fill in fields with build flags +var ( + Version = "" + Commit = "" + Branch = "" +) + +// VersionInfo contains useful versioning information in struct +type VersionInfo struct { + IAVL string `json:"iavl"` + GitCommit string `json:"commit"` + Branch string `json:"branch"` + GoVersion string `json:"go"` +} + +func (v VersionInfo) String() string { + return fmt.Sprintf(`iavl: %s +git commit: %s +git branch: %s +%s`, v.IAVL, v.GitCommit, v.Branch, v.GoVersion) +} + +// Returns VersionInfo with global vars filled in +func GetVersionInfo() VersionInfo { + return VersionInfo{ + Version, + Commit, + Branch, + fmt.Sprintf("go version %s %s/%s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH)} +} diff --git a/sei-iavl/with_gcc_test.go b/sei-iavl/with_gcc_test.go new file mode 100644 index 0000000000..433ab3acb3 --- /dev/null +++ b/sei-iavl/with_gcc_test.go @@ -0,0 +1,19 @@ +//go:build gcc +// +build gcc + +// This file exists because some of the DBs e.g CLevelDB +// require gcc as the compiler before they can ran otherwise +// we'll encounter crashes such as in https://github.com/tendermint/merkleeyes/issues/39 + +package iavl + +import ( + "testing" + + db "github.com/tendermint/tm-db" +) + +func BenchmarkImmutableAvlTreeCLevelDB(b *testing.B) { + db := db.NewDB("test", db.CLevelDBBackendStr, "./") + benchmarkImmutableAvlTreeWithDB(b, db) +} diff --git 
a/sei-wasmd/x/wasm/types/iavl_range_test.go b/sei-wasmd/x/wasm/types/iavl_range_test.go index a361525e10..bdd2135e83 100644 --- a/sei-wasmd/x/wasm/types/iavl_range_test.go +++ b/sei-wasmd/x/wasm/types/iavl_range_test.go @@ -5,7 +5,7 @@ import ( "github.com/cosmos/cosmos-sdk/store" "github.com/cosmos/cosmos-sdk/store/iavl" - iavl2 "github.com/cosmos/iavl" + iavl2 "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" ) diff --git a/tools/hash_verification/iavl/scanner.go b/tools/hash_verification/iavl/scanner.go index 492896f83e..60c18db936 100644 --- a/tools/hash_verification/iavl/scanner.go +++ b/tools/hash_verification/iavl/scanner.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/cosmos/cosmos-sdk/store/rootmulti" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/ss/types" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/sei-protocol/sei-chain/tools/hash_verification/hasher" "github.com/sei-protocol/sei-chain/tools/utils" dbm "github.com/tendermint/tm-db" diff --git a/tools/migration/cmd/cmd.go b/tools/migration/cmd/cmd.go index 2589ffa69d..b6b158abb3 100644 --- a/tools/migration/cmd/cmd.go +++ b/tools/migration/cmd/cmd.go @@ -7,9 +7,9 @@ import ( "time" "github.com/cosmos/cosmos-sdk/store/rootmulti" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/config" sstypes "github.com/sei-protocol/sei-chain/sei-db/ss" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/sei-protocol/sei-chain/tools/migration/sc" "github.com/sei-protocol/sei-chain/tools/migration/ss" "github.com/sei-protocol/sei-chain/tools/utils" diff --git a/tools/migration/ss/migrator.go b/tools/migration/ss/migrator.go index e58438776e..dbe7ce50d9 100644 --- a/tools/migration/ss/migrator.go +++ b/tools/migration/ss/migrator.go @@ -6,8 +6,8 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/cosmos/iavl" "github.com/sei-protocol/sei-chain/sei-db/ss/types" + iavl 
"github.com/sei-protocol/sei-chain/sei-iavl" "github.com/sei-protocol/sei-chain/tools/utils" seimetrics "github.com/sei-protocol/sei-chain/utils/metrics" dbm "github.com/tendermint/tm-db" diff --git a/x/evm/keeper/receipt.go b/x/evm/keeper/receipt.go index 689094e900..c81209e7de 100644 --- a/x/evm/keeper/receipt.go +++ b/x/evm/keeper/receipt.go @@ -8,9 +8,9 @@ import ( "github.com/cosmos/cosmos-sdk/store/prefix" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/iavl" "github.com/ethereum/go-ethereum/common" "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl" "github.com/ethereum/go-ethereum/core" ethtypes "github.com/ethereum/go-ethereum/core/types"