From 7c5e216b7faceb73a0b6b983e201f590d834cc7d Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 13:31:39 -0600 Subject: [PATCH 001/119] Created a cache for flatKV. --- sei-db/state_db/sc/flatkv/flatcache/cache.go | 27 +++ .../sc/flatkv/flatcache/cache_impl.go | 34 ++++ .../sc/flatkv/flatcache/read_scheduler.go | 17 ++ sei-db/state_db/sc/flatkv/flatcache/shard.go | 161 ++++++++++++++++++ .../sc/flatkv/flatcache/shard_manager.go | 47 +++++ 5 files changed, 286 insertions(+) create mode 100644 sei-db/state_db/sc/flatkv/flatcache/cache.go create mode 100644 sei-db/state_db/sc/flatkv/flatcache/cache_impl.go create mode 100644 sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go create mode 100644 sei-db/state_db/sc/flatkv/flatcache/shard.go create mode 100644 sei-db/state_db/sc/flatkv/flatcache/shard_manager.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go new file mode 100644 index 0000000000..aebcba1d7c --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -0,0 +1,27 @@ +package flatcache + +import "github.com/sei-protocol/sei-chain/sei-iavl/proto" + +// Cache describes a cache kapable of being used by a FlatKV store. +type Cache interface { + + // TODO decide if we should support individual modifications + + // Get returns the value for the given key, or (nil, false) if not found. + Get(key []byte) ([]byte, bool, error) + + // GetPrevious returns the value for the given key, or (nil, false) if not found. + // This will only return a value that is different than the current value returned by Get() + // if the cache is dirty, i.e. if there is data that has not yet been flushed down into the underlying storage. + // In the case where the cache is not dirty, this method will return the same value as Get(). + GetPrevious(key []byte) ([]byte, bool, error) + + // Set sets the value for the given key. 
+ Set(key []byte, value []byte) error + + // Delete deletes the value for the given key. + Delete(key []byte) error + + // BatchSet sets the values for a batch of keys. + BatchSet(entries []*proto.KVPair) error +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go new file mode 100644 index 0000000000..d94fc20f4a --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -0,0 +1,34 @@ +package flatcache + +import "github.com/sei-protocol/sei-chain/sei-iavl/proto" + +var _ Cache = (*cache)(nil) + +// A standard implementation of a flatcache. +type cache struct { +} + +// Creates a new Cache. +func NewCache() Cache { + return &cache{} +} + +func (f *cache) BatchSet(entries []*proto.KVPair) error { + panic("unimplemented") +} + +func (f *cache) Delete(key []byte) error { + panic("unimplemented") +} + +func (f *cache) Get(key []byte) ([]byte, bool, error) { + panic("unimplemented") +} + +func (f *cache) GetPrevious(key []byte) ([]byte, bool, error) { + panic("unimplemented") +} + +func (f *cache) Set(key []byte, value []byte) error { + panic("unimplemented") +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go b/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go new file mode 100644 index 0000000000..abfbc67b1a --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go @@ -0,0 +1,17 @@ +package flatcache + +// A utility for scheduling asyncronous DB reads. +type readScheduler struct { +} + +// Creates a new ReadScheduler. +func NewReadScheduler() *readScheduler { + return &readScheduler{} +} + +// ScheduleRead schedules a read for the given key within the given shard. +// This method returns immediately, and the read is performed asynchronously. 
+// When eventually completed, the read result is inserted into the provided shard entry +func (r *readScheduler) ScheduleRead(key []byte, entry *shardEntry) error { + panic("unimplemented") +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go new file mode 100644 index 0000000000..2da1cf7304 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -0,0 +1,161 @@ +package flatcache + +import ( + "context" + "fmt" + "sync" + + "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) + +// TODO unsafe byte-> string conversion maybe? + +// A single shard of a Cache. +type shard struct { + ctx context.Context + + // A lock to protect the shard's data. + lock sync.Mutex + + // The data in the shard. + data map[string]*shardEntry + + // A scheduler for asyncronous reads. + readScheduler *readScheduler +} + +// The status of a value in the cache. +type valueStatus int + +const ( + // The value is not known and we are not currently attempting to find it. + statusUnknown valueStatus = iota + // We've scheduled a read of the value but haven't yet finsihed the read. + statusScheduled + // The data is available. + statusAvailable + // We are aware that the value is deleted (special case of data being avialable). + statusDeleted +) + +// A single shardEntry in a shard. Records data for a single key. +type shardEntry struct { + // The parent shard that contains this entry. + shard *shard + + // The current status of this entry. + status valueStatus + + // The value, if known. + value []byte + + // If the value is not available when we request it, + // it will be written to this channel when it is available. + valueChan chan []byte +} + +// Creates a new Shard. +func NewShard(readScheduler *readScheduler) *shard { + return &shard{ + readScheduler: readScheduler, + lock: sync.Mutex{}, + } +} + +// Get returns the value for the given key, or (nil, false) if not found. 
+func (s *shard) Get(key []byte) ([]byte, bool, error) { + s.lock.Lock() + + entry := s.getEntry(key) + + switch entry.status { + + case statusAvailable: + value := entry.value + s.lock.Unlock() + return value, true, nil + case statusDeleted: + s.lock.Unlock() + return nil, false, nil + case statusScheduled: + // Another goroutine initiated a read, wait for that read to finish. + valueChan := entry.valueChan + s.lock.Unlock() + value := <-valueChan + valueChan <- value // reload the channel in case there are other listeners + return value, value != nil, nil + case statusUnknown: + // We are the first goroutine to read this value. + entry.status = statusScheduled + valueChan := make(chan []byte, 1) + entry.valueChan = valueChan + s.readScheduler.ScheduleRead(key, entry) + s.lock.Unlock() + value := <-valueChan + valueChan <- value // reload the channel in case there are other listeners + return value, value != nil, nil + default: + panic(fmt.Sprintf("unexpected statustatus: %#v", entry.status)) + } +} + +// This method is called by the read scheduler when a value becomes available. +func (se *shardEntry) InjectValue(value []byte) { + se.shard.lock.Lock() + + if value == nil { + se.status = statusDeleted + } else { + se.status = statusAvailable + se.value = value + } + + se.shard.lock.Unlock() + + se.valueChan <- value +} + +// Get a shard entry for a given key. Caller is responsible for holding the shard's lock +// when this method is hcalled. +func (s *shard) getEntry(key []byte) *shardEntry { + entry, ok := s.data[string(key)] + if !ok { + entry = &shardEntry{ + shard: s, + status: statusUnknown, + } + s.data[string(key)] = entry + + // TODO register with GC queue + } + + return entry +} + +// GetPrevious returns the value for the given key, or (nil, false) if not found. +// This will only return a value that is different than the current value returned by Get() +// if the cache is dirty, i.e. 
if there is data that has not yet been flushed down into the underlying storage. +// In the case where the cache is not dirty, this method will return the same value as Get(). +func (s *shard) GetPrevious(key []byte) ([]byte, bool, error) { + panic("unimplemented") +} + +// Set sets the value for the given key. +func (s *shard) Set(key []byte, value []byte) error { + panic("unimplemented") +} + +// BatchSet sets the values for a batch of keys. +func (s *shard) BatchSet(entries []*proto.KVPair) error { + panic("unimplemented") +} + +// Delete deletes the value for the given key. +func (s *shard) Delete(key []byte) error { + panic("unimplemented") +} + +// RunGarbageCollection runs the garbage collection process. +func (s *shard) RunGarbageCollection() error { + panic("unimplemented") +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go b/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go new file mode 100644 index 0000000000..2f5183f9b5 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go @@ -0,0 +1,47 @@ +package flatcache + +import ( + "errors" + "hash/maphash" + "sync" +) + +var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") + +// A utility for assigning keys to shard indices. +type shardManager struct { + // A random seed that makmes it hard for an attacker to predict the shard index and to skew the distribution. + seed maphash.Seed + // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) + mask uint64 + // reusable Hash objects to avoid allocs + pool sync.Pool +} + +// Creates a new Sharder. Number of shards must be a power of two and greater than 0. 
+func NewShardManager(numShards uint64) (*shardManager, error) { + if numShards <= 0 || (numShards&(numShards-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + + return &shardManager{ + seed: maphash.MakeSeed(), // secret, randomized + mask: numShards - 1, + pool: sync.Pool{ + New: func() any { return new(maphash.Hash) }, + }, + }, nil +} + +// Shard returns a shard index in [0, numShards). +// addr should be the raw address bytes (e.g., 20-byte ETH address). +func (s *shardManager) Shard(addr []byte) uint64 { + h := s.pool.Get().(*maphash.Hash) + h.SetSeed(s.seed) + h.Reset() + _, _ = h.Write(addr) + x := h.Sum64() + s.pool.Put(h) + + return x & s.mask +} From 4a404eeb335c677153fb21afacf29eb6a52c02fd Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 14:08:05 -0600 Subject: [PATCH 002/119] checkpoint --- sei-db/state_db/sc/flatkv/flatcache/cache.go | 12 +-- .../state_db/sc/flatkv/flatcache/lru_queue.go | 42 +++++++++ sei-db/state_db/sc/flatkv/flatcache/shard.go | 87 ++++++++++++++----- 3 files changed, 109 insertions(+), 32 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/flatcache/lru_queue.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go index aebcba1d7c..fdbc10c81d 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -10,18 +10,12 @@ type Cache interface { // Get returns the value for the given key, or (nil, false) if not found. Get(key []byte) ([]byte, bool, error) - // GetPrevious returns the value for the given key, or (nil, false) if not found. - // This will only return a value that is different than the current value returned by Get() - // if the cache is dirty, i.e. if there is data that has not yet been flushed down into the underlying storage. - // In the case where the cache is not dirty, this method will return the same value as Get(). 
- GetPrevious(key []byte) ([]byte, bool, error) - // Set sets the value for the given key. - Set(key []byte, value []byte) error + Set(key []byte, value []byte) // Delete deletes the value for the given key. - Delete(key []byte) error + Delete(key []byte) // BatchSet sets the values for a batch of keys. - BatchSet(entries []*proto.KVPair) error + BatchSet(entries []*proto.KVPair) } diff --git a/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go b/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go new file mode 100644 index 0000000000..da94f3b205 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go @@ -0,0 +1,42 @@ +package flatcache + +// Implements a queue-like abstraction with LRU semantics. Not thread safe. +type lruQueue struct { +} + +// Create a new LRU queue. +func NewLRUQueue() *lruQueue { + return &lruQueue{} +} + +// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. +func (lru *lruQueue) Push( + // the key in the cache that was recently interacted with + key []byte, + // the size of the key + value + size int, +) { + +} + +// Signal that an entry has been interated with, moving it to the to the back of the queue +// (i.e. making it so it doesn't get popped soon). +func (lru *lruQueue) Touch(key []byte) { + +} + +// Returns the total size of all entries in the LRU queue. +func (lru *lruQueue) GetTotalSize() int { + return 0 +} + +// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. +func (lru *lruQueue) GetCount() int { + return 0 +} + +// Pops a single element out of the queue. The element removed is the entry least recently passed to Update(). +// Panics if the queue is empty. 
+func (lru *lruQueue) PopLeastRecentlyUsed() []byte { + return nil +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index 2da1cf7304..7b3bbf975b 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -20,8 +20,14 @@ type shard struct { // The data in the shard. data map[string]*shardEntry + // Organizes data for garbage collection. + gcQueue *lruQueue + // A scheduler for asyncronous reads. readScheduler *readScheduler + + // The maximum size of this cache, in bytes. + maxSize int } // The status of a value in the cache. @@ -55,11 +61,16 @@ type shardEntry struct { } // Creates a new Shard. -func NewShard(readScheduler *readScheduler) *shard { +func NewShard(readScheduler *readScheduler, maxSize int) (*shard, error) { + + if maxSize <= 0 { + return nil, fmt.Errorf("maxSize must be greater than 0") + } + return &shard{ readScheduler: readScheduler, lock: sync.Mutex{}, - } + }, nil } // Get returns the value for the given key, or (nil, false) if not found. @@ -72,9 +83,11 @@ func (s *shard) Get(key []byte) ([]byte, bool, error) { case statusAvailable: value := entry.value + s.gcQueue.Touch(key) s.lock.Unlock() return value, true, nil case statusDeleted: + s.gcQueue.Touch(key) s.lock.Unlock() return nil, false, nil case statusScheduled: @@ -100,7 +113,7 @@ func (s *shard) Get(key []byte) ([]byte, bool, error) { } // This method is called by the read scheduler when a value becomes available. -func (se *shardEntry) InjectValue(value []byte) { +func (se *shardEntry) InjectValue(key []byte, value []byte) { se.shard.lock.Lock() if value == nil { @@ -110,13 +123,15 @@ func (se *shardEntry) InjectValue(value []byte) { se.value = value } + se.shard.gcQueue.Push(key, len(key)+len(value)) + se.shard.lock.Unlock() - + se.valueChan <- value } // Get a shard entry for a given key. Caller is responsible for holding the shard's lock -// when this method is hcalled. 
+// when this method is called. func (s *shard) getEntry(key []byte) *shardEntry { entry, ok := s.data[string(key)] if !ok { @@ -125,37 +140,63 @@ func (s *shard) getEntry(key []byte) *shardEntry { status: statusUnknown, } s.data[string(key)] = entry - - // TODO register with GC queue } - return entry } -// GetPrevious returns the value for the given key, or (nil, false) if not found. -// This will only return a value that is different than the current value returned by Get() -// if the cache is dirty, i.e. if there is data that has not yet been flushed down into the underlying storage. -// In the case where the cache is not dirty, this method will return the same value as Get(). -func (s *shard) GetPrevious(key []byte) ([]byte, bool, error) { - panic("unimplemented") +// Set sets the value for the given key. +func (s *shard) Set(key []byte, value []byte) { + s.lock.Lock() + s.setUnlocked(key, value) + s.lock.Unlock() } -// Set sets the value for the given key. -func (s *shard) Set(key []byte, value []byte) error { - panic("unimplemented") +// Set a value. Caller is required to hold the lock. +func (s *shard) setUnlocked(key []byte, value []byte) { + entry := s.getEntry(key) + entry.status = statusAvailable + entry.value = value + + s.gcQueue.Push(key, len(key)+len(value)) } // BatchSet sets the values for a batch of keys. -func (s *shard) BatchSet(entries []*proto.KVPair) error { - panic("unimplemented") +func (s *shard) BatchSet(entries []*proto.KVPair) { + s.lock.Lock() + for _, entry := range entries { + if entry.Delete { + s.deleteUnlocked(entry.Key) + } else { + s.setUnlocked(entry.Key, entry.Value) + } + } + s.lock.Unlock() } // Delete deletes the value for the given key. -func (s *shard) Delete(key []byte) error { - panic("unimplemented") +func (s *shard) Delete(key []byte) { + s.lock.Lock() + s.deleteUnlocked(key) + s.lock.Unlock() +} + +// Delete a value. Caller is required to hold the lock. 
+func (s *shard) deleteUnlocked(key []byte) { + entry := s.getEntry(key) + entry.status = statusDeleted + entry.value = nil + + s.gcQueue.Push(key, len(key)) } // RunGarbageCollection runs the garbage collection process. -func (s *shard) RunGarbageCollection() error { - panic("unimplemented") +func (s *shard) RunGarbageCollection() { + s.lock.Lock() + + for s.gcQueue.GetTotalSize() > s.maxSize { + next := s.gcQueue.PopLeastRecentlyUsed() + delete(s.data, string(next)) // TODO use unsafe copy + } + + s.lock.Unlock() } From d36e8258b05731752372a8a6989df1cc232f9359 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 14:31:45 -0600 Subject: [PATCH 003/119] incremental progress --- sei-db/state_db/sc/flatkv/flatcache/cache.go | 2 +- .../sc/flatkv/flatcache/cache_impl.go | 106 +++++++++++++++--- .../state_db/sc/flatkv/flatcache/lru_queue.go | 51 ++++++++- .../sc/flatkv/flatcache/lru_queue_test.go | 82 ++++++++++++++ .../sc/flatkv/flatcache/read_scheduler.go | 58 +++++++++- sei-db/state_db/sc/flatkv/flatcache/shard.go | 10 +- 6 files changed, 281 insertions(+), 28 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go index fdbc10c81d..ead7e82fc8 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -8,7 +8,7 @@ type Cache interface { // TODO decide if we should support individual modifications // Get returns the value for the given key, or (nil, false) if not found. - Get(key []byte) ([]byte, bool, error) + Get(key []byte) ([]byte, bool) // Set sets the value for the given key. 
Set(key []byte, value []byte) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index d94fc20f4a..fd087a39b2 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -1,34 +1,112 @@ package flatcache -import "github.com/sei-protocol/sei-chain/sei-iavl/proto" +import ( + "context" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-iavl/proto" +) var _ Cache = (*cache)(nil) // A standard implementation of a flatcache. type cache struct { + // A utility for assigning keys to shard indices. + shardManager *shardManager + + // The shards in the cache. + shards []*shard + + // A scheduler for asyncronous reads. + readScheduler *readScheduler } // Creates a new Cache. -func NewCache() Cache { - return &cache{} -} +func NewCache( + ctx context.Context, + // A function that reads a value from the database. + readFunc func(key []byte) []byte, + // The number of shards in the cache. Must be a power of two and greater than 0. + shardCount int, + // The maximum size of the cache, in bytes. + maxSize int, + // The number of background goroutines to read values from the database. + readWorkerCount int, + // The max size of the read queue. 
+ readQueueSize int, +) (Cache, error) { + if shardCount <= 0 || (shardCount&(shardCount-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + if maxSize <= 0 { + return nil, fmt.Errorf("maxSize must be greater than 0") + } + if readWorkerCount <= 0 { + return nil, fmt.Errorf("readWorkerCount must be greater than 0") + } + if readQueueSize <= 0 { + return nil, fmt.Errorf("readQueueSize must be greater than 0") + } + + shardManager, err := NewShardManager(uint64(shardCount)) + if err != nil { + return nil, fmt.Errorf("failed to create shard manager: %w", err) + } + + readScheduler := NewReadScheduler(ctx, readFunc, readWorkerCount, readQueueSize) -func (f *cache) BatchSet(entries []*proto.KVPair) error { - panic("unimplemented") + sizePerShard := maxSize / shardCount + if sizePerShard <= 0 { + return nil, fmt.Errorf("maxSize must be greater than shardCount") + } + + shards := make([]*shard, shardCount) + for i := 0; i < shardCount; i++ { + shards[i], err = NewShard(readScheduler, sizePerShard) + if err != nil { + return nil, fmt.Errorf("failed to create shard: %w", err) + } + } + + return &cache{ + shardManager: shardManager, + shards: shards, + readScheduler: readScheduler, + }, nil } -func (f *cache) Delete(key []byte) error { - panic("unimplemented") +func (f *cache) BatchSet(entries []*proto.KVPair) { + + // First, sort entries by shard index. + // This allows us to set all values in a single shard with only a single lock acquisition. + shardMap := make(map[uint64][]*proto.KVPair) + for _, entry := range entries { + shardMap[f.shardManager.Shard(entry.Key)] = append(shardMap[f.shardManager.Shard(entry.Key)], entry) + } + + // This is probably qutie fast, but if it isn't it can be parallelized. 
+ for shardIndex, shardEntries := range shardMap { + shard := f.shards[shardIndex] + shard.BatchSet(shardEntries) + } } -func (f *cache) Get(key []byte) ([]byte, bool, error) { - panic("unimplemented") +func (f *cache) Delete(key []byte) { + shardIndex := f.shardManager.Shard(key) + shard := f.shards[shardIndex] + shard.Delete(key) } -func (f *cache) GetPrevious(key []byte) ([]byte, bool, error) { - panic("unimplemented") +func (f *cache) Get(key []byte) ([]byte, bool) { + shardIndex := f.shardManager.Shard(key) + shard := f.shards[shardIndex] + return shard.Get(key) } -func (f *cache) Set(key []byte, value []byte) error { - panic("unimplemented") +func (f *cache) Set(key []byte, value []byte) { + shardIndex := f.shardManager.Shard(key) + shard := f.shards[shardIndex] + shard.Set(key, value) } + +// TODO create a warming mechanism diff --git a/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go b/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go index da94f3b205..545e985946 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go +++ b/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go @@ -1,12 +1,25 @@ package flatcache +import "container/list" + // Implements a queue-like abstraction with LRU semantics. Not thread safe. type lruQueue struct { + order *list.List + entries map[string]*list.Element + totalSize int +} + +type lruQueueEntry struct { + key []byte + size int } // Create a new LRU queue. func NewLRUQueue() *lruQueue { - return &lruQueue{} + return &lruQueue{ + order: list.New(), + entries: make(map[string]*list.Element), + } } // Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. 
@@ -16,27 +29,55 @@ func (lru *lruQueue) Push( // the size of the key + value size int, ) { + keyString := string(key) // TODO revisit and maybe do unsafe copies + if elem, ok := lru.entries[keyString]; ok { + entry := elem.Value.(*lruQueueEntry) + lru.totalSize += size - entry.size + entry.size = size + lru.order.MoveToBack(elem) + return + } + keyCopy := append([]byte(nil), key...) // TODO don't do this + elem := lru.order.PushBack(&lruQueueEntry{ + key: keyCopy, + size: size, + }) + lru.entries[keyString] = elem + lru.totalSize += size } // Signal that an entry has been interated with, moving it to the to the back of the queue // (i.e. making it so it doesn't get popped soon). func (lru *lruQueue) Touch(key []byte) { - + elem, ok := lru.entries[string(key)] + if !ok { + return + } + lru.order.MoveToBack(elem) } // Returns the total size of all entries in the LRU queue. func (lru *lruQueue) GetTotalSize() int { - return 0 + return lru.totalSize } // Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. func (lru *lruQueue) GetCount() int { - return 0 + return len(lru.entries) } // Pops a single element out of the queue. The element removed is the entry least recently passed to Update(). // Panics if the queue is empty. 
func (lru *lruQueue) PopLeastRecentlyUsed() []byte { - return nil + elem := lru.order.Front() + if elem == nil { + panic("cannot pop from empty LRU queue") + } + + lru.order.Remove(elem) + entry := elem.Value.(*lruQueueEntry) + delete(lru.entries, string(entry.key)) + lru.totalSize -= entry.size + return entry.key } diff --git a/sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go b/sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go new file mode 100644 index 0000000000..9fa17a8ce9 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go @@ -0,0 +1,82 @@ +package flatcache + +import ( + "bytes" + "testing" +) + +func TestLRUQueueTracksSizeCountAndOrder(t *testing.T) { + lru := NewLRUQueue() + + lru.Push([]byte("a"), 3) + lru.Push([]byte("b"), 5) + lru.Push([]byte("c"), 7) + + if got := lru.GetCount(); got != 3 { + t.Fatalf("GetCount() = %d, want 3", got) + } + + if got := lru.GetTotalSize(); got != 15 { + t.Fatalf("GetTotalSize() = %d, want 15", got) + } + + lru.Touch([]byte("a")) + + if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("b")) { + t.Fatalf("first pop = %q, want %q", got, []byte("b")) + } + + if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("c")) { + t.Fatalf("second pop = %q, want %q", got, []byte("c")) + } + + if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { + t.Fatalf("third pop = %q, want %q", got, []byte("a")) + } + + if got := lru.GetCount(); got != 0 { + t.Fatalf("GetCount() after pops = %d, want 0", got) + } + + if got := lru.GetTotalSize(); got != 0 { + t.Fatalf("GetTotalSize() after pops = %d, want 0", got) + } +} + +func TestLRUQueuePushUpdatesExistingEntry(t *testing.T) { + lru := NewLRUQueue() + + lru.Push([]byte("a"), 3) + lru.Push([]byte("b"), 5) + lru.Push([]byte("a"), 11) + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + + if got := lru.GetTotalSize(); got != 16 { + t.Fatalf("GetTotalSize() = %d, want 16", got) + } + + if got := 
lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("b")) { + t.Fatalf("first pop = %q, want %q", got, []byte("b")) + } + + if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { + t.Fatalf("second pop = %q, want %q", got, []byte("a")) + } +} + +func TestLRUQueueCopiesInsertedKey(t *testing.T) { + lru := NewLRUQueue() + + key := []byte("a") + lru.Push(key, 1) + key[0] = 'z' + + if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { + t.Fatalf("pop after mutating caller key = %q, want %q", got, []byte("a")) + } +} + +// TODO expand these tests diff --git a/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go b/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go index abfbc67b1a..627f1ac1f5 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go +++ b/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go @@ -1,17 +1,69 @@ package flatcache +import ( + "context" + "fmt" +) + // A utility for scheduling asyncronous DB reads. type readScheduler struct { + ctx context.Context + readFunc func(key []byte) []byte + requestChan chan *readRequest +} + +// A request to read a value from the database. +type readRequest struct { + // The key to read. + key []byte + + // The entry to write the result to. + entry *shardEntry } // Creates a new ReadScheduler. -func NewReadScheduler() *readScheduler { - return &readScheduler{} +func NewReadScheduler( + ctx context.Context, + readFunc func(key []byte) []byte, + // The number of background goroutines to read values from the database. + workerCount int, + // The max size of the read queue. + readQueueSize int, +) *readScheduler { + rs := &readScheduler{ + ctx: ctx, + readFunc: readFunc, + requestChan: make(chan *readRequest, readQueueSize), + } + + for i := 0; i < workerCount; i++ { + go rs.readWorker() + } + + return rs } // ScheduleRead schedules a read for the given key within the given shard. // This method returns immediately, and the read is performed asynchronously. 
// When eventually completed, the read result is inserted into the provided shard entry func (r *readScheduler) ScheduleRead(key []byte, entry *shardEntry) error { - panic("unimplemented") + select { + case <-r.ctx.Done(): + return fmt.Errorf("context done") + case r.requestChan <- &readRequest{key: key, entry: entry}: + return nil + } +} + +// A worker that reads values from the database. +func (r *readScheduler) readWorker() { + for { + select { + case <-r.ctx.Done(): + return + case request := <-r.requestChan: + value := r.readFunc(request.key) + request.entry.InjectValue(request.key, value) + } + } } diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index 7b3bbf975b..46628a6e79 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -74,7 +74,7 @@ func NewShard(readScheduler *readScheduler, maxSize int) (*shard, error) { } // Get returns the value for the given key, or (nil, false) if not found. -func (s *shard) Get(key []byte) ([]byte, bool, error) { +func (s *shard) Get(key []byte) ([]byte, bool) { s.lock.Lock() entry := s.getEntry(key) @@ -85,18 +85,18 @@ func (s *shard) Get(key []byte) ([]byte, bool, error) { value := entry.value s.gcQueue.Touch(key) s.lock.Unlock() - return value, true, nil + return value, true case statusDeleted: s.gcQueue.Touch(key) s.lock.Unlock() - return nil, false, nil + return nil, false case statusScheduled: // Another goroutine initiated a read, wait for that read to finish. valueChan := entry.valueChan s.lock.Unlock() value := <-valueChan valueChan <- value // reload the channel in case there are other listeners - return value, value != nil, nil + return value, value != nil case statusUnknown: // We are the first goroutine to read this value. 
entry.status = statusScheduled @@ -106,7 +106,7 @@ func (s *shard) Get(key []byte) ([]byte, bool, error) { s.lock.Unlock() value := <-valueChan valueChan <- value // reload the channel in case there are other listeners - return value, value != nil, nil + return value, value != nil default: panic(fmt.Sprintf("unexpected statustatus: %#v", entry.status)) } From 2ccbe6288995aa6b362d060796e02229015b7d40 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 14:55:20 -0600 Subject: [PATCH 004/119] address feedback --- sei-db/common/utils/chan_utils.go | 32 ++++++++ sei-db/state_db/sc/flatkv/flatcache/cache.go | 2 +- .../sc/flatkv/flatcache/cache_impl.go | 75 +++++++++++++++---- sei-db/state_db/sc/flatkv/flatcache/shard.go | 50 +++++++++---- sei-db/wal/wal.go | 35 ++------- 5 files changed, 133 insertions(+), 61 deletions(-) create mode 100644 sei-db/common/utils/chan_utils.go diff --git a/sei-db/common/utils/chan_utils.go b/sei-db/common/utils/chan_utils.go new file mode 100644 index 0000000000..4ae92c6b4a --- /dev/null +++ b/sei-db/common/utils/chan_utils.go @@ -0,0 +1,32 @@ +package utils + +import ( + "context" + "fmt" +) + +// TODO unit test before merge + +// Push to a channel, returning an error if the context is cancelled before the value is pushed. +func InterruptiblePush[T any](ctx context.Context, ch chan T, value T) error { + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled: %w", ctx.Err()) + case ch <- value: + return nil + } +} + +// Pull from a channel, returning an error if the context is cancelled before the value is pulled. 
+func InterruptiblePull[T any](ctx context.Context, ch <-chan T) (T, error) { + var zero T + select { + case <-ctx.Done(): + return zero, fmt.Errorf("context cancelled: %w", ctx.Err()) + case value, ok := <-ch: + if !ok { + return zero, fmt.Errorf("channel closed") + } + return value, nil + } +} diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go index ead7e82fc8..fdbc10c81d 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -8,7 +8,7 @@ type Cache interface { // TODO decide if we should support individual modifications // Get returns the value for the given key, or (nil, false) if not found. - Get(key []byte) ([]byte, bool) + Get(key []byte) ([]byte, bool, error) // Set sets the value for the given key. Set(key []byte, value []byte) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index fd087a39b2..bf5065d5b9 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -3,6 +3,7 @@ package flatcache import ( "context" "fmt" + "time" "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) @@ -11,6 +12,8 @@ var _ Cache = (*cache)(nil) // A standard implementation of a flatcache. type cache struct { + ctx context.Context + // A utility for assigning keys to shard indices. shardManager *shardManager @@ -19,6 +22,9 @@ type cache struct { // A scheduler for asyncronous reads. readScheduler *readScheduler + + // The interval at which to run garbage collection. + garbageCollectionInterval time.Duration } // Creates a new Cache. @@ -34,6 +40,8 @@ func NewCache( readWorkerCount int, // The max size of the read queue. readQueueSize int, + // The interval at which to run garbage collection. 
+ garbageCollectionInterval time.Duration, ) (Cache, error) { if shardCount <= 0 || (shardCount&(shardCount-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo @@ -68,45 +76,82 @@ func NewCache( } } - return &cache{ + c := &cache{ + ctx: ctx, shardManager: shardManager, shards: shards, readScheduler: readScheduler, - }, nil + } + + go c.runGarbageCollection() + + return c, nil } -func (f *cache) BatchSet(entries []*proto.KVPair) { +func (c *cache) BatchSet(entries []*proto.KVPair) { // First, sort entries by shard index. // This allows us to set all values in a single shard with only a single lock acquisition. shardMap := make(map[uint64][]*proto.KVPair) for _, entry := range entries { - shardMap[f.shardManager.Shard(entry.Key)] = append(shardMap[f.shardManager.Shard(entry.Key)], entry) + shardMap[c.shardManager.Shard(entry.Key)] = append(shardMap[c.shardManager.Shard(entry.Key)], entry) } // This is probably qutie fast, but if it isn't it can be parallelized. for shardIndex, shardEntries := range shardMap { - shard := f.shards[shardIndex] + shard := c.shards[shardIndex] shard.BatchSet(shardEntries) } } -func (f *cache) Delete(key []byte) { - shardIndex := f.shardManager.Shard(key) - shard := f.shards[shardIndex] +func (c *cache) Delete(key []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] shard.Delete(key) } -func (f *cache) Get(key []byte) ([]byte, bool) { - shardIndex := f.shardManager.Shard(key) - shard := f.shards[shardIndex] - return shard.Get(key) +func (c *cache) Get(key []byte) ([]byte, bool, error) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + + value, ok, err := shard.Get(key) + if err != nil { + return nil, false, fmt.Errorf("failed to get value from shard: %w", err) + } + if !ok { + return nil, false, nil + } + return value, ok, nil } -func (f *cache) Set(key []byte, value []byte) { - shardIndex := f.shardManager.Shard(key) - shard := f.shards[shardIndex] +func (c *cache) Set(key []byte, 
value []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] shard.Set(key, value) } +// TODO add GC metrics + +// Periodically runs garbage collection in the background. +func (c *cache) runGarbageCollection() { + + // Spread out work evenly across all shards, so that we visit each shard roughly once per interval. + gcSubInterval := c.garbageCollectionInterval / time.Duration(len(c.shards)) + ticker := time.NewTicker(gcSubInterval) + defer ticker.Stop() + + nextShardIndex := 0 + + for { + select { + case <-c.ctx.Done(): + return + case <-ticker.C: + shardIndex := nextShardIndex + nextShardIndex = (nextShardIndex + 1) % len(c.shards) + c.shards[shardIndex].RunGarbageCollection() + } + } +} + // TODO create a warming mechanism diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index 46628a6e79..6933d71a48 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -5,6 +5,7 @@ import ( "fmt" "sync" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) @@ -70,11 +71,14 @@ func NewShard(readScheduler *readScheduler, maxSize int) (*shard, error) { return &shard{ readScheduler: readScheduler, lock: sync.Mutex{}, + data: make(map[string]*shardEntry), + gcQueue: NewLRUQueue(), + maxSize: maxSize, }, nil } // Get returns the value for the given key, or (nil, false) if not found. -func (s *shard) Get(key []byte) ([]byte, bool) { +func (s *shard) Get(key []byte) ([]byte, bool, error) { s.lock.Lock() entry := s.getEntry(key) @@ -85,30 +89,39 @@ func (s *shard) Get(key []byte) ([]byte, bool) { value := entry.value s.gcQueue.Touch(key) s.lock.Unlock() - return value, true + return value, true, nil case statusDeleted: s.gcQueue.Touch(key) s.lock.Unlock() - return nil, false + return nil, false, nil case statusScheduled: // Another goroutine initiated a read, wait for that read to finish. 
valueChan := entry.valueChan s.lock.Unlock() - value := <-valueChan + value, err := utils.InterruptiblePull(s.ctx, valueChan) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } valueChan <- value // reload the channel in case there are other listeners - return value, value != nil + return value, value != nil, nil case statusUnknown: // We are the first goroutine to read this value. entry.status = statusScheduled valueChan := make(chan []byte, 1) entry.valueChan = valueChan - s.readScheduler.ScheduleRead(key, entry) s.lock.Unlock() - value := <-valueChan + err := s.readScheduler.ScheduleRead(key, entry) + if err != nil { + return nil, false, fmt.Errorf("failed to schedule read: %w", err) + } + value, err := utils.InterruptiblePull(s.ctx, valueChan) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } valueChan <- value // reload the channel in case there are other listeners - return value, value != nil + return value, value != nil, nil default: - panic(fmt.Sprintf("unexpected statustatus: %#v", entry.status)) + panic(fmt.Sprintf("unexpected status: %#v", entry.status)) } } @@ -116,15 +129,20 @@ func (s *shard) Get(key []byte) ([]byte, bool) { func (se *shardEntry) InjectValue(key []byte, value []byte) { se.shard.lock.Lock() - if value == nil { - se.status = statusDeleted - } else { - se.status = statusAvailable - se.value = value + if se.status == statusScheduled { + // In the time since the read was scheduled, nobody has written to this entry, + // so safe to overwrite the value. 
+ if value == nil { + se.status = statusDeleted + se.value = nil + se.shard.gcQueue.Push(key, len(key)) + } else { + se.status = statusAvailable + se.value = value + se.shard.gcQueue.Push(key, len(key)+len(value)) + } } - se.shard.gcQueue.Push(key, len(key)+len(value)) - se.shard.lock.Unlock() se.valueChan <- value diff --git a/sei-db/wal/wal.go b/sei-db/wal/wal.go index 6b21fa31f8..85aef58881 100644 --- a/sei-db/wal/wal.go +++ b/sei-db/wal/wal.go @@ -13,6 +13,7 @@ import ( "github.com/tidwall/wal" "github.com/sei-protocol/sei-chain/sei-db/common/logger" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" ) // The size of internal channel buffers if the provided buffer size is less than 1. @@ -175,7 +176,7 @@ func (walLog *WAL[T]) Write(entry T) error { errChan: errChan, } - err := interruptiblePush(walLog.ctx, walLog.writeChan, req) + err := utils.InterruptiblePush(walLog.ctx, walLog.writeChan, req) if err != nil { return fmt.Errorf("failed to push write request: %w", err) } @@ -185,7 +186,7 @@ func (walLog *WAL[T]) Write(entry T) error { return nil } - err, pullErr := interruptiblePull(walLog.ctx, errChan) + err, pullErr := utils.InterruptiblePull(walLog.ctx, errChan) if pullErr != nil { return fmt.Errorf("failed to pull write error: %w", pullErr) } @@ -345,12 +346,12 @@ func (walLog *WAL[T]) sendTruncate(before bool, index uint64) error { errChan: make(chan error, 1), } - err := interruptiblePush(walLog.ctx, walLog.truncateChan, req) + err := utils.InterruptiblePush(walLog.ctx, walLog.truncateChan, req) if err != nil { return fmt.Errorf("failed to push truncate request: %w", err) } - err, pullErr := interruptiblePull(walLog.ctx, req.errChan) + err, pullErr := utils.InterruptiblePull(walLog.ctx, req.errChan) if pullErr != nil { return fmt.Errorf("failed to pull truncate error: %w", pullErr) } @@ -505,7 +506,7 @@ func (walLog *WAL[T]) drain() { // Shut down the WAL. 
Sends a close request to the main loop so in-flight writes (and other work) // can complete before teardown. Idempotent. func (walLog *WAL[T]) Close() error { - _ = interruptiblePush(walLog.ctx, walLog.closeReqChan, struct{}{}) + _ = utils.InterruptiblePush(walLog.ctx, walLog.closeReqChan, struct{}{}) // If error is non-nil then this is not the first call to Close(), no problem since Close() is idempotent err := <-walLog.closeErrChan @@ -598,27 +599,3 @@ func (walLog *WAL[T]) mainLoop() { } walLog.closeErrChan <- closeErr } - -// Push to a channel, returning an error if the context is cancelled before the value is pushed. -func interruptiblePush[T any](ctx context.Context, ch chan T, value T) error { - select { - case <-ctx.Done(): - return fmt.Errorf("context cancelled: %w", ctx.Err()) - case ch <- value: - return nil - } -} - -// Pull from a channel, returning an error if the context is cancelled before the value is pulled. -func interruptiblePull[T any](ctx context.Context, ch <-chan T) (T, error) { - var zero T - select { - case <-ctx.Done(): - return zero, fmt.Errorf("context cancelled: %w", ctx.Err()) - case value, ok := <-ch: - if !ok { - return zero, fmt.Errorf("channel closed") - } - return value, nil - } -} From f412e856d3d2cb10586e640babfbf87836d889d3 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 14:57:38 -0600 Subject: [PATCH 005/119] more fixes --- sei-db/state_db/sc/flatkv/flatcache/cache_impl.go | 9 +++++---- sei-db/state_db/sc/flatkv/flatcache/shard.go | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index bf5065d5b9..5875ac2534 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -77,10 +77,11 @@ func NewCache( } c := &cache{ - ctx: ctx, - shardManager: shardManager, - shards: shards, - readScheduler: readScheduler, + ctx: ctx, + 
shardManager: shardManager, + shards: shards, + readScheduler: readScheduler, + garbageCollectionInterval: garbageCollectionInterval, } go c.runGarbageCollection() diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index 6933d71a48..05c5e619b6 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -62,13 +62,14 @@ type shardEntry struct { } // Creates a new Shard. -func NewShard(readScheduler *readScheduler, maxSize int) (*shard, error) { +func NewShard(ctx context.Context, readScheduler *readScheduler, maxSize int) (*shard, error) { if maxSize <= 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } return &shard{ + ctx: ctx, readScheduler: readScheduler, lock: sync.Mutex{}, data: make(map[string]*shardEntry), From e310037f8758a2d984cb81c2eabefe0d30b6b514 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 15:08:30 -0600 Subject: [PATCH 006/119] bugfix --- sei-db/state_db/sc/flatkv/flatcache/cache_impl.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index 5875ac2534..e0e84c97da 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -60,6 +60,9 @@ func NewCache( if err != nil { return nil, fmt.Errorf("failed to create shard manager: %w", err) } + if garbageCollectionInterval <= 0 { + return nil, fmt.Errorf("garbageCollectionInterval must be greater than 0") + } readScheduler := NewReadScheduler(ctx, readFunc, readWorkerCount, readQueueSize) @@ -70,7 +73,7 @@ func NewCache( shards := make([]*shard, shardCount) for i := 0; i < shardCount; i++ { - shards[i], err = NewShard(readScheduler, sizePerShard) + shards[i], err = NewShard(ctx, readScheduler, sizePerShard) if err != nil { return nil, fmt.Errorf("failed to create shard: %w", err) } @@ 
-138,6 +141,10 @@ func (c *cache) runGarbageCollection() { // Spread out work evenly across all shards, so that we visit each shard roughly once per interval. gcSubInterval := c.garbageCollectionInterval / time.Duration(len(c.shards)) + if gcSubInterval == 0 { + // technically possible if the number of shards is very large and the interval is very small + gcSubInterval = 1 + } ticker := time.NewTicker(gcSubInterval) defer ticker.Stop() From cf1071ca23463dc6e4e410995dfb275fcc1d4183 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 5 Mar 2026 15:44:50 -0600 Subject: [PATCH 007/119] wire in cache --- sei-db/state_db/sc/flatkv/flatcache/cache.go | 6 +-- .../sc/flatkv/flatcache/cache_impl.go | 15 ++++--- sei-db/state_db/sc/flatkv/flatcache/shard.go | 2 +- sei-db/state_db/sc/flatkv/store.go | 31 ++++++++++++- sei-db/state_db/sc/flatkv/store_read.go | 45 ++++++++++++++++--- sei-db/state_db/sc/flatkv/store_write.go | 3 ++ 6 files changed, 84 insertions(+), 18 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go index fdbc10c81d..4b255ecded 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -1,6 +1,6 @@ package flatcache -import "github.com/sei-protocol/sei-chain/sei-iavl/proto" +import "github.com/sei-protocol/sei-chain/sei-db/proto" // Cache describes a cache kapable of being used by a FlatKV store. type Cache interface { @@ -16,6 +16,6 @@ type Cache interface { // Delete deletes the value for the given key. Delete(key []byte) - // BatchSet sets the values for a batch of keys. - BatchSet(entries []*proto.KVPair) + // BatchSet applies the given changesets to the cache. 
+ BatchSet(cs []*proto.NamedChangeSet) } diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index e0e84c97da..a8e0bc8922 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -5,7 +5,8 @@ import ( "fmt" "time" - "github.com/sei-protocol/sei-chain/sei-iavl/proto" + "github.com/sei-protocol/sei-chain/sei-db/proto" + iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) var _ Cache = (*cache)(nil) @@ -92,16 +93,18 @@ func NewCache( return c, nil } -func (c *cache) BatchSet(entries []*proto.KVPair) { +func (c *cache) BatchSet(cs []*proto.NamedChangeSet) { // First, sort entries by shard index. // This allows us to set all values in a single shard with only a single lock acquisition. - shardMap := make(map[uint64][]*proto.KVPair) - for _, entry := range entries { - shardMap[c.shardManager.Shard(entry.Key)] = append(shardMap[c.shardManager.Shard(entry.Key)], entry) + shardMap := make(map[uint64][]*iavl.KVPair) + for _, ncs := range cs { + for _, entry := range ncs.Changeset.Pairs { + shardMap[c.shardManager.Shard(entry.Key)] = append(shardMap[c.shardManager.Shard(entry.Key)], entry) + } } - // This is probably qutie fast, but if it isn't it can be parallelized. + // This is probably quite fast, but if it isn't it can be parallelized. for shardIndex, shardEntries := range shardMap { shard := c.shards[shardIndex] shard.BatchSet(shardEntries) diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index 05c5e619b6..e7a6182218 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -209,7 +209,7 @@ func (s *shard) deleteUnlocked(key []byte) { } // RunGarbageCollection runs the garbage collection process. -func (s *shard) RunGarbageCollection() { +func (s *shard) RunGarbageCollection() { // TODO maybe just do this after each update? 
s.lock.Lock() for s.gcQueue.GetTotalSize() > s.maxSize { diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 5dabbbba1c..26b1cc7eeb 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -13,6 +13,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/flatcache" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" "github.com/sei-protocol/sei-chain/sei-db/wal" @@ -74,6 +75,9 @@ type CommitStore struct { storageDB seidbtypes.KeyValueDB // addr(20)||slot(32) → value(32) legacyDB seidbtypes.KeyValueDB // Legacy data for backward compatibility + // TODO consider having one cache per DB + cache flatcache.Cache + // Per-DB committed version, keyed by DB dir name (e.g. accountDBDir). 
localMeta map[string]*LocalMeta @@ -117,7 +121,7 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) - return &CommitStore{ + s := &CommitStore{ ctx: ctx, log: log, config: cfg, @@ -132,6 +136,31 @@ func NewCommitStore( workingLtHash: lthash.New(), phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), } + + readFunction := func(key []byte) []byte { // TODO maybe change signature + value, found := s.storageRead(key) + if !found { + return nil + } + return value + } + + // TODO use config + cache, err := flatcache.NewCache( + ctx, + readFunction, + 1024, + 1024*1024*1024, + 20, + 1024, + 10*time.Second) + if err != nil { + panic(fmt.Errorf("failed to create cache: %w", err)) // TODO + } + + s.cache = cache + + return s } func (s *CommitStore) flatkvDir() string { diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index b2a4ecb847..d17cd1d0d7 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -7,6 +7,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) // Get returns the value for the given memiavl key. 
@@ -28,8 +29,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - // Read from storageDB - value, err := s.storageDB.Get(keyBytes) + // Read from cache (may fall through to storageDB if not found) + value, _, err := s.cache.Get(keyBytes) if err != nil { return nil, false } @@ -61,8 +62,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return paw.value.CodeHash[:], true } - // Read from accountDB - encoded, err := s.accountDB.Get(AccountKey(addr)) + // Read from cache (may fall through to accountDB if not found) + encoded, _, err := s.cache.Get(AccountKey(addr)) if err != nil { return nil, false } @@ -93,8 +94,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - // Read from codeDB - value, err := s.codeDB.Get(keyBytes) + // Read from cache (may fall through to codeDB if not found) + value, _, err := s.cache.Get(keyBytes) if err != nil { return nil, false } @@ -108,7 +109,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - value, err := s.legacyDB.Get(keyBytes) + // Read from cache (may fall through to legacyDB if not found) + value, _, err := s.cache.Get(keyBytes) if err != nil { return nil, false } @@ -119,6 +121,35 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } } +// Read a value from the disk, ignoring pending writes and without interpreting data in any way. +func (s *CommitStore) storageRead(key []byte) ([]byte, bool) { + kind, keyBytes := evm.ParseEVMKey(key) + if kind == evm.EVMKeyUnknown { + return nil, false + } + + var db seidbtypes.KeyValueDB + + switch kind { + case evm.EVMKeyStorage: + db = s.storageDB + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + db = s.accountDB + case evm.EVMKeyCode: + db = s.codeDB + case evm.EVMKeyLegacy: + db = s.legacyDB + default: + return nil, false + } + + value, err := db.Get(keyBytes) + if err != nil { + return nil, false // TODO why are we squelching errors here? 
+ } + return value, true +} + // Has reports whether the given memiavl key exists. func (s *CommitStore) Has(key []byte) bool { _, found := s.Get(key) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 5e01274566..a54fea65e2 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -222,6 +222,9 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { s.workingLtHash = newLtHash } + s.phaseTimer.SetPhase("update_cache") + s.cache.BatchSet(s.pendingChangeSets) + s.phaseTimer.SetPhase("apply_change_done") return nil } From a8c1c75d416dbb5a92f5de7800631c0d59301064 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 10:16:13 -0600 Subject: [PATCH 008/119] incremental improvements --- .../bench/cryptosim/config/basic-config.json | 2 ++ .../bench/cryptosim/config/debug.json | 7 ++++++ .../bench/cryptosim/cryptosim_config.go | 8 ++++++ sei-db/state_db/bench/cryptosim/main/main.go | 25 +++++++++++++++++++ .../sc/flatkv/flatcache/cache_impl.go | 12 ++++++--- .../sc/flatkv/flatcache/shard_manager.go | 18 ++++++++----- sei-db/state_db/sc/flatkv/store.go | 2 +- sei-db/state_db/sc/flatkv/store_read.go | 18 +++++++------ 8 files changed, 74 insertions(+), 18 deletions(-) create mode 100644 sei-db/state_db/bench/cryptosim/config/debug.json diff --git a/sei-db/state_db/bench/cryptosim/config/basic-config.json b/sei-db/state_db/bench/cryptosim/config/basic-config.json index dea1b2a229..41298be70e 100644 --- a/sei-db/state_db/bench/cryptosim/config/basic-config.json +++ b/sei-db/state_db/bench/cryptosim/config/basic-config.json @@ -12,6 +12,8 @@ "Erc20InteractionsPerAccount": 10, "Erc20StorageSlotSize": 32, "ExecutorQueueSize": 64, + "DeleteDataDirOnStartup": false, + "DeleteDataDirOnShutdown": false, "HotAccountProbability": 0.1, "HotErc20ContractProbability": 0.5, "HotErc20ContractSetSize": 100, diff --git 
a/sei-db/state_db/bench/cryptosim/config/debug.json b/sei-db/state_db/bench/cryptosim/config/debug.json new file mode 100644 index 0000000000..15f556bf57 --- /dev/null +++ b/sei-db/state_db/bench/cryptosim/config/debug.json @@ -0,0 +1,7 @@ +{ + "Comment": "For locally testing/debugging the benchmark or related code.", + "DataDir": "data", + "DeleteDataDirOnStartup": true, + "DeleteDataDirOnShutdown": true +} + diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index a33e5a42ff..14c01cedc8 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -132,6 +132,12 @@ type CryptoSimConfig struct { // If true, pressing Enter in the terminal will toggle suspend/resume of the benchmark. // If false, Enter has no effect. EnableSuspension bool + + // If true, the data directory will be deleted on startup if it exists. + DeleteDataDirOnStartup bool + + // If true, the data directory will be deleted on a clean shutdown. + DeleteDataDirOnShutdown bool } // Returns the default configuration for the cryptosim benchmark. 
@@ -170,6 +176,8 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { TransactionMetricsSampleRate: 0.001, BackgroundMetricsScrapeInterval: 60, EnableSuspension: true, + DeleteDataDirOnStartup: false, + DeleteDataDirOnShutdown: false, } } diff --git a/sei-db/state_db/bench/cryptosim/main/main.go b/sei-db/state_db/bench/cryptosim/main/main.go index 6c475dc3d3..d2bd3e00ee 100644 --- a/sei-db/state_db/bench/cryptosim/main/main.go +++ b/sei-db/state_db/bench/cryptosim/main/main.go @@ -7,6 +7,7 @@ import ( "net/http" "os" "os/signal" + "path/filepath" "time" "github.com/prometheus/client_golang/prometheus" @@ -89,6 +90,18 @@ func run() error { } fmt.Printf("%s\n", configString) + if config.DeleteDataDirOnStartup { + resolved, err := filepath.Abs(config.DataDir) + if err != nil { + return fmt.Errorf("failed to resolve data directory: %w", err) + } + fmt.Printf("Deleting data directory: %s\n", resolved) + err = os.RemoveAll(resolved) + if err != nil { + return fmt.Errorf("failed to delete data directory: %w", err) + } + } + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() @@ -134,5 +147,17 @@ func run() error { cs.BlockUntilHalted() + if config.DeleteDataDirOnShutdown { + resolved, err := filepath.Abs(config.DataDir) + if err != nil { + return fmt.Errorf("failed to resolve data directory: %w", err) + } + fmt.Printf("Deleting data directory: %s\n", resolved) + err = os.RemoveAll(resolved) + if err != nil { + return fmt.Errorf("failed to delete data directory: %w", err) + } + } + return nil } diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index a8e0bc8922..8a7068874a 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -3,6 +3,7 @@ package flatcache import ( "context" "fmt" + "sync" "time" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -104,11 +105,16 @@ func (c *cache) BatchSet(cs 
[]*proto.NamedChangeSet) { } } - // This is probably quite fast, but if it isn't it can be parallelized. + wg := sync.WaitGroup{} for shardIndex, shardEntries := range shardMap { - shard := c.shards[shardIndex] - shard.BatchSet(shardEntries) + wg.Add(1) + go func(shardIndex uint64, shardEntries []*iavl.KVPair) { + defer wg.Done() + shard := c.shards[shardIndex] + shard.BatchSet(shardEntries) + }(shardIndex, shardEntries) } + wg.Wait() } func (c *cache) Delete(key []byte) { diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go b/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go index 2f5183f9b5..79c9f1fa6e 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go @@ -1,6 +1,7 @@ package flatcache import ( + "encoding/binary" "errors" "hash/maphash" "sync" @@ -36,12 +37,17 @@ func NewShardManager(numShards uint64) (*shardManager, error) { // Shard returns a shard index in [0, numShards). // addr should be the raw address bytes (e.g., 20-byte ETH address). 
func (s *shardManager) Shard(addr []byte) uint64 { - h := s.pool.Get().(*maphash.Hash) - h.SetSeed(s.seed) - h.Reset() - _, _ = h.Write(addr) - x := h.Sum64() - s.pool.Put(h) + // Temporary to measure impact of hash function + x := binary.BigEndian.Uint64(addr) return x & s.mask + + // h := s.pool.Get().(*maphash.Hash) + // h.SetSeed(s.seed) + // h.Reset() + // _, _ = h.Write(addr) + // x := h.Sum64() + // s.pool.Put(h) + + // return x & s.mask } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 26b1cc7eeb..f1bfce8113 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -152,7 +152,7 @@ func NewCommitStore( 1024, 1024*1024*1024, 20, - 1024, + 8, 10*time.Second) if err != nil { panic(fmt.Errorf("failed to create cache: %w", err)) // TODO diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index d17cd1d0d7..ec21b7edfd 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -30,11 +30,11 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to storageDB if not found) - value, _, err := s.cache.Get(keyBytes) + value, found, err := s.cache.Get(key) if err != nil { return nil, false } - return value, true + return value, found case evm.EVMKeyNonce, evm.EVMKeyCodeHash: // Account data: keyBytes = addr(20) @@ -62,8 +62,10 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return paw.value.CodeHash[:], true } - // Read from cache (may fall through to accountDB if not found) - encoded, _, err := s.cache.Get(AccountKey(addr)) + // Read from accountDB directly. The cache cannot be used for accounts + // because BatchSet stores raw nonce/codehash values individually, but + // accountDB stores merged AccountValue records. + encoded, err := s.accountDB.Get(keyBytes) // TODO: figure out how to fix this!!! 
if err != nil { return nil, false } @@ -95,11 +97,11 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to codeDB if not found) - value, _, err := s.cache.Get(keyBytes) + value, found, err := s.cache.Get(key) if err != nil { return nil, false } - return value, true + return value, found case evm.EVMKeyLegacy: if pw, ok := s.legacyWrites[string(keyBytes)]; ok { @@ -110,11 +112,11 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to legacyDB if not found) - value, _, err := s.cache.Get(keyBytes) + value, found, err := s.cache.Get(key) if err != nil { return nil, false } - return value, true + return value, found default: return nil, false From 221d114a495306936c6ba6d90ef985fc0507017e Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 10:36:16 -0600 Subject: [PATCH 009/119] checkin --- sei-db/state_db/sc/flatkv/flatcache/cache.go | 9 ++++++++- sei-db/state_db/sc/flatkv/flatcache/cache_impl.go | 4 ++-- sei-db/state_db/sc/flatkv/flatcache/shard.go | 10 +++++++--- sei-db/state_db/sc/flatkv/store_read.go | 6 +++--- sei-db/state_db/sc/flatkv/store_write.go | 8 ++++---- 5 files changed, 24 insertions(+), 13 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/state_db/sc/flatkv/flatcache/cache.go index 4b255ecded..e04de15b88 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache.go @@ -8,7 +8,14 @@ type Cache interface { // TODO decide if we should support individual modifications // Get returns the value for the given key, or (nil, false) if not found. - Get(key []byte) ([]byte, bool, error) + Get( + // The entry to fetch. + key []byte, + // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. 
+ // Useful for when an operation is performed multiple times in close succession on the same key, + // since it requires non-zero overhead to do so with little benefit. + updateLru bool, + ) ([]byte, bool, error) // Set sets the value for the given key. Set(key []byte, value []byte) diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go index 8a7068874a..07d29cecbd 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go +++ b/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go @@ -123,11 +123,11 @@ func (c *cache) Delete(key []byte) { shard.Delete(key) } -func (c *cache) Get(key []byte) ([]byte, bool, error) { +func (c *cache) Get(key []byte, updateLru bool) ([]byte, bool, error) { shardIndex := c.shardManager.Shard(key) shard := c.shards[shardIndex] - value, ok, err := shard.Get(key) + value, ok, err := shard.Get(key, updateLru) if err != nil { return nil, false, fmt.Errorf("failed to get value from shard: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/state_db/sc/flatkv/flatcache/shard.go index e7a6182218..d9a82de8a3 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard.go +++ b/sei-db/state_db/sc/flatkv/flatcache/shard.go @@ -79,7 +79,7 @@ func NewShard(ctx context.Context, readScheduler *readScheduler, maxSize int) (* } // Get returns the value for the given key, or (nil, false) if not found. 
-func (s *shard) Get(key []byte) ([]byte, bool, error) { +func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { s.lock.Lock() entry := s.getEntry(key) @@ -88,11 +88,15 @@ func (s *shard) Get(key []byte) ([]byte, bool, error) { case statusAvailable: value := entry.value - s.gcQueue.Touch(key) + if updateLru { + s.gcQueue.Touch(key) + } s.lock.Unlock() return value, true, nil case statusDeleted: - s.gcQueue.Touch(key) + if updateLru { + s.gcQueue.Touch(key) + } s.lock.Unlock() return nil, false, nil case statusScheduled: diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index ec21b7edfd..aad5ddfb26 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -30,7 +30,7 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to storageDB if not found) - value, found, err := s.cache.Get(key) + value, found, err := s.cache.Get(key, true) if err != nil { return nil, false } @@ -97,7 +97,7 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to codeDB if not found) - value, found, err := s.cache.Get(key) + value, found, err := s.cache.Get(key, true) if err != nil { return nil, false } @@ -112,7 +112,7 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Read from cache (may fall through to legacyDB if not found) - value, found, err := s.cache.Get(key) + value, found, err := s.cache.Get(key, true) if err != nil { return nil, false } diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index e40f3b254e..423a03da51 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -53,7 +53,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { switch kind { case evm.EVMKeyStorage: // Get old value for LtHash - oldValue, err := s.getStorageValue(keyBytes) + oldValue, _, err 
:= s.cache.Get(keyBytes, false) if err != nil { return fmt.Errorf("failed to get storage value: %w", err) } @@ -105,7 +105,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } paw := s.accountWrites[addrStr] if paw == nil { - existingValue, err := s.getAccountValue(addr) + existingValue, err := s.getAccountValue(addr) // TODO not using cache if err != nil { return fmt.Errorf("failed to load existing account value: %w", err) } @@ -138,7 +138,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { case evm.EVMKeyCode: // Get old value for LtHash - oldValue, err := s.getCodeValue(keyBytes) + oldValue, _, err := s.cache.Get(keyBytes, false) if err != nil { return fmt.Errorf("failed to get code value: %w", err) } @@ -166,7 +166,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { }) case evm.EVMKeyLegacy: - oldValue, err := s.getLegacyValue(keyBytes) + oldValue, _, err := s.cache.Get(keyBytes, false) if err != nil { return fmt.Errorf("failed to get legacy value: %w", err) } From 8eca079ceba2134ba720c48a826bd2d776df4834 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 11:31:30 -0600 Subject: [PATCH 010/119] Moved where the cache sits --- sei-db/db_engine/pebbledb/db.go | 69 +++++++++++++++---- .../pebbledb}/flatcache/cache.go | 2 +- .../pebbledb}/flatcache/cache_impl.go | 0 .../pebbledb}/flatcache/lru_queue.go | 0 .../pebbledb}/flatcache/lru_queue_test.go | 0 .../pebbledb}/flatcache/read_scheduler.go | 0 .../pebbledb}/flatcache/shard.go | 0 .../pebbledb}/flatcache/shard_manager.go | 19 +++-- sei-db/state_db/sc/flatkv/store.go | 31 +-------- sei-db/state_db/sc/flatkv/store_read.go | 53 +++----------- sei-db/state_db/sc/flatkv/store_write.go | 11 ++- 11 files changed, 81 insertions(+), 104 deletions(-) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/cache.go (93%) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/cache_impl.go (100%) rename 
sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/lru_queue.go (100%) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/lru_queue_test.go (100%) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/read_scheduler.go (100%) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/shard.go (100%) rename sei-db/{state_db/sc/flatkv => db_engine/pebbledb}/flatcache/shard_manager.go (86%) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index e2f0000ecd..c1eecc2321 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -14,6 +14,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -23,6 +24,7 @@ const metricsScrapeInterval = 10 * time.Second type pebbleDB struct { db *pebble.DB metricsCancel context.CancelFunc + cache flatcache.Cache } var _ types.KeyValueDB = (*pebbleDB)(nil) @@ -44,11 +46,12 @@ func Open( } } - cache := pebble.NewCache(1024 * 1024 * 512) // 512MB cache - defer cache.Unref() + // Internal pebbleDB cache, used to cache pages in memory. // TODO verify accuracy of this statement + pebbleCache := pebble.NewCache(1024 * 1024 * 512) // 512MB cache + defer pebbleCache.Unref() popts := &pebble.Options{ - Cache: cache, + Cache: pebbleCache, Comparer: cmp, // FormatMajorVersion is pinned to a specific version to prevent accidental // breaking changes when updating the pebble dependency. Using FormatNewest @@ -92,33 +95,73 @@ func Open( return nil, err } + readFunction := func(key []byte) []byte { // TODO error handling! + val, closer, err := db.Get(key) + if err != nil { + return nil + } + cloned := bytes.Clone(val) + _ = closer.Close() + return cloned + } + + // A high level cache per key. 
+ cache, err := flatcache.NewCache( + ctx, + readFunction, + 8, + 1024*1024*1024, + 20, + 64, + 10*time.Second) + if err != nil { + return nil, fmt.Errorf("failed to create flatcache: %w", err) + } + ctx, cancel := context.WithCancel(ctx) if enableMetrics { metrics.NewPebbleMetrics(ctx, db, filepath.Base(path), metricsScrapeInterval) } - return &pebbleDB{db: db, metricsCancel: cancel}, nil + return &pebbleDB{ + db: db, + metricsCancel: cancel, + cache: cache, + }, nil } func (p *pebbleDB) Get(key []byte) ([]byte, error) { - // Pebble returns a zero-copy view plus a closer; we copy and close internally. - val, closer, err := p.db.Get(key) + // // Pebble returns a zero-copy view plus a closer; we copy and close internally. + // val, closer, err := p.db.Get(key) + // if err != nil { + // if errors.Is(err, pebble.ErrNotFound) { + // return nil, errorutils.ErrNotFound + // } + // return nil, err + // } + // cloned := bytes.Clone(val) + // _ = closer.Close() + + val, found, err := p.cache.Get(key, true) if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - return nil, errorutils.ErrNotFound - } - return nil, err + return nil, fmt.Errorf("failed to get value from cache: %w", err) } - cloned := bytes.Clone(val) - _ = closer.Close() - return cloned, nil + if !found { + return nil, errorutils.ErrNotFound + } + + return val, nil } func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { + // TODO batch set! + p.cache.Set(key, value) return p.db.Set(key, value, toPebbleWriteOpts(opts)) } func (p *pebbleDB) Delete(key []byte, opts types.WriteOptions) error { + // TODO batch delete! 
+ p.cache.Delete(key) return p.db.Delete(key, toPebbleWriteOpts(opts)) } diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache.go b/sei-db/db_engine/pebbledb/flatcache/cache.go similarity index 93% rename from sei-db/state_db/sc/flatkv/flatcache/cache.go rename to sei-db/db_engine/pebbledb/flatcache/cache.go index e04de15b88..5f05ad2cec 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache.go @@ -1,4 +1,4 @@ -package flatcache +package flatcache // TODO rename the flatcache package! import "github.com/sei-protocol/sei-chain/sei-db/proto" diff --git a/sei-db/state_db/sc/flatkv/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go similarity index 100% rename from sei-db/state_db/sc/flatkv/flatcache/cache_impl.go rename to sei-db/db_engine/pebbledb/flatcache/cache_impl.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/lru_queue.go b/sei-db/db_engine/pebbledb/flatcache/lru_queue.go similarity index 100% rename from sei-db/state_db/sc/flatkv/flatcache/lru_queue.go rename to sei-db/db_engine/pebbledb/flatcache/lru_queue.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go b/sei-db/db_engine/pebbledb/flatcache/lru_queue_test.go similarity index 100% rename from sei-db/state_db/sc/flatkv/flatcache/lru_queue_test.go rename to sei-db/db_engine/pebbledb/flatcache/lru_queue_test.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go similarity index 100% rename from sei-db/state_db/sc/flatkv/flatcache/read_scheduler.go rename to sei-db/db_engine/pebbledb/flatcache/read_scheduler.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go similarity index 100% rename from sei-db/state_db/sc/flatkv/flatcache/shard.go rename to sei-db/db_engine/pebbledb/flatcache/shard.go diff --git a/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go 
b/sei-db/db_engine/pebbledb/flatcache/shard_manager.go similarity index 86% rename from sei-db/state_db/sc/flatkv/flatcache/shard_manager.go rename to sei-db/db_engine/pebbledb/flatcache/shard_manager.go index 79c9f1fa6e..473e8c3a4a 100644 --- a/sei-db/state_db/sc/flatkv/flatcache/shard_manager.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard_manager.go @@ -1,7 +1,6 @@ package flatcache import ( - "encoding/binary" "errors" "hash/maphash" "sync" @@ -39,15 +38,15 @@ func NewShardManager(numShards uint64) (*shardManager, error) { func (s *shardManager) Shard(addr []byte) uint64 { // Temporary to measure impact of hash function - x := binary.BigEndian.Uint64(addr) - return x & s.mask + // x := binary.BigEndian.Uint64(addr) + // return x & s.mask - // h := s.pool.Get().(*maphash.Hash) - // h.SetSeed(s.seed) - // h.Reset() - // _, _ = h.Write(addr) - // x := h.Sum64() - // s.pool.Put(h) + h := s.pool.Get().(*maphash.Hash) + h.SetSeed(s.seed) + h.Reset() + _, _ = h.Write(addr) + x := h.Sum64() + s.pool.Put(h) - // return x & s.mask + return x & s.mask } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index f1bfce8113..5dabbbba1c 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -13,7 +13,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/flatcache" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" "github.com/sei-protocol/sei-chain/sei-db/wal" @@ -75,9 +74,6 @@ type CommitStore struct { storageDB seidbtypes.KeyValueDB // addr(20)||slot(32) → value(32) legacyDB seidbtypes.KeyValueDB // Legacy data for backward compatibility - // TODO consider having one cache per DB - cache flatcache.Cache - // Per-DB committed 
version, keyed by DB dir name (e.g. accountDBDir). localMeta map[string]*LocalMeta @@ -121,7 +117,7 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) - s := &CommitStore{ + return &CommitStore{ ctx: ctx, log: log, config: cfg, @@ -136,31 +132,6 @@ func NewCommitStore( workingLtHash: lthash.New(), phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), } - - readFunction := func(key []byte) []byte { // TODO maybe change signature - value, found := s.storageRead(key) - if !found { - return nil - } - return value - } - - // TODO use config - cache, err := flatcache.NewCache( - ctx, - readFunction, - 1024, - 1024*1024*1024, - 20, - 8, - 10*time.Second) - if err != nil { - panic(fmt.Errorf("failed to create cache: %w", err)) // TODO - } - - s.cache = cache - - return s } func (s *CommitStore) flatkvDir() string { diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index aad5ddfb26..b2a4ecb847 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -7,7 +7,6 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) // Get returns the value for the given memiavl key. @@ -29,12 +28,12 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - // Read from cache (may fall through to storageDB if not found) - value, found, err := s.cache.Get(key, true) + // Read from storageDB + value, err := s.storageDB.Get(keyBytes) if err != nil { return nil, false } - return value, found + return value, true case evm.EVMKeyNonce, evm.EVMKeyCodeHash: // Account data: keyBytes = addr(20) @@ -62,10 +61,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return paw.value.CodeHash[:], true } - // Read from accountDB directly. 
The cache cannot be used for accounts - // because BatchSet stores raw nonce/codehash values individually, but - // accountDB stores merged AccountValue records. - encoded, err := s.accountDB.Get(keyBytes) // TODO: figure out how to fix this!!! + // Read from accountDB + encoded, err := s.accountDB.Get(AccountKey(addr)) if err != nil { return nil, false } @@ -96,12 +93,12 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - // Read from cache (may fall through to codeDB if not found) - value, found, err := s.cache.Get(key, true) + // Read from codeDB + value, err := s.codeDB.Get(keyBytes) if err != nil { return nil, false } - return value, found + return value, true case evm.EVMKeyLegacy: if pw, ok := s.legacyWrites[string(keyBytes)]; ok { @@ -111,47 +108,17 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { return pw.value, true } - // Read from cache (may fall through to legacyDB if not found) - value, found, err := s.cache.Get(key, true) + value, err := s.legacyDB.Get(keyBytes) if err != nil { return nil, false } - return value, found + return value, true default: return nil, false } } -// Read a value from the disk, ignoring pending writes and without interpreting data in any way. -func (s *CommitStore) storageRead(key []byte) ([]byte, bool) { - kind, keyBytes := evm.ParseEVMKey(key) - if kind == evm.EVMKeyUnknown { - return nil, false - } - - var db seidbtypes.KeyValueDB - - switch kind { - case evm.EVMKeyStorage: - db = s.storageDB - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - db = s.accountDB - case evm.EVMKeyCode: - db = s.codeDB - case evm.EVMKeyLegacy: - db = s.legacyDB - default: - return nil, false - } - - value, err := db.Get(keyBytes) - if err != nil { - return nil, false // TODO why are we squelching errors here? - } - return value, true -} - // Has reports whether the given memiavl key exists. 
func (s *CommitStore) Has(key []byte) bool { _, found := s.Get(key) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 423a03da51..cf1350f9be 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -53,7 +53,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { switch kind { case evm.EVMKeyStorage: // Get old value for LtHash - oldValue, _, err := s.cache.Get(keyBytes, false) + oldValue, err := s.getStorageValue(keyBytes) if err != nil { return fmt.Errorf("failed to get storage value: %w", err) } @@ -105,7 +105,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } paw := s.accountWrites[addrStr] if paw == nil { - existingValue, err := s.getAccountValue(addr) // TODO not using cache + existingValue, err := s.getAccountValue(addr) if err != nil { return fmt.Errorf("failed to load existing account value: %w", err) } @@ -138,7 +138,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { case evm.EVMKeyCode: // Get old value for LtHash - oldValue, _, err := s.cache.Get(keyBytes, false) + oldValue, err := s.getCodeValue(keyBytes) if err != nil { return fmt.Errorf("failed to get code value: %w", err) } @@ -166,7 +166,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { }) case evm.EVMKeyLegacy: - oldValue, _, err := s.cache.Get(keyBytes, false) + oldValue, err := s.getLegacyValue(keyBytes) if err != nil { return fmt.Errorf("failed to get legacy value: %w", err) } @@ -223,9 +223,6 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { s.workingLtHash = newLtHash } - s.phaseTimer.SetPhase("update_cache") - s.cache.BatchSet(s.pendingChangeSets) - s.phaseTimer.SetPhase("apply_change_done") return nil } From 267feae911bbca7dd0f64bca540288039ad27db9 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 11:45:56 -0600 Subject: [PATCH 011/119] 
bugfix --- sei-db/db_engine/pebbledb/batch.go | 44 +++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index 032bc3fb5d..49ee3c0106 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -2,38 +2,67 @@ package pebbledb import ( "github.com/cockroachdb/pebble/v2" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) +type pendingCacheWrite struct { + key []byte + value []byte + isDelete bool +} + // pebbleBatch wraps a Pebble batch for atomic writes. // Important: Callers must call Close() after Commit() to release batch resources, // even if Commit() succeeds. Failure to Close() will leak memory. type pebbleBatch struct { - b *pebble.Batch + b *pebble.Batch + cache flatcache.Cache + + // Writes are tracked so the cache can be updated after a successful commit. + pendingCacheWrites []pendingCacheWrite } var _ types.Batch = (*pebbleBatch)(nil) -func newPebbleBatch(db *pebble.DB) *pebbleBatch { - return &pebbleBatch{b: db.NewBatch()} +func newPebbleBatch(db *pebble.DB, cache flatcache.Cache) *pebbleBatch { + return &pebbleBatch{b: db.NewBatch(), cache: cache} } func (p *pebbleDB) NewBatch() types.Batch { - return newPebbleBatch(p.db) + return newPebbleBatch(p.db, p.cache) } func (pb *pebbleBatch) Set(key, value []byte) error { - // Durability options are applied on Commit. + pb.pendingCacheWrites = append(pb.pendingCacheWrites, pendingCacheWrite{ + key: key, + value: value, + }) return pb.b.Set(key, value, nil) } func (pb *pebbleBatch) Delete(key []byte) error { - // Durability options are applied on Commit. 
+ pb.pendingCacheWrites = append(pb.pendingCacheWrites, pendingCacheWrite{ + key: key, + isDelete: true, + }) return pb.b.Delete(key, nil) } func (pb *pebbleBatch) Commit(opts types.WriteOptions) error { - return pb.b.Commit(toPebbleWriteOpts(opts)) + err := pb.b.Commit(toPebbleWriteOpts(opts)) + if err != nil { + return err + } + for _, w := range pb.pendingCacheWrites { + if w.isDelete { + pb.cache.Delete(w.key) + } else { + pb.cache.Set(w.key, w.value) + } + } + pb.pendingCacheWrites = nil + return nil } func (pb *pebbleBatch) Len() int { @@ -42,6 +71,7 @@ func (pb *pebbleBatch) Len() int { func (pb *pebbleBatch) Reset() { pb.b.Reset() + pb.pendingCacheWrites = nil } func (pb *pebbleBatch) Close() error { From 50b0be6a874e8289d43db7ddd49bc67e07a136dc Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 11:52:31 -0600 Subject: [PATCH 012/119] Batch update the cache --- sei-db/db_engine/pebbledb/batch.go | 32 ++++++------------- sei-db/db_engine/pebbledb/flatcache/cache.go | 15 +++++++-- .../pebbledb/flatcache/cache_impl.go | 25 ++++++--------- sei-db/db_engine/pebbledb/flatcache/shard.go | 11 +++---- 4 files changed, 36 insertions(+), 47 deletions(-) diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index 49ee3c0106..e01770b349 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -6,12 +6,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -type pendingCacheWrite struct { - key []byte - value []byte - isDelete bool -} - // pebbleBatch wraps a Pebble batch for atomic writes. // Important: Callers must call Close() after Commit() to release batch resources, // even if Commit() succeeds. Failure to Close() will leak memory. @@ -20,7 +14,7 @@ type pebbleBatch struct { cache flatcache.Cache // Writes are tracked so the cache can be updated after a successful commit. 
- pendingCacheWrites []pendingCacheWrite + pendingCacheUpdates []flatcache.CacheUpdate } var _ types.Batch = (*pebbleBatch)(nil) @@ -34,17 +28,17 @@ func (p *pebbleDB) NewBatch() types.Batch { } func (pb *pebbleBatch) Set(key, value []byte) error { - pb.pendingCacheWrites = append(pb.pendingCacheWrites, pendingCacheWrite{ - key: key, - value: value, + pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, flatcache.CacheUpdate{ + Key: key, + Value: value, }) return pb.b.Set(key, value, nil) } func (pb *pebbleBatch) Delete(key []byte) error { - pb.pendingCacheWrites = append(pb.pendingCacheWrites, pendingCacheWrite{ - key: key, - isDelete: true, + pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, flatcache.CacheUpdate{ + Key: key, + IsDelete: true, }) return pb.b.Delete(key, nil) } @@ -54,14 +48,8 @@ func (pb *pebbleBatch) Commit(opts types.WriteOptions) error { if err != nil { return err } - for _, w := range pb.pendingCacheWrites { - if w.isDelete { - pb.cache.Delete(w.key) - } else { - pb.cache.Set(w.key, w.value) - } - } - pb.pendingCacheWrites = nil + pb.cache.BatchSet(pb.pendingCacheUpdates) + pb.pendingCacheUpdates = nil return nil } @@ -71,7 +59,7 @@ func (pb *pebbleBatch) Len() int { func (pb *pebbleBatch) Reset() { pb.b.Reset() - pb.pendingCacheWrites = nil + pb.pendingCacheUpdates = nil } func (pb *pebbleBatch) Close() error { diff --git a/sei-db/db_engine/pebbledb/flatcache/cache.go b/sei-db/db_engine/pebbledb/flatcache/cache.go index 5f05ad2cec..8f996ca083 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache.go @@ -1,6 +1,15 @@ package flatcache // TODO rename the flatcache package! -import "github.com/sei-protocol/sei-chain/sei-db/proto" +// CacheUpdate describes a single key-value mutation to apply to the cache. +type CacheUpdate struct { + // The key to update. + Key []byte + // The value to set. If nil, the key will be deleted. + Value []byte + // If true, the key will be deleted. 
+ // If false, the key will be set to the given value. + IsDelete bool +} // Cache describes a cache kapable of being used by a FlatKV store. type Cache interface { @@ -23,6 +32,6 @@ type Cache interface { // Delete deletes the value for the given key. Delete(key []byte) - // BatchSet applies the given changesets to the cache. - BatchSet(cs []*proto.NamedChangeSet) + // BatchSet applies the given updates to the cache. + BatchSet(updates []CacheUpdate) } diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index 07d29cecbd..063a1de190 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -5,9 +5,6 @@ import ( "fmt" "sync" "time" - - "github.com/sei-protocol/sei-chain/sei-db/proto" - iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) var _ Cache = (*cache)(nil) @@ -94,24 +91,20 @@ func NewCache( return c, nil } -func (c *cache) BatchSet(cs []*proto.NamedChangeSet) { - - // First, sort entries by shard index. - // This allows us to set all values in a single shard with only a single lock acquisition. - shardMap := make(map[uint64][]*iavl.KVPair) - for _, ncs := range cs { - for _, entry := range ncs.Changeset.Pairs { - shardMap[c.shardManager.Shard(entry.Key)] = append(shardMap[c.shardManager.Shard(entry.Key)], entry) - } +func (c *cache) BatchSet(updates []CacheUpdate) { + // Sort entries by shard index so each shard is locked only once. 
+ shardMap := make(map[uint64][]CacheUpdate) + for i := range updates { + idx := c.shardManager.Shard(updates[i].Key) + shardMap[idx] = append(shardMap[idx], updates[i]) } - wg := sync.WaitGroup{} + var wg sync.WaitGroup // TODO use a pool here for shardIndex, shardEntries := range shardMap { wg.Add(1) - go func(shardIndex uint64, shardEntries []*iavl.KVPair) { + go func(shardIndex uint64, shardEntries []CacheUpdate) { defer wg.Done() - shard := c.shards[shardIndex] - shard.BatchSet(shardEntries) + c.shards[shardIndex].BatchSet(shardEntries) }(shardIndex, shardEntries) } wg.Wait() diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index d9a82de8a3..efdfb9bf7c 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/sei-protocol/sei-chain/sei-db/common/utils" - "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) // TODO unsafe byte-> string conversion maybe? @@ -184,13 +183,13 @@ func (s *shard) setUnlocked(key []byte, value []byte) { } // BatchSet sets the values for a batch of keys. 
-func (s *shard) BatchSet(entries []*proto.KVPair) { +func (s *shard) BatchSet(entries []CacheUpdate) { s.lock.Lock() - for _, entry := range entries { - if entry.Delete { - s.deleteUnlocked(entry.Key) + for i := range entries { + if entries[i].IsDelete { + s.deleteUnlocked(entries[i].Key) } else { - s.setUnlocked(entry.Key, entry.Value) + s.setUnlocked(entries[i].Key, entries[i].Value) } } s.lock.Unlock() From 2ca00d6c15b60f33b3b468b3248325345346004e Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 12:35:38 -0600 Subject: [PATCH 013/119] Add batch read to cache --- sei-db/db_engine/pebbledb/flatcache/cache.go | 16 ++++ .../pebbledb/flatcache/cache_impl.go | 32 +++++++ .../pebbledb/flatcache/read_scheduler.go | 19 +++- sei-db/db_engine/pebbledb/flatcache/shard.go | 96 ++++++++++++++++++- 4 files changed, 158 insertions(+), 5 deletions(-) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache.go b/sei-db/db_engine/pebbledb/flatcache/cache.go index 8f996ca083..b39d136612 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache.go @@ -11,6 +11,16 @@ type CacheUpdate struct { IsDelete bool } +// BatchGetResult describes the result of a batch read operation. +type BatchGetResult struct { + // The value for the given key. + Value []byte + // If true, the key was found. + Found bool + // The error, if any, that occurred during the read. + Error error +} + // Cache describes a cache kapable of being used by a FlatKV store. type Cache interface { @@ -26,6 +36,12 @@ type Cache interface { updateLru bool, ) ([]byte, bool, error) + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]BatchGetResult) + // Set sets the value for the given key. 
Set(key []byte, value []byte) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index 063a1de190..187597ad06 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -110,6 +110,38 @@ func (c *cache) BatchSet(updates []CacheUpdate) { wg.Wait() } +func (c *cache) BatchGet(keys map[string]BatchGetResult) { + work := make(map[uint64]map[string]BatchGetResult) + for key := range keys { + idx := c.shardManager.Shard([]byte(key)) + if work[idx] == nil { + work[idx] = make(map[string]BatchGetResult) + } + work[idx][key] = BatchGetResult{} + } + + var wg sync.WaitGroup // TODO use a pool here + for shardIndex, subMap := range work { + wg.Add(1) + go func(shardIndex uint64, subMap map[string]BatchGetResult) { + defer wg.Done() + err := c.shards[shardIndex].BatchGet(subMap) + if err != nil { + for key := range subMap { + subMap[key] = BatchGetResult{Error: err} + } + } + }(shardIndex, subMap) + } + wg.Wait() + + for _, subMap := range work { + for key, result := range subMap { + keys[key] = result + } + } +} + func (c *cache) Delete(key []byte) { shardIndex := c.shardManager.Shard(key) shard := c.shards[shardIndex] diff --git a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go index 627f1ac1f5..57cdf0ad34 100644 --- a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go +++ b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go @@ -19,6 +19,11 @@ type readRequest struct { // The entry to write the result to. entry *shardEntry + + // If true, the worker will send the value directly to entry.valueChan + // without calling InjectValue (which acquires the shard lock). + // Used by BatchGet to defer cache updates to a single bulk operation. + skipInject bool } // Creates a new ReadScheduler. 
@@ -45,12 +50,14 @@ func NewReadScheduler( // ScheduleRead schedules a read for the given key within the given shard. // This method returns immediately, and the read is performed asynchronously. -// When eventually completed, the read result is inserted into the provided shard entry -func (r *readScheduler) ScheduleRead(key []byte, entry *shardEntry) error { +// When eventually completed, the read result is inserted into the provided shard entry. +// If skipInject is true, the worker sends the value directly to entry.valueChan +// without calling InjectValue. +func (r *readScheduler) ScheduleRead(key []byte, entry *shardEntry, skipInject bool) error { select { case <-r.ctx.Done(): return fmt.Errorf("context done") - case r.requestChan <- &readRequest{key: key, entry: entry}: + case r.requestChan <- &readRequest{key: key, entry: entry, skipInject: skipInject}: return nil } } @@ -63,7 +70,11 @@ func (r *readScheduler) readWorker() { return case request := <-r.requestChan: value := r.readFunc(request.key) - request.entry.InjectValue(request.key, value) + if request.skipInject { + request.entry.valueChan <- value + } else { + request.entry.InjectValue(request.key, value) + } } } } diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index efdfb9bf7c..95a383c477 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -114,7 +114,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { valueChan := make(chan []byte, 1) entry.valueChan = valueChan s.lock.Unlock() - err := s.readScheduler.ScheduleRead(key, entry) + err := s.readScheduler.ScheduleRead(key, entry, false) if err != nil { return nil, false, fmt.Errorf("failed to schedule read: %w", err) } @@ -166,6 +166,100 @@ func (s *shard) getEntry(key []byte) *shardEntry { return entry } +// Tracks a key whose value is not yet available and must be waited on. 
+type pendingRead struct { + key string + entry *shardEntry + valueChan chan []byte + needsSchedule bool + // Populated after the read completes, used by bulkInjectValues. + value []byte +} + +// BatchGet reads a batch of keys from the shard. Results are written into the provided map. +func (s *shard) BatchGet(keys map[string]BatchGetResult) error { + pending := make([]pendingRead, 0, len(keys)) + + s.lock.Lock() + for key := range keys { + entry := s.getEntry([]byte(key)) + + switch entry.status { + case statusAvailable: + keys[key] = BatchGetResult{Value: entry.value, Found: true} + case statusDeleted: + keys[key] = BatchGetResult{Found: false} + case statusScheduled: + pending = append(pending, pendingRead{ + key: key, + entry: entry, + valueChan: entry.valueChan, + }) + case statusUnknown: + entry.status = statusScheduled + valueChan := make(chan []byte, 1) + entry.valueChan = valueChan + pending = append(pending, pendingRead{ + key: key, + entry: entry, + valueChan: valueChan, + needsSchedule: true, + }) + default: + s.lock.Unlock() + panic(fmt.Sprintf("unexpected status: %#v", entry.status)) + } + } + s.lock.Unlock() + + for i := range pending { + if pending[i].needsSchedule { + err := s.readScheduler.ScheduleRead([]byte(pending[i].key), pending[i].entry, true) + if err != nil { + return fmt.Errorf("failed to schedule read: %w", err) + } + } + } + + for i := range pending { + value, err := utils.InterruptiblePull(s.ctx, pending[i].valueChan) + if err != nil { + return fmt.Errorf("failed to pull value from channel: %w", err) + } + pending[i].valueChan <- value + pending[i].value = value + + keys[pending[i].key] = BatchGetResult{Value: value, Found: value != nil} + } + + if len(pending) > 0 { + go s.bulkInjectValues(pending) + } + + return nil +} + +// Applies deferred cache updates for a batch of reads under a single lock acquisition. 
+func (s *shard) bulkInjectValues(reads []pendingRead) { + s.lock.Lock() + for i := range reads { + entry := reads[i].entry + if entry.status != statusScheduled { + continue + } + if reads[i].value == nil { + entry.status = statusDeleted + entry.value = nil + s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)) + } else { + entry.status = statusAvailable + entry.value = reads[i].value + s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)+len(reads[i].value)) + } + } + s.lock.Unlock() +} + // Set sets the value for the given key. func (s *shard) Set(key []byte, value []byte) { s.lock.Lock() From 8f8534a832b1464cd82f337e8d2dcf86d09a9f48 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 12:41:01 -0600 Subject: [PATCH 014/119] Add batch get to db interface --- sei-db/db_engine/pebbledb/db.go | 4 ++++ sei-db/db_engine/pebbledb/flatcache/cache.go | 14 +++----------- .../db_engine/pebbledb/flatcache/cache_impl.go | 14 ++++++++------ sei-db/db_engine/pebbledb/flatcache/shard.go | 9 +++++---- sei-db/db_engine/types/types.go | 17 ++++++++++++++++- 5 files changed, 36 insertions(+), 22 deletions(-) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index c1eecc2321..093c44439c 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -153,6 +153,10 @@ func (p *pebbleDB) Get(key []byte) ([]byte, error) { return val, nil } +func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) { + p.cache.BatchGet(keys) +} + func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { // TODO batch set! p.cache.Set(key, value) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache.go b/sei-db/db_engine/pebbledb/flatcache/cache.go index b39d136612..ab6877e765 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache.go @@ -1,5 +1,7 @@ package flatcache // TODO rename the flatcache package! 
+import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + // CacheUpdate describes a single key-value mutation to apply to the cache. type CacheUpdate struct { // The key to update. @@ -11,16 +13,6 @@ type CacheUpdate struct { IsDelete bool } -// BatchGetResult describes the result of a batch read operation. -type BatchGetResult struct { - // The value for the given key. - Value []byte - // If true, the key was found. - Found bool - // The error, if any, that occurred during the read. - Error error -} - // Cache describes a cache kapable of being used by a FlatKV store. type Cache interface { @@ -40,7 +32,7 @@ type Cache interface { // map with the results. // // It is not thread safe to read or mutate the map while this method is running. - BatchGet(keys map[string]BatchGetResult) + BatchGet(keys map[string]types.BatchGetResult) // Set sets the value for the given key. Set(key []byte, value []byte) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index 187597ad06..a102fceca7 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -5,6 +5,8 @@ import ( "fmt" "sync" "time" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) var _ Cache = (*cache)(nil) @@ -110,25 +112,25 @@ func (c *cache) BatchSet(updates []CacheUpdate) { wg.Wait() } -func (c *cache) BatchGet(keys map[string]BatchGetResult) { - work := make(map[uint64]map[string]BatchGetResult) +func (c *cache) BatchGet(keys map[string]types.BatchGetResult) { + work := make(map[uint64]map[string]types.BatchGetResult) for key := range keys { idx := c.shardManager.Shard([]byte(key)) if work[idx] == nil { - work[idx] = make(map[string]BatchGetResult) + work[idx] = make(map[string]types.BatchGetResult) } - work[idx][key] = BatchGetResult{} + work[idx][key] = types.BatchGetResult{} } var wg sync.WaitGroup // TODO use a pool here for shardIndex, subMap := range work { 
wg.Add(1) - go func(shardIndex uint64, subMap map[string]BatchGetResult) { + go func(shardIndex uint64, subMap map[string]types.BatchGetResult) { defer wg.Done() err := c.shards[shardIndex].BatchGet(subMap) if err != nil { for key := range subMap { - subMap[key] = BatchGetResult{Error: err} + subMap[key] = types.BatchGetResult{Error: err} } } }(shardIndex, subMap) diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index 95a383c477..97761dfded 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/sei-protocol/sei-chain/sei-db/common/utils" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) // TODO unsafe byte-> string conversion maybe? @@ -177,7 +178,7 @@ type pendingRead struct { } // BatchGet reads a batch of keys from the shard. Results are written into the provided map. -func (s *shard) BatchGet(keys map[string]BatchGetResult) error { +func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { pending := make([]pendingRead, 0, len(keys)) s.lock.Lock() @@ -186,9 +187,9 @@ func (s *shard) BatchGet(keys map[string]BatchGetResult) error { switch entry.status { case statusAvailable: - keys[key] = BatchGetResult{Value: entry.value, Found: true} + keys[key] = types.BatchGetResult{Value: entry.value, Found: true} case statusDeleted: - keys[key] = BatchGetResult{Found: false} + keys[key] = types.BatchGetResult{Found: false} case statusScheduled: pending = append(pending, pendingRead{ key: key, @@ -229,7 +230,7 @@ func (s *shard) BatchGet(keys map[string]BatchGetResult) error { pending[i].valueChan <- value pending[i].value = value - keys[pending[i].key] = BatchGetResult{Value: value, Found: value != nil} + keys[pending[i].key] = types.BatchGetResult{Value: value, Found: value != nil} } if len(pending) > 0 { diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 
0f82ac85a2..686cb8d5ff 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -33,11 +33,26 @@ type OpenOptions struct { Comparer any } +// BatchGetResult describes the result of a single key lookup within a BatchGet call. +type BatchGetResult struct { + // The value for the given key. + Value []byte + // If true, the key was found. + Found bool + // The error, if any, that occurred during the read. + Error error +} + // KeyValueDB is a low-level KV engine contract (business-agnostic). // // Get returns a value copy (safe to use after the call returns). -type KeyValueDB interface { +type KeyValueDB interface { // TODO document other methods, split this into a stand alone file maybe Get(key []byte) (value []byte, err error) + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]BatchGetResult) Set(key, value []byte, opts WriteOptions) error Delete(key []byte, opts WriteOptions) error From 23c0277ae0c25f310b4133bfd8d95d762ba3e604 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 12:52:31 -0600 Subject: [PATCH 015/119] integrate batch reads --- sei-db/state_db/sc/flatkv/store_write.go | 126 +++++++++++++++++------ 1 file changed, 94 insertions(+), 32 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index cf1350f9be..11a2f23210 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -5,7 +5,6 @@ import ( "fmt" "sync" - errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -20,6 +19,11 @@ import ( // - codeDB: key=addr, value=bytecode // - legacyDB: 
key=full original key (with prefix), value=raw value func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { + s.phaseTimer.SetPhase("apply_change_sets_batch_read") + + // Batch read all old values from DBs in parallel. + storageOld, accountOld, codeOld, legacyOld := s.batchReadOldValues(cs) + s.phaseTimer.SetPhase("apply_change_sets_prepare") // Save original changesets for changelog @@ -52,14 +56,10 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { // Route to appropriate DB based on key type switch kind { case evm.EVMKeyStorage: - // Get old value for LtHash - oldValue, err := s.getStorageValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get storage value: %w", err) - } - // Storage: keyBytes = addr(20) || slot(32) keyStr := string(keyBytes) + oldValue := storageOld[keyStr].Value + if pair.Delete { s.storageWrites[keyStr] = &pendingKVWrite{ key: keyBytes, @@ -87,27 +87,27 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) } addrStr := string(addr[:]) + addrKey := string(AccountKey(addr)) if _, seen := oldAccountRawValues[addrStr]; !seen { - if paw, ok := s.accountWrites[addrStr]; ok { - oldAccountRawValues[addrStr] = paw.value.Encode() + result := accountOld[addrKey] + if result.Found { + oldAccountRawValues[addrStr] = result.Value } else { - rawBytes, err := s.accountDB.Get(AccountKey(addr)) - if err != nil { - if !errorutils.IsNotFound(err) { - return fmt.Errorf("accountDB I/O error for addr %x: %w", addr, err) - } - oldAccountRawValues[addrStr] = nil - } else { - oldAccountRawValues[addrStr] = rawBytes - } + oldAccountRawValues[addrStr] = nil } } + paw := s.accountWrites[addrStr] if paw == nil { - existingValue, err := s.getAccountValue(addr) - if err != nil { - return fmt.Errorf("failed to load existing account value: %w", err) + var existingValue AccountValue + result := accountOld[addrKey] 
+ if result.Found && result.Value != nil { + av, err := DecodeAccountValue(result.Value) + if err != nil { + return fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) + } + existingValue = av } paw = &pendingAccountWrite{ addr: addr, @@ -137,14 +137,10 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } case evm.EVMKeyCode: - // Get old value for LtHash - oldValue, err := s.getCodeValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get code value: %w", err) - } - // Code: keyBytes = addr(20) - per x/evm/types/keys.go keyStr := string(keyBytes) + oldValue := codeOld[keyStr].Value + if pair.Delete { s.codeWrites[keyStr] = &pendingKVWrite{ key: keyBytes, @@ -166,12 +162,9 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { }) case evm.EVMKeyLegacy: - oldValue, err := s.getLegacyValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get legacy value: %w", err) - } - keyStr := string(keyBytes) + oldValue := legacyOld[keyStr].Value + if pair.Delete { s.legacyWrites[keyStr] = &pendingKVWrite{ key: keyBytes, @@ -470,3 +463,72 @@ func (s *CommitStore) commitBatches(version int64) error { } return nil } + +// batchReadOldValues scans all changeset pairs and issues parallel BatchGet +// calls across the four data DBs. Returns one result map per DB. 
+func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( + storageOld map[string]types.BatchGetResult, + accountOld map[string]types.BatchGetResult, + codeOld map[string]types.BatchGetResult, + legacyOld map[string]types.BatchGetResult, +) { + storageOld = make(map[string]types.BatchGetResult) + accountOld = make(map[string]types.BatchGetResult) + codeOld = make(map[string]types.BatchGetResult) + legacyOld = make(map[string]types.BatchGetResult) + + for _, namedCS := range cs { + if namedCS.Changeset.Pairs == nil { + continue + } + for _, pair := range namedCS.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + switch kind { + case evm.EVMKeyStorage: + storageOld[string(keyBytes)] = types.BatchGetResult{} + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + addr, ok := AddressFromBytes(keyBytes) + if !ok { + continue + } + accountOld[string(AccountKey(addr))] = types.BatchGetResult{} + case evm.EVMKeyCode: + codeOld[string(keyBytes)] = types.BatchGetResult{} + case evm.EVMKeyLegacy: + legacyOld[string(keyBytes)] = types.BatchGetResult{} + } + } + } + + var wg sync.WaitGroup + if len(storageOld) > 0 { + wg.Add(1) + go func() { + defer wg.Done() + s.storageDB.BatchGet(storageOld) + }() + } + if len(accountOld) > 0 { + wg.Add(1) + go func() { + defer wg.Done() + s.accountDB.BatchGet(accountOld) + }() + } + if len(codeOld) > 0 { + wg.Add(1) + go func() { + defer wg.Done() + s.codeDB.BatchGet(codeOld) + }() + } + if len(legacyOld) > 0 { + wg.Add(1) + go func() { + defer wg.Done() + s.legacyDB.BatchGet(legacyOld) + }() + } + wg.Wait() + return +} From 02d3ca156ea4dcc208fb14a2b9b4d5d984904e40 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 13:18:37 -0600 Subject: [PATCH 016/119] wire in cache --- sei-db/db_engine/pebbledb/db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 093c44439c..05c6cbab13 100644 --- a/sei-db/db_engine/pebbledb/db.go 
+++ b/sei-db/db_engine/pebbledb/db.go @@ -110,7 +110,7 @@ func Open( ctx, readFunction, 8, - 1024*1024*1024, + 1024*1024*1024*8, // 8GB TODO configure this differently for each db instance! 20, 64, 10*time.Second) From 7ee1b08aa1aea2f5b3ac073b7a4ccdfb64a9b369 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 14:18:49 -0600 Subject: [PATCH 017/119] Introduce work pool, size caches differently --- sei-db/common/utils/work_pool.go | 77 +++++++++++++++++++ sei-db/db_engine/pebbledb/db.go | 14 +++- sei-db/db_engine/pebbledb/db_test.go | 37 ++++++--- .../pebbledb/flatcache/cache_impl.go | 23 ++---- .../pebbledb/flatcache/read_scheduler.go | 2 +- sei-db/db_engine/pebbledb/flatcache/shard.go | 41 +++++++--- sei-db/state_db/sc/flatkv/snapshot.go | 3 +- sei-db/state_db/sc/flatkv/store.go | 31 ++++++-- 8 files changed, 175 insertions(+), 53 deletions(-) create mode 100644 sei-db/common/utils/work_pool.go diff --git a/sei-db/common/utils/work_pool.go b/sei-db/common/utils/work_pool.go new file mode 100644 index 0000000000..476bfa98b5 --- /dev/null +++ b/sei-db/common/utils/work_pool.go @@ -0,0 +1,77 @@ +package utils + +import ( + "context" + "fmt" +) + +// WorkPool is a pool of workers that can be used to execute tasks concurrently. +// More efficient than spawning large numbers of short lived goroutines. +type WorkPool struct { + ctx context.Context + workQueue chan func() +} + +// TODO add metrics! +// TODO unit test before merging! + +// Create a new work pool. +func NewWorkPool( + // The work pool shuts down when the context is done. + ctx context.Context, + // The name of the work pool. Used for metrics. + name string, + // The number of workers to create. + workers int, + // The size of the work queue. Once full, Submit will block until a slot is available. 
+ queueSize int, +) *WorkPool { + + workQueue := make(chan func(), queueSize) + workPool := &WorkPool{ + ctx: ctx, + workQueue: workQueue, + } + + for i := 0; i < workers; i++ { + go workPool.worker() + } + + // Shutdown the work pool when the context is done. + go func() { + <-ctx.Done() + close(workQueue) + }() + + return workPool +} + +// Submit submits a task to the work pool. This method does not block until the task is executed. +// +// If wp is nil, the task is executed asynchronously in a one-off goroutine. +func (wp *WorkPool) Submit(ctx context.Context, task func()) (err error) { + if wp == nil { + go task() + return nil + } + + defer func() { + if recover() != nil { + err = fmt.Errorf("work pool is shut down") + } + }() + select { + case <-ctx.Done(): + return ctx.Err() + case <-wp.ctx.Done(): + return fmt.Errorf("work pool is shut down") + case wp.workQueue <- task: + return nil + } +} + +func (wp *WorkPool) worker() { + for task := range wp.workQueue { + task() + } +} diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 05c6cbab13..4e50fa6afe 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -14,6 +14,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -29,12 +30,18 @@ type pebbleDB struct { var _ types.KeyValueDB = (*pebbleDB)(nil) +// TODO create a config struct for this! + // Open opens (or creates) a Pebble-backed DB at path, returning the DB interface. func Open( ctx context.Context, path string, opts types.OpenOptions, enableMetrics bool, + // A work pool for reading from the DB. 
+ readPool *utils.WorkPool, + cacheSize int, + pageCacheSize int, ) (_ types.KeyValueDB, err error) { // Validate options before allocating resources to avoid leaks on validation failure var cmp *pebble.Comparer @@ -47,7 +54,7 @@ func Open( } // Internal pebbleDB cache, used to cache pages in memory. // TODO verify accuracy of this statement - pebbleCache := pebble.NewCache(1024 * 1024 * 512) // 512MB cache + pebbleCache := pebble.NewCache(int64(pageCacheSize)) defer pebbleCache.Unref() popts := &pebble.Options{ @@ -110,9 +117,8 @@ func Open( ctx, readFunction, 8, - 1024*1024*1024*8, // 8GB TODO configure this differently for each db instance! - 20, - 64, + cacheSize, + readPool, 10*time.Second) if err != nil { return nil, fmt.Errorf("failed to create flatcache: %w", err) diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 9d8f7cd400..4223904520 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -8,12 +8,14 @@ import ( "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) func TestDBGetSetDelete(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -51,7 +53,8 @@ func TestDBGetSetDelete(t *testing.T) { func TestBatchAtomicWrite(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -90,7 +93,8 @@ func TestBatchAtomicWrite(t *testing.T) { func 
TestIteratorBounds(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -124,7 +128,8 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -189,7 +194,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -224,7 +230,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestOpenOptionsComparerTypeCheck(t *testing.T) { dir := t.TempDir() - _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, false, pool, 64<<20, 64<<20) if err == nil { t.Fatalf("expected error for invalid comparer type") } @@ -232,7 +239,8 @@ func TestOpenOptionsComparerTypeCheck(t *testing.T) { func TestErrNotFoundConsistency(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -257,7 +265,8 @@ func TestErrNotFoundConsistency(t 
*testing.T) { func TestGetReturnsCopy(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -287,7 +296,8 @@ func TestGetReturnsCopy(t *testing.T) { func TestBatchLenResetDelete(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -343,7 +353,8 @@ func TestBatchLenResetDelete(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -380,7 +391,8 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestFlush(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } @@ -408,7 +420,8 @@ func TestFlush(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) + pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index a102fceca7..b2181aae9f 100644 --- 
a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -6,6 +6,7 @@ import ( "sync" "time" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -21,8 +22,8 @@ type cache struct { // The shards in the cache. shards []*shard - // A scheduler for asyncronous reads. - readScheduler *readScheduler + // A pool for asyncronous reads. + readPool *utils.WorkPool // The interval at which to run garbage collection. garbageCollectionInterval time.Duration @@ -37,10 +38,8 @@ func NewCache( shardCount int, // The maximum size of the cache, in bytes. maxSize int, - // The number of background goroutines to read values from the database. - readWorkerCount int, - // The max size of the read queue. - readQueueSize int, + // A work pool for reading from the DB. + readPool *utils.WorkPool, // The interval at which to run garbage collection. garbageCollectionInterval time.Duration, ) (Cache, error) { @@ -50,12 +49,6 @@ func NewCache( if maxSize <= 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } - if readWorkerCount <= 0 { - return nil, fmt.Errorf("readWorkerCount must be greater than 0") - } - if readQueueSize <= 0 { - return nil, fmt.Errorf("readQueueSize must be greater than 0") - } shardManager, err := NewShardManager(uint64(shardCount)) if err != nil { @@ -65,8 +58,6 @@ func NewCache( return nil, fmt.Errorf("garbageCollectionInterval must be greater than 0") } - readScheduler := NewReadScheduler(ctx, readFunc, readWorkerCount, readQueueSize) - sizePerShard := maxSize / shardCount if sizePerShard <= 0 { return nil, fmt.Errorf("maxSize must be greater than shardCount") @@ -74,7 +65,7 @@ func NewCache( shards := make([]*shard, shardCount) for i := 0; i < shardCount; i++ { - shards[i], err = NewShard(ctx, readScheduler, sizePerShard) + shards[i], err = NewShard(ctx, readPool, readFunc, sizePerShard) if err != nil { return nil, fmt.Errorf("failed to 
create shard: %w", err) } @@ -84,7 +75,7 @@ func NewCache( ctx: ctx, shardManager: shardManager, shards: shards, - readScheduler: readScheduler, + readPool: readPool, garbageCollectionInterval: garbageCollectionInterval, } diff --git a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go index 57cdf0ad34..16bf88d597 100644 --- a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go +++ b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go @@ -73,7 +73,7 @@ func (r *readScheduler) readWorker() { if request.skipInject { request.entry.valueChan <- value } else { - request.entry.InjectValue(request.key, value) + request.entry.injectValue(request.key, value) } } } diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index 97761dfded..b9fb0954ef 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -24,8 +24,11 @@ type shard struct { // Organizes data for garbage collection. gcQueue *lruQueue - // A scheduler for asyncronous reads. - readScheduler *readScheduler + // A pool for asyncronous reads. + readPool *utils.WorkPool + + // A function that reads a value from the database. + readFunc func(key []byte) []byte // The maximum size of this cache, in bytes. maxSize int @@ -62,19 +65,25 @@ type shardEntry struct { } // Creates a new Shard. 
-func NewShard(ctx context.Context, readScheduler *readScheduler, maxSize int) (*shard, error) { +func NewShard( + ctx context.Context, + readPool *utils.WorkPool, + readFunc func(key []byte) []byte, + maxSize int, +) (*shard, error) { if maxSize <= 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } return &shard{ - ctx: ctx, - readScheduler: readScheduler, - lock: sync.Mutex{}, - data: make(map[string]*shardEntry), - gcQueue: NewLRUQueue(), - maxSize: maxSize, + ctx: ctx, + readPool: readPool, + readFunc: readFunc, + lock: sync.Mutex{}, + data: make(map[string]*shardEntry), + gcQueue: NewLRUQueue(), + maxSize: maxSize, }, nil } @@ -115,7 +124,10 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { valueChan := make(chan []byte, 1) entry.valueChan = valueChan s.lock.Unlock() - err := s.readScheduler.ScheduleRead(key, entry, false) + err := s.readPool.Submit(s.ctx, func() { + value := s.readFunc(key) + entry.injectValue(key, value) + }) if err != nil { return nil, false, fmt.Errorf("failed to schedule read: %w", err) } @@ -131,7 +143,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { } // This method is called by the read scheduler when a value becomes available. -func (se *shardEntry) InjectValue(key []byte, value []byte) { +func (se *shardEntry) injectValue(key []byte, value []byte) { se.shard.lock.Lock() if se.status == statusScheduled { @@ -215,7 +227,12 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { for i := range pending { if pending[i].needsSchedule { - err := s.readScheduler.ScheduleRead([]byte(pending[i].key), pending[i].entry, true) + p := &pending[i] + err := s.readPool.Submit(s.ctx, func() { + value := s.readFunc([]byte(p.key)) + p.entry.valueChan <- value + // Intentionally do not call injectValue here, we want to defer the update to a single bulk operation. 
+ }) if err != nil { return fmt.Errorf("failed to schedule read: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index bfbcc76021..6b67fb7c82 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -379,7 +379,8 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { // be at the flat location or might have been moved in a prior attempt. var version int64 metaPath := filepath.Join(flatkvDir, metadataDir) - if tmpMeta, err := pebbledb.Open(s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics); err == nil { + tmpMeta, err := pebbledb.Open(s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool) + if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() if verErr == nil && len(verData) == 8 { diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 5dabbbba1c..46251cfd77 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -100,6 +101,9 @@ type CommitStore struct { // Used to track time spent in various phases of execution. phaseTimer *metrics.PhaseTimer + + // A work pool for reading from the DB. + readPool *utils.WorkPool } var _ Store = (*CommitStore)(nil) @@ -117,6 +121,8 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) + readPool := utils.NewWorkPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! 
+ return &CommitStore{ ctx: ctx, log: log, @@ -131,6 +137,7 @@ func NewCommitStore( committedLtHash: lthash.New(), workingLtHash: lthash.New(), phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), + readPool: readPool, } } @@ -328,8 +335,15 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { } }() - openDB := func(np namedPath) (seidbtypes.KeyValueDB, error) { - db, err := pebbledb.Open(s.ctx, np.path, seidbtypes.OpenOptions{}, s.config.EnablePebbleMetrics) + openDB := func(np namedPath, cacheSize int, pageCacheSize int) (seidbtypes.KeyValueDB, error) { + db, err := pebbledb.Open( + s.ctx, + np.path, + seidbtypes.OpenOptions{}, + s.config.EnablePebbleMetrics, + s.readPool, + cacheSize, + pageCacheSize) if err != nil { return nil, fmt.Errorf("failed to open %s: %w", np.name, err) } @@ -337,20 +351,23 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { return db, nil } + // TODO don't hardcode the cache sizes! + gb := 1024 * 1024 * 1024 + var err error - if s.accountDB, err = openDB(dbPaths[0]); err != nil { + if s.accountDB, err = openDB(dbPaths[0], gb/2, gb/2); err != nil { return err } - if s.codeDB, err = openDB(dbPaths[1]); err != nil { + if s.codeDB, err = openDB(dbPaths[1], gb/2, gb/2); err != nil { return err } - if s.storageDB, err = openDB(dbPaths[2]); err != nil { + if s.storageDB, err = openDB(dbPaths[2], gb*4, gb/2); err != nil { return err } - if s.legacyDB, err = openDB(dbPaths[3]); err != nil { + if s.legacyDB, err = openDB(dbPaths[3], gb/2, gb/2); err != nil { return err } - if s.metadataDB, err = openDB(dbPaths[4]); err != nil { + if s.metadataDB, err = openDB(dbPaths[4], gb/2, gb/2); err != nil { return err } From 20c70c363f2e49bc2c468a2cf2dbfcd5b2c9e3fe Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 6 Mar 2026 14:46:51 -0600 Subject: [PATCH 018/119] bugfix --- sei-db/state_db/sc/flatkv/snapshot.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index 6b67fb7c82..dd788ee720 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -379,7 +379,10 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { // be at the flat location or might have been moved in a prior attempt. var version int64 metaPath := filepath.Join(flatkvDir, metadataDir) - tmpMeta, err := pebbledb.Open(s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool) + gb := 1024 * 1024 * 1024 + // TODO don't hardcode the cache sizes! + tmpMeta, err := pebbledb.Open( + s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool, gb/2, gb/2) if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() From b714789207aa627658940334fc0e5513307074b7 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 08:29:07 -0500 Subject: [PATCH 019/119] Add unit constants --- sei-db/common/unit/data_units.go | 20 +++++++++++++++++ sei-db/db_engine/pebbledb/db_test.go | 26 ++++++++++++---------- sei-db/state_db/sc/flatkv/snapshot.go | 5 ++--- sei-db/state_db/sc/flatkv/snapshot_test.go | 5 +++-- sei-db/state_db/sc/flatkv/store.go | 13 +++++------ sei-db/state_db/sc/flatkv/store_test.go | 3 ++- 6 files changed, 47 insertions(+), 25 deletions(-) create mode 100644 sei-db/common/unit/data_units.go diff --git a/sei-db/common/unit/data_units.go b/sei-db/common/unit/data_units.go new file mode 100644 index 0000000000..58ae32a5ad --- /dev/null +++ b/sei-db/common/unit/data_units.go @@ -0,0 +1,20 @@ +package unit + +const ( + // KB is the number of bytes in a kilobyte. + KB = 1024 + // MB is the number of bytes in a megabyte. + MB = KB * 1024 + // GB is the number of bytes in a gigabyte. + GB = MB * 1024 + // TB is the number of bytes in a terabyte. + TB = GB * 1024 + // PB is the number of bytes in a petabyte. 
+ PB = TB * 1024 + // EB is the number of bytes in an exabyte. + EB = PB * 1024 + // ZB is the number of bytes in a zettabyte. + ZB = EB * 1024 + // YB is the number of bytes in a yottabyte. + YB = ZB * 1024 +) diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 4223904520..180ba7012f 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -8,6 +8,7 @@ import ( "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -15,7 +16,7 @@ import ( func TestDBGetSetDelete(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -54,7 +55,7 @@ func TestDBGetSetDelete(t *testing.T) { func TestBatchAtomicWrite(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -94,7 +95,7 @@ func TestBatchAtomicWrite(t *testing.T) { func TestIteratorBounds(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -129,7 +130,7 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { dir := t.TempDir() pool 
:= utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -195,7 +196,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -231,7 +232,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestOpenOptionsComparerTypeCheck(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, false, pool, 64<<20, 64<<20) + _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, + false, pool, unit.MB*8, unit.MB*8) if err == nil { t.Fatalf("expected error for invalid comparer type") } @@ -240,7 +242,7 @@ func TestOpenOptionsComparerTypeCheck(t *testing.T) { func TestErrNotFoundConsistency(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -266,7 +268,7 @@ func TestErrNotFoundConsistency(t *testing.T) { func TestGetReturnsCopy(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { 
t.Fatalf("Open: %v", err) } @@ -297,7 +299,7 @@ func TestGetReturnsCopy(t *testing.T) { func TestBatchLenResetDelete(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -354,7 +356,7 @@ func TestBatchLenResetDelete(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -392,7 +394,7 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestFlush(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -421,7 +423,7 @@ func TestFlush(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { dir := t.TempDir() pool := utils.NewWorkPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, 64<<20, 64<<20) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index dd788ee720..f076799ce2 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" 
"github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -379,10 +380,8 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { // be at the flat location or might have been moved in a prior attempt. var version int64 metaPath := filepath.Join(flatkvDir, metadataDir) - gb := 1024 * 1024 * 1024 - // TODO don't hardcode the cache sizes! tmpMeta, err := pebbledb.Open( - s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool, gb/2, gb/2) + s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool, unit.GB/2, unit.GB/2) if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index d6b43e4c24..01f1cf1f55 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -255,7 +256,7 @@ func TestMigrationFromFlatLayout(t *testing.T) { dbPath := filepath.Join(flatkvDir, sub) require.NoError(t, os.MkdirAll(dbPath, 0750)) // Create an actual PebbleDB so Open works - db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false) + db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) require.NoError(t, err) require.NoError(t, db.Close()) } @@ -312,7 +313,7 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, err) accountDBPath := filepath.Join(snapDir, accountDBDir) - db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false) + db, err := pebbledb.Open(t.Context(), 
accountDBPath, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) require.NoError(t, err) lagMeta := &LocalMeta{CommittedVersion: 1} require.NoError(t, db.Set(DBLocalMetaKey, MarshalLocalMeta(lagMeta), types.WriteOptions{Sync: true})) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 46251cfd77..482f28dd58 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -352,22 +353,20 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { } // TODO don't hardcode the cache sizes! - gb := 1024 * 1024 * 1024 - var err error - if s.accountDB, err = openDB(dbPaths[0], gb/2, gb/2); err != nil { + if s.accountDB, err = openDB(dbPaths[0], unit.GB/2, unit.GB/2); err != nil { return err } - if s.codeDB, err = openDB(dbPaths[1], gb/2, gb/2); err != nil { + if s.codeDB, err = openDB(dbPaths[1], unit.GB/2, unit.GB/2); err != nil { return err } - if s.storageDB, err = openDB(dbPaths[2], gb*4, gb/2); err != nil { + if s.storageDB, err = openDB(dbPaths[2], unit.GB*4, unit.GB/2); err != nil { return err } - if s.legacyDB, err = openDB(dbPaths[3], gb/2, gb/2); err != nil { + if s.legacyDB, err = openDB(dbPaths[3], unit.GB/2, unit.GB/2); err != nil { return err } - if s.metadataDB, err = openDB(dbPaths[4], gb/2, gb/2); err != nil { + if s.metadataDB, err = openDB(dbPaths[4], unit.GB/2, unit.GB/2); err != nil { return err } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index dadba42c1b..14e2c3f333 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ 
b/sei-db/state_db/sc/flatkv/store_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -58,7 +59,7 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() dir := t.TempDir() - db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false) + db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) require.NoError(t, err) return db } From cc9d41d1a3cb5cba62e240249fde88209244d8a3 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 08:38:18 -0500 Subject: [PATCH 020/119] refactor threading utils --- .../common/{utils => threading}/chan_utils.go | 2 +- sei-db/common/threading/pool.go | 9 +++++++ .../work_pool.go => threading/pool_impl.go} | 18 +++++++------ sei-db/db_engine/pebbledb/db.go | 4 +-- sei-db/db_engine/pebbledb/db_test.go | 26 +++++++++---------- .../pebbledb/flatcache/cache_impl.go | 6 ++--- sei-db/db_engine/pebbledb/flatcache/shard.go | 12 ++++----- sei-db/state_db/sc/flatkv/store.go | 6 ++--- sei-db/wal/wal.go | 12 ++++----- 9 files changed, 53 insertions(+), 42 deletions(-) rename sei-db/common/{utils => threading}/chan_utils.go (97%) create mode 100644 sei-db/common/threading/pool.go rename sei-db/common/{utils/work_pool.go => threading/pool_impl.go} (82%) diff --git a/sei-db/common/utils/chan_utils.go b/sei-db/common/threading/chan_utils.go similarity index 97% rename from sei-db/common/utils/chan_utils.go rename to sei-db/common/threading/chan_utils.go index 4ae92c6b4a..1d6c23aed0 100644 --- a/sei-db/common/utils/chan_utils.go +++ b/sei-db/common/threading/chan_utils.go @@ -1,4 +1,4 @@ -package utils +package 
threading import ( "context" diff --git a/sei-db/common/threading/pool.go b/sei-db/common/threading/pool.go new file mode 100644 index 0000000000..06ba2bba28 --- /dev/null +++ b/sei-db/common/threading/pool.go @@ -0,0 +1,9 @@ +package threading + +import "context" + +// Pool is a pool of workers that can be used to execute tasks concurrently. +type Pool interface { + // Submit submits a task to the pool. This method does not block until the task is executed. + Submit(ctx context.Context, task func()) error +} diff --git a/sei-db/common/utils/work_pool.go b/sei-db/common/threading/pool_impl.go similarity index 82% rename from sei-db/common/utils/work_pool.go rename to sei-db/common/threading/pool_impl.go index 476bfa98b5..a35c9c2bf8 100644 --- a/sei-db/common/utils/work_pool.go +++ b/sei-db/common/threading/pool_impl.go @@ -1,13 +1,15 @@ -package utils +package threading import ( "context" "fmt" ) -// WorkPool is a pool of workers that can be used to execute tasks concurrently. +var _ Pool = (*pool)(nil) + +// pool is a pool of workers that can be used to execute tasks concurrently. // More efficient than spawning large numbers of short lived goroutines. -type WorkPool struct { +type pool struct { ctx context.Context workQueue chan func() } @@ -16,7 +18,7 @@ type WorkPool struct { // TODO unit test before merging! // Create a new work pool. -func NewWorkPool( +func NewPool( // The work pool shuts down when the context is done. ctx context.Context, // The name of the work pool. Used for metrics. @@ -25,10 +27,10 @@ func NewWorkPool( workers int, // The size of the work queue. Once full, Submit will block until a slot is available. queueSize int, -) *WorkPool { +) Pool { workQueue := make(chan func(), queueSize) - workPool := &WorkPool{ + workPool := &pool{ ctx: ctx, workQueue: workQueue, } @@ -49,7 +51,7 @@ func NewWorkPool( // Submit submits a task to the work pool. This method does not block until the task is executed. 
// // If wp is nil, the task is executed asynchronously in a one-off goroutine. -func (wp *WorkPool) Submit(ctx context.Context, task func()) (err error) { +func (wp *pool) Submit(ctx context.Context, task func()) (err error) { if wp == nil { go task() return nil @@ -70,7 +72,7 @@ func (wp *WorkPool) Submit(ctx context.Context, task func()) (err error) { } } -func (wp *WorkPool) worker() { +func (wp *pool) worker() { for task := range wp.workQueue { task() } diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 4e50fa6afe..1a207323ef 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -14,7 +14,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -39,7 +39,7 @@ func Open( opts types.OpenOptions, enableMetrics bool, // A work pool for reading from the DB. 
- readPool *utils.WorkPool, + readPool threading.Pool, cacheSize int, pageCacheSize int, ) (_ types.KeyValueDB, err error) { diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 180ba7012f..0fab3dccf9 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -8,14 +8,14 @@ import ( "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) func TestDBGetSetDelete(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -54,7 +54,7 @@ func TestDBGetSetDelete(t *testing.T) { func TestBatchAtomicWrite(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -94,7 +94,7 @@ func TestBatchAtomicWrite(t *testing.T) { func TestIteratorBounds(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -129,7 +129,7 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, 
types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -195,7 +195,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -231,7 +231,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestOpenOptionsComparerTypeCheck(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, false, pool, unit.MB*8, unit.MB*8) if err == nil { @@ -241,7 +241,7 @@ func TestOpenOptionsComparerTypeCheck(t *testing.T) { func TestErrNotFoundConsistency(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -267,7 +267,7 @@ func TestErrNotFoundConsistency(t *testing.T) { func TestGetReturnsCopy(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -298,7 +298,7 @@ func TestGetReturnsCopy(t *testing.T) { func TestBatchLenResetDelete(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -355,7 
+355,7 @@ func TestBatchLenResetDelete(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -393,7 +393,7 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestFlush(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) @@ -422,7 +422,7 @@ func TestFlush(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { dir := t.TempDir() - pool := utils.NewWorkPool(t.Context(), "test", 1, 64) + pool := threading.NewPool(t.Context(), "test", 1, 64) db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index b2181aae9f..01516bbafb 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -23,7 +23,7 @@ type cache struct { shards []*shard // A pool for asyncronous reads. - readPool *utils.WorkPool + readPool threading.Pool // The interval at which to run garbage collection. garbageCollectionInterval time.Duration @@ -39,7 +39,7 @@ func NewCache( // The maximum size of the cache, in bytes. maxSize int, // A work pool for reading from the DB. 
- readPool *utils.WorkPool, + readPool threading.Pool, // The interval at which to run garbage collection. garbageCollectionInterval time.Duration, ) (Cache, error) { diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index b9fb0954ef..749c8379c2 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -5,7 +5,7 @@ import ( "fmt" "sync" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -25,7 +25,7 @@ type shard struct { gcQueue *lruQueue // A pool for asyncronous reads. - readPool *utils.WorkPool + readPool threading.Pool // A function that reads a value from the database. readFunc func(key []byte) []byte @@ -67,7 +67,7 @@ type shardEntry struct { // Creates a new Shard. func NewShard( ctx context.Context, - readPool *utils.WorkPool, + readPool threading.Pool, readFunc func(key []byte) []byte, maxSize int, ) (*shard, error) { @@ -112,7 +112,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { // Another goroutine initiated a read, wait for that read to finish. 
valueChan := entry.valueChan s.lock.Unlock() - value, err := utils.InterruptiblePull(s.ctx, valueChan) + value, err := threading.InterruptiblePull(s.ctx, valueChan) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } @@ -131,7 +131,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { if err != nil { return nil, false, fmt.Errorf("failed to schedule read: %w", err) } - value, err := utils.InterruptiblePull(s.ctx, valueChan) + value, err := threading.InterruptiblePull(s.ctx, valueChan) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } @@ -240,7 +240,7 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { } for i := range pending { - value, err := utils.InterruptiblePull(s.ctx, pending[i].valueChan) + value, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan) if err != nil { return fmt.Errorf("failed to pull value from channel: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 482f28dd58..5b773cafad 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -10,8 +10,8 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -104,7 +104,7 @@ type CommitStore struct { phaseTimer *metrics.PhaseTimer // A work pool for reading from the DB. 
- readPool *utils.WorkPool + readPool threading.Pool } var _ Store = (*CommitStore)(nil) @@ -122,7 +122,7 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) - readPool := utils.NewWorkPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! + readPool := threading.NewPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! return &CommitStore{ ctx: ctx, diff --git a/sei-db/wal/wal.go b/sei-db/wal/wal.go index 85aef58881..71eb050663 100644 --- a/sei-db/wal/wal.go +++ b/sei-db/wal/wal.go @@ -13,7 +13,7 @@ import ( "github.com/tidwall/wal" "github.com/sei-protocol/sei-chain/sei-db/common/logger" - "github.com/sei-protocol/sei-chain/sei-db/common/utils" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" ) // The size of internal channel buffers if the provided buffer size is less than 1. @@ -176,7 +176,7 @@ func (walLog *WAL[T]) Write(entry T) error { errChan: errChan, } - err := utils.InterruptiblePush(walLog.ctx, walLog.writeChan, req) + err := threading.InterruptiblePush(walLog.ctx, walLog.writeChan, req) if err != nil { return fmt.Errorf("failed to push write request: %w", err) } @@ -186,7 +186,7 @@ func (walLog *WAL[T]) Write(entry T) error { return nil } - err, pullErr := utils.InterruptiblePull(walLog.ctx, errChan) + err, pullErr := threading.InterruptiblePull(walLog.ctx, errChan) if pullErr != nil { return fmt.Errorf("failed to pull write error: %w", pullErr) } @@ -346,12 +346,12 @@ func (walLog *WAL[T]) sendTruncate(before bool, index uint64) error { errChan: make(chan error, 1), } - err := utils.InterruptiblePush(walLog.ctx, walLog.truncateChan, req) + err := threading.InterruptiblePush(walLog.ctx, walLog.truncateChan, req) if err != nil { return fmt.Errorf("failed to push truncate request: %w", err) } - err, pullErr := utils.InterruptiblePull(walLog.ctx, req.errChan) + err, pullErr := threading.InterruptiblePull(walLog.ctx, req.errChan) if pullErr != nil { return fmt.Errorf("failed to pull truncate error: 
%w", pullErr) } @@ -506,7 +506,7 @@ func (walLog *WAL[T]) drain() { // Shut down the WAL. Sends a close request to the main loop so in-flight writes (and other work) // can complete before teardown. Idempotent. func (walLog *WAL[T]) Close() error { - _ = utils.InterruptiblePush(walLog.ctx, walLog.closeReqChan, struct{}{}) + _ = threading.InterruptiblePush(walLog.ctx, walLog.closeReqChan, struct{}{}) // If error is non-nil then this is not the first call to Close(), no problem since Close() is idempotent err := <-walLog.closeErrChan From 53b2bd8d769169e9cdb2e9c38ca72f3a2818b521 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 08:54:26 -0500 Subject: [PATCH 021/119] cleanup --- sei-db/common/threading/adhoc_pool.go | 19 ++++++++++ sei-db/common/threading/pool.go | 2 +- sei-db/common/threading/pool_impl.go | 7 ---- sei-db/db_engine/pebbledb/db_test.go | 37 +++++++------------ .../pebbledb/flatcache/cache_impl.go | 2 +- .../pebbledb/flatcache/read_scheduler.go | 2 +- sei-db/db_engine/pebbledb/flatcache/shard.go | 4 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 7 +++- sei-db/state_db/sc/flatkv/store_test.go | 4 +- 9 files changed, 45 insertions(+), 39 deletions(-) create mode 100644 sei-db/common/threading/adhoc_pool.go diff --git a/sei-db/common/threading/adhoc_pool.go b/sei-db/common/threading/adhoc_pool.go new file mode 100644 index 0000000000..07565c97e3 --- /dev/null +++ b/sei-db/common/threading/adhoc_pool.go @@ -0,0 +1,19 @@ +package threading + +import "context" + +var _ Pool = (*adHocPool)(nil) + +// adHocPool is a Pool that runs each task in a new goroutine. +// Intended for use in unit tests or where performance is not important. +type adHocPool struct{} + +// NewAdHocPool creates a Pool that runs each submitted task in a one-off goroutine. 
+func NewAdHocPool() Pool { + return &adHocPool{} +} + +func (p *adHocPool) Submit(_ context.Context, task func()) error { + go task() + return nil +} diff --git a/sei-db/common/threading/pool.go b/sei-db/common/threading/pool.go index 06ba2bba28..158780ebce 100644 --- a/sei-db/common/threading/pool.go +++ b/sei-db/common/threading/pool.go @@ -4,6 +4,6 @@ import "context" // Pool is a pool of workers that can be used to execute tasks concurrently. type Pool interface { - // Submit submits a task to the pool. This method does not block until the task is executed. + // Submit submits a task to the pool. Submit(ctx context.Context, task func()) error } diff --git a/sei-db/common/threading/pool_impl.go b/sei-db/common/threading/pool_impl.go index a35c9c2bf8..c2bec106ec 100644 --- a/sei-db/common/threading/pool_impl.go +++ b/sei-db/common/threading/pool_impl.go @@ -49,14 +49,7 @@ func NewPool( } // Submit submits a task to the work pool. This method does not block until the task is executed. -// -// If wp is nil, the task is executed asynchronously in a one-off goroutine. 
func (wp *pool) Submit(ctx context.Context, task func()) (err error) { - if wp == nil { - go task() - return nil - } - defer func() { if recover() != nil { err = fmt.Errorf("work pool is shut down") diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 0fab3dccf9..79083d3421 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -15,8 +15,7 @@ import ( func TestDBGetSetDelete(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -54,8 +53,7 @@ func TestDBGetSetDelete(t *testing.T) { func TestBatchAtomicWrite(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -94,8 +92,7 @@ func TestBatchAtomicWrite(t *testing.T) { func TestIteratorBounds(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -129,8 +126,7 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, 
threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -195,8 +191,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, + threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -231,9 +227,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestOpenOptionsComparerTypeCheck(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, - false, pool, unit.MB*8, unit.MB*8) + false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err == nil { t.Fatalf("expected error for invalid comparer type") } @@ -241,8 +236,7 @@ func TestOpenOptionsComparerTypeCheck(t *testing.T) { func TestErrNotFoundConsistency(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -267,8 +261,7 @@ func TestErrNotFoundConsistency(t *testing.T) { func TestGetReturnsCopy(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -298,8 +291,7 @@ func TestGetReturnsCopy(t *testing.T) { func TestBatchLenResetDelete(t *testing.T) { dir := t.TempDir() - pool := 
threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -355,8 +347,7 @@ func TestBatchLenResetDelete(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -393,8 +384,7 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestFlush(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -422,8 +412,7 @@ func TestFlush(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { dir := t.TempDir() - pool := threading.NewPool(t.Context(), "test", 1, 64) - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, pool, unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index 01516bbafb..161d7635ae 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -22,7 +22,7 @@ type cache struct { // The shards in the cache. shards []*shard - // A pool for asyncronous reads. + // A pool for asynchronous reads. 
readPool threading.Pool // The interval at which to run garbage collection. diff --git a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go index 16bf88d597..d4a11f1e76 100644 --- a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go +++ b/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go @@ -5,7 +5,7 @@ import ( "fmt" ) -// A utility for scheduling asyncronous DB reads. +// A utility for scheduling asynchronous DB reads. type readScheduler struct { ctx context.Context readFunc func(key []byte) []byte diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/flatcache/shard.go index 749c8379c2..9d57529ec0 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/flatcache/shard.go @@ -24,7 +24,7 @@ type shard struct { // Organizes data for garbage collection. gcQueue *lruQueue - // A pool for asyncronous reads. + // A pool for asynchronous reads. readPool threading.Pool // A function that reads a value from the database. @@ -44,7 +44,7 @@ const ( statusScheduled // The data is available. statusAvailable - // We are aware that the value is deleted (special case of data being avialable). + // We are aware that the value is deleted (special case of data being available). 
statusDeleted ) diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 01f1cf1f55..604daef2a3 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -256,7 +257,8 @@ func TestMigrationFromFlatLayout(t *testing.T) { dbPath := filepath.Join(flatkvDir, sub) require.NoError(t, os.MkdirAll(dbPath, 0750)) // Create an actual PebbleDB so Open works - db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) + db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false, + threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) require.NoError(t, db.Close()) } @@ -313,7 +315,8 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, err) accountDBPath := filepath.Join(snapDir, accountDBDir) - db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) + db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false, + threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) lagMeta := &LocalMeta{CommittedVersion: 1} require.NoError(t, db.Set(DBLocalMetaKey, MarshalLocalMeta(lagMeta), types.WriteOptions{Sync: true})) diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 14e2c3f333..d992cf6ede 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + 
"github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -59,7 +60,8 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() dir := t.TempDir() - db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false, nil, unit.MB*8, unit.MB*8) + db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) return db } From c10e0cd5726997592bdd696eecd0100799b9b58e Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 10:14:45 -0500 Subject: [PATCH 022/119] Cleanup, fix race condition --- sei-db/common/threading/pool_impl.go | 1 - sei-db/db_engine/pebbledb/db.go | 25 ++++++++++++++++++++----- sei-db/db_engine/types/types.go | 19 ++++++++++++++++++- 3 files changed, 38 insertions(+), 7 deletions(-) diff --git a/sei-db/common/threading/pool_impl.go b/sei-db/common/threading/pool_impl.go index c2bec106ec..edf916b88f 100644 --- a/sei-db/common/threading/pool_impl.go +++ b/sei-db/common/threading/pool_impl.go @@ -48,7 +48,6 @@ func NewPool( return workPool } -// Submit submits a task to the work pool. This method does not block until the task is executed. func (wp *pool) Submit(ctx context.Context, task func()) (err error) { defer func() { if recover() != nil { diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 1a207323ef..d884756d5f 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -164,15 +164,21 @@ func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) { } func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { - // TODO batch set! 
+ err := p.db.Set(key, value, toPebbleWriteOpts(opts)) + if err != nil { + return fmt.Errorf("failed to set value in database: %w", err) + } p.cache.Set(key, value) - return p.db.Set(key, value, toPebbleWriteOpts(opts)) + return nil } func (p *pebbleDB) Delete(key []byte, opts types.WriteOptions) error { - // TODO batch delete! + err := p.db.Delete(key, toPebbleWriteOpts(opts)) + if err != nil { + return fmt.Errorf("failed to delete value in database: %w", err) + } p.cache.Delete(key) - return p.db.Delete(key, toPebbleWriteOpts(opts)) + return nil } func (p *pebbleDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, error) { @@ -191,7 +197,12 @@ func (p *pebbleDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, e } func (p *pebbleDB) Flush() error { - return p.db.Flush() + err := p.db.Flush() + if err != nil { + return fmt.Errorf("failed to flush database: %w", err) + } + + return nil } func (p *pebbleDB) Checkpoint(destDir string) error { @@ -226,3 +237,7 @@ func toPebbleWriteOpts(opts types.WriteOptions) *pebble.WriteOptions { } return pebble.NoSync } + +func (p *pebbleDB) DataFlushed() error { + panic("unimplemented") // TODO +} diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 686cb8d5ff..9f2f26fc17 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -46,20 +46,37 @@ type BatchGetResult struct { // KeyValueDB is a low-level KV engine contract (business-agnostic). // // Get returns a value copy (safe to use after the call returns). -type KeyValueDB interface { // TODO document other methods, split this into a stand alone file maybe +type KeyValueDB interface { + + // Get returns the value for the given key, returning an error if the key is not found. Get(key []byte) (value []byte, err error) + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the // map with the results. 
// // It is not thread safe to read or mutate the map while this method is running. BatchGet(keys map[string]BatchGetResult) + + // Set sets the value for the given key. Set(key, value []byte, opts WriteOptions) error + + // Delete deletes the value for the given key. Delete(key []byte, opts WriteOptions) error + // NewIter returns a new iterator over the key-value store. NewIter(opts *IterOptions) (KeyValueDBIterator, error) + + // NewBatch returns a new batch for atomic writes. NewBatch() Batch + // Flush flushes the database to disk. Flush() error + + // Signal to the cach layer that all data currently in the cache has been pushed down to the underlying + // storage layer. Useful if the calling layer keeps its own cache of data on top + DataFlushed() error + + // Close closes the database. io.Closer } From 04f40fadb147ed3ee9e116595246d2f39f1177f2 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 11:23:30 -0500 Subject: [PATCH 023/119] cleanup --- sei-db/common/threading/pool_impl.go | 9 ++++- sei-db/db_engine/pebbledb/batch.go | 9 ++++- sei-db/db_engine/pebbledb/db.go | 26 +++++-------- sei-db/db_engine/pebbledb/db_test.go | 34 ++++++++++------ sei-db/db_engine/pebbledb/flatcache/cache.go | 6 +-- .../pebbledb/flatcache/cache_impl.go | 34 +++++++++++----- sei-db/db_engine/types/types.go | 6 +-- sei-db/state_db/sc/flatkv/snapshot.go | 3 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 4 +- sei-db/state_db/sc/flatkv/store.go | 6 +++ sei-db/state_db/sc/flatkv/store_test.go | 2 +- sei-db/state_db/sc/flatkv/store_write.go | 39 ++++++++++++++++--- 12 files changed, 118 insertions(+), 60 deletions(-) diff --git a/sei-db/common/threading/pool_impl.go b/sei-db/common/threading/pool_impl.go index edf916b88f..b77be64b38 100644 --- a/sei-db/common/threading/pool_impl.go +++ b/sei-db/common/threading/pool_impl.go @@ -10,8 +10,8 @@ var _ Pool = (*pool)(nil) // pool is a pool of workers that can be used to execute tasks concurrently. 
// More efficient than spawning large numbers of short lived goroutines. type pool struct { - ctx context.Context - workQueue chan func() + ctx context.Context + workQueue chan func() } // TODO add metrics! @@ -43,6 +43,11 @@ func NewPool( go func() { <-ctx.Done() close(workQueue) + + // Handle any remaining tasks in the queue to avoid caller deadlock. + for task := range workQueue { + task() + } }() return workPool diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index e01770b349..4ea63ae598 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -1,6 +1,8 @@ package pebbledb import ( + "fmt" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -46,9 +48,12 @@ func (pb *pebbleBatch) Delete(key []byte) error { func (pb *pebbleBatch) Commit(opts types.WriteOptions) error { err := pb.b.Commit(toPebbleWriteOpts(opts)) if err != nil { - return err + return fmt.Errorf("failed to commit batch: %w", err) + } + err = pb.cache.BatchSet(pb.pendingCacheUpdates) + if err != nil { + return fmt.Errorf("failed to set cache: %w", err) } - pb.cache.BatchSet(pb.pendingCacheUpdates) pb.pendingCacheUpdates = nil return nil } diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index d884756d5f..fbee058bca 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -40,6 +40,8 @@ func Open( enableMetrics bool, // A work pool for reading from the DB. readPool threading.Pool, + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. 
+ miscPool threading.Pool, cacheSize int, pageCacheSize int, ) (_ types.KeyValueDB, err error) { @@ -119,6 +121,7 @@ func Open( 8, cacheSize, readPool, + miscPool, 10*time.Second) if err != nil { return nil, fmt.Errorf("failed to create flatcache: %w", err) @@ -137,17 +140,6 @@ func Open( } func (p *pebbleDB) Get(key []byte) ([]byte, error) { - // // Pebble returns a zero-copy view plus a closer; we copy and close internally. - // val, closer, err := p.db.Get(key) - // if err != nil { - // if errors.Is(err, pebble.ErrNotFound) { - // return nil, errorutils.ErrNotFound - // } - // return nil, err - // } - // cloned := bytes.Clone(val) - // _ = closer.Close() - val, found, err := p.cache.Get(key, true) if err != nil { return nil, fmt.Errorf("failed to get value from cache: %w", err) @@ -159,8 +151,12 @@ func (p *pebbleDB) Get(key []byte) ([]byte, error) { return val, nil } -func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) { - p.cache.BatchGet(keys) +func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) error { + err := p.cache.BatchGet(keys) + if err != nil { + return fmt.Errorf("failed to get values from cache: %w", err) + } + return nil } func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { @@ -237,7 +233,3 @@ func toPebbleWriteOpts(opts types.WriteOptions) *pebble.WriteOptions { } return pebble.NoSync } - -func (p *pebbleDB) DataFlushed() error { - panic("unimplemented") // TODO -} diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 79083d3421..451cd925db 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -15,7 +15,8 @@ import ( func TestDBGetSetDelete(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, 
unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -53,7 +54,8 @@ func TestDBGetSetDelete(t *testing.T) { func TestBatchAtomicWrite(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -92,7 +94,8 @@ func TestBatchAtomicWrite(t *testing.T) { func TestIteratorBounds(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -126,7 +129,8 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -192,7 +196,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { dir := t.TempDir() db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, - threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -228,7 +232,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestOpenOptionsComparerTypeCheck(t *testing.T) { dir := t.TempDir() _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, - false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + false, threading.NewAdHocPool(), 
threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err == nil { t.Fatalf("expected error for invalid comparer type") } @@ -236,7 +240,8 @@ func TestOpenOptionsComparerTypeCheck(t *testing.T) { func TestErrNotFoundConsistency(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -261,7 +266,8 @@ func TestErrNotFoundConsistency(t *testing.T) { func TestGetReturnsCopy(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -291,7 +297,8 @@ func TestGetReturnsCopy(t *testing.T) { func TestBatchLenResetDelete(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -347,7 +354,8 @@ func TestBatchLenResetDelete(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -384,7 +392,8 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestFlush(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, 
types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } @@ -412,7 +421,8 @@ func TestFlush(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + db, err := Open(t.Context(), dir, types.OpenOptions{}, false, + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/sei-db/db_engine/pebbledb/flatcache/cache.go b/sei-db/db_engine/pebbledb/flatcache/cache.go index ab6877e765..fa0c67b545 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache.go @@ -16,8 +16,6 @@ type CacheUpdate struct { // Cache describes a cache kapable of being used by a FlatKV store. type Cache interface { - // TODO decide if we should support individual modifications - // Get returns the value for the given key, or (nil, false) if not found. Get( // The entry to fetch. @@ -32,7 +30,7 @@ type Cache interface { // map with the results. // // It is not thread safe to read or mutate the map while this method is running. - BatchGet(keys map[string]types.BatchGetResult) + BatchGet(keys map[string]types.BatchGetResult) error // Set sets the value for the given key. Set(key []byte, value []byte) @@ -41,5 +39,5 @@ type Cache interface { Delete(key []byte) // BatchSet applies the given updates to the cache. 
- BatchSet(updates []CacheUpdate) + BatchSet(updates []CacheUpdate) error } diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index 161d7635ae..eb8def56e4 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -25,6 +25,9 @@ type cache struct { // A pool for asynchronous reads. readPool threading.Pool + // A pool for miscellaneous operations that are neither computationally intensive nor IO bound. + miscPool threading.Pool + // The interval at which to run garbage collection. garbageCollectionInterval time.Duration } @@ -40,6 +43,8 @@ func NewCache( maxSize int, // A work pool for reading from the DB. readPool threading.Pool, + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + miscPool threading.Pool, // The interval at which to run garbage collection. garbageCollectionInterval time.Duration, ) (Cache, error) { @@ -84,7 +89,7 @@ func NewCache( return c, nil } -func (c *cache) BatchSet(updates []CacheUpdate) { +func (c *cache) BatchSet(updates []CacheUpdate) error { // Sort entries by shard index so each shard is locked only once. 
shardMap := make(map[uint64][]CacheUpdate) for i := range updates { @@ -92,18 +97,23 @@ func (c *cache) BatchSet(updates []CacheUpdate) { shardMap[idx] = append(shardMap[idx], updates[i]) } - var wg sync.WaitGroup // TODO use a pool here + var wg sync.WaitGroup for shardIndex, shardEntries := range shardMap { wg.Add(1) - go func(shardIndex uint64, shardEntries []CacheUpdate) { - defer wg.Done() + err := c.miscPool.Submit(c.ctx, func() { c.shards[shardIndex].BatchSet(shardEntries) - }(shardIndex, shardEntries) + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit batch set: %w", err) + } } wg.Wait() + + return nil } -func (c *cache) BatchGet(keys map[string]types.BatchGetResult) { +func (c *cache) BatchGet(keys map[string]types.BatchGetResult) error { work := make(map[uint64]map[string]types.BatchGetResult) for key := range keys { idx := c.shardManager.Shard([]byte(key)) @@ -113,10 +123,11 @@ func (c *cache) BatchGet(keys map[string]types.BatchGetResult) { work[idx][key] = types.BatchGetResult{} } - var wg sync.WaitGroup // TODO use a pool here + var wg sync.WaitGroup for shardIndex, subMap := range work { wg.Add(1) - go func(shardIndex uint64, subMap map[string]types.BatchGetResult) { + + err := c.miscPool.Submit(c.ctx, func() { defer wg.Done() err := c.shards[shardIndex].BatchGet(subMap) if err != nil { @@ -124,7 +135,10 @@ func (c *cache) BatchGet(keys map[string]types.BatchGetResult) { subMap[key] = types.BatchGetResult{Error: err} } } - }(shardIndex, subMap) + }) + if err != nil { + return fmt.Errorf("failed to submit batch get: %w", err) + } } wg.Wait() @@ -133,6 +147,8 @@ func (c *cache) BatchGet(keys map[string]types.BatchGetResult) { keys[key] = result } } + + return nil } func (c *cache) Delete(key []byte) { diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 9f2f26fc17..be5a6ed029 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -55,7 +55,7 @@ type KeyValueDB 
interface { // map with the results. // // It is not thread safe to read or mutate the map while this method is running. - BatchGet(keys map[string]BatchGetResult) + BatchGet(keys map[string]BatchGetResult) error // Set sets the value for the given key. Set(key, value []byte, opts WriteOptions) error @@ -72,10 +72,6 @@ type KeyValueDB interface { // Flush flushes the database to disk. Flush() error - // Signal to the cach layer that all data currently in the cache has been pushed down to the underlying - // storage layer. Useful if the calling layer keeps its own cache of data on top - DataFlushed() error - // Close closes the database. io.Closer } diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index f076799ce2..91ba4e2129 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -381,7 +381,8 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { var version int64 metaPath := filepath.Join(flatkvDir, metadataDir) tmpMeta, err := pebbledb.Open( - s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool, unit.GB/2, unit.GB/2) + s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, + s.readPool, s.miscPool, unit.GB/2, unit.GB/2) if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 604daef2a3..6da33bd988 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -258,7 +258,7 @@ func TestMigrationFromFlatLayout(t *testing.T) { require.NoError(t, os.MkdirAll(dbPath, 0750)) // Create an actual PebbleDB so Open works db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false, - threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) 
require.NoError(t, db.Close()) } @@ -316,7 +316,7 @@ func TestOpenVersionValidation(t *testing.T) { accountDBPath := filepath.Join(snapDir, accountDBDir) db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false, - threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) lagMeta := &LocalMeta{CommittedVersion: 1} require.NoError(t, db.Set(DBLocalMetaKey, MarshalLocalMeta(lagMeta), types.WriteOptions{Sync: true})) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 5b773cafad..0ea01c4dea 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -105,6 +105,9 @@ type CommitStore struct { // A work pool for reading from the DB. readPool threading.Pool + + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + miscPool threading.Pool } var _ Store = (*CommitStore)(nil) @@ -123,6 +126,7 @@ func NewCommitStore( meter := otel.Meter(flatkvMeterName) readPool := threading.NewPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! + miscPool := threading.NewPool(ctx, "flatkv-misc", 20, 1024) // TODO this should be configurable! 
return &CommitStore{ ctx: ctx, @@ -139,6 +143,7 @@ func NewCommitStore( workingLtHash: lthash.New(), phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), readPool: readPool, + miscPool: miscPool, } } @@ -343,6 +348,7 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { seidbtypes.OpenOptions{}, s.config.EnablePebbleMetrics, s.readPool, + s.miscPool, cacheSize, pageCacheSize) if err != nil { diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index d992cf6ede..33e2167808 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -61,7 +61,7 @@ func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() dir := t.TempDir() db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) require.NoError(t, err) return db } diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 11a2f23210..506b0cd000 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -22,7 +22,10 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { s.phaseTimer.SetPhase("apply_change_sets_batch_read") // Batch read all old values from DBs in parallel. 
- storageOld, accountOld, codeOld, legacyOld := s.batchReadOldValues(cs) + storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(cs) + if err != nil { + return fmt.Errorf("failed to batch read old values: %w", err) + } s.phaseTimer.SetPhase("apply_change_sets_prepare") @@ -471,6 +474,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( accountOld map[string]types.BatchGetResult, codeOld map[string]types.BatchGetResult, legacyOld map[string]types.BatchGetResult, + err error, ) { storageOld = make(map[string]types.BatchGetResult) accountOld = make(map[string]types.BatchGetResult) @@ -500,35 +504,60 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( } } + var storageErr error var wg sync.WaitGroup if len(storageOld) > 0 { wg.Add(1) go func() { defer wg.Done() - s.storageDB.BatchGet(storageOld) + storageErr = s.storageDB.BatchGet(storageOld) }() } + + var accountErr error if len(accountOld) > 0 { wg.Add(1) go func() { defer wg.Done() - s.accountDB.BatchGet(accountOld) + accountErr = s.accountDB.BatchGet(accountOld) }() } + var codeErr error if len(codeOld) > 0 { wg.Add(1) go func() { defer wg.Done() - s.codeDB.BatchGet(codeOld) + codeErr = s.codeDB.BatchGet(codeOld) }() } + + var legacyErr error if len(legacyOld) > 0 { wg.Add(1) go func() { defer wg.Done() - s.legacyDB.BatchGet(legacyOld) + legacyErr = s.legacyDB.BatchGet(legacyOld) }() } + wg.Wait() + + if storageErr != nil || accountErr != nil || codeErr != nil || legacyErr != nil { + errString := "" + if storageErr != nil { + errString += fmt.Sprintf(", storageDB: %s\n", storageErr.Error()) + } + if accountErr != nil { + errString += fmt.Sprintf(", accountDB: %s\n", accountErr.Error()) + } + if codeErr != nil { + errString += fmt.Sprintf(", codeDB: %s\n", codeErr.Error()) + } + if legacyErr != nil { + errString += fmt.Sprintf(", legacyDB: %s\n", legacyErr.Error()) + } + err = fmt.Errorf("batch get: %s", errString) + return + } return } From 
b4e4d2c5a9faceeda3666f93ec5ff5d138487295 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 12:00:14 -0500 Subject: [PATCH 024/119] cleanup --- sei-db/common/threading/elastic_pool.go | 72 +++++++++++++++++++ .../threading/{pool_impl.go => fixed_pool.go} | 30 ++++---- sei-db/state_db/sc/flatkv/store.go | 4 +- 3 files changed, 87 insertions(+), 19 deletions(-) create mode 100644 sei-db/common/threading/elastic_pool.go rename sei-db/common/threading/{pool_impl.go => fixed_pool.go} (63%) diff --git a/sei-db/common/threading/elastic_pool.go b/sei-db/common/threading/elastic_pool.go new file mode 100644 index 0000000000..5621562d57 --- /dev/null +++ b/sei-db/common/threading/elastic_pool.go @@ -0,0 +1,72 @@ +package threading + +import ( + "context" + "fmt" +) + +var _ Pool = (*elasticPool)(nil) + +// elasticPool is a pool that guarantees every submitted task begins executing +// immediately without waiting for other tasks to finish first. It maintains a +// set of warm workers for goroutine reuse, and spawns temporary goroutines when +// all warm workers are busy. +// +// This is useful when tasks submitted to the pool may depend on other tasks in +// the same pool. For example, if task A is submitted and then submits task B, +// and A waits for B to complete, a fixed-size pool may deadlock when all +// workers are occupied, since task B can never be scheduled. An +// elastic pool avoids this by ensuring B starts immediately in a temporary +// goroutine if all workers are busy. +type elasticPool struct { + workQueue chan func() +} + +// NewElasticPool creates a pool with the given number of warm workers. Submitted +// tasks are handed off to an idle warm worker if one is available, otherwise a +// temporary goroutine is spawned. Tasks are never queued behind other tasks. 
+func NewElasticPool( + ctx context.Context, + name string, + warmWorkers int, +) Pool { + workQueue := make(chan func()) + ep := &elasticPool{ + workQueue: workQueue, + } + + for i := 0; i < warmWorkers; i++ { + go ep.worker() + } + + go func() { + <-ctx.Done() + close(workQueue) + }() + + return ep +} + +func (ep *elasticPool) Submit(ctx context.Context, task func()) (err error) { + defer func() { + if recover() != nil { + err = fmt.Errorf("elastic pool is shut down") + } + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case ep.workQueue <- task: + return nil + default: + go task() + return nil + } +} + +func (ep *elasticPool) worker() { + for task := range ep.workQueue { + task() + } +} diff --git a/sei-db/common/threading/pool_impl.go b/sei-db/common/threading/fixed_pool.go similarity index 63% rename from sei-db/common/threading/pool_impl.go rename to sei-db/common/threading/fixed_pool.go index b77be64b38..5044d30cd2 100644 --- a/sei-db/common/threading/pool_impl.go +++ b/sei-db/common/threading/fixed_pool.go @@ -5,20 +5,19 @@ import ( "fmt" ) -var _ Pool = (*pool)(nil) +var _ Pool = (*fixedPool)(nil) -// pool is a pool of workers that can be used to execute tasks concurrently. +// fixedPool is a pool of workers that can be used to execute tasks concurrently. // More efficient than spawning large numbers of short lived goroutines. -type pool struct { - ctx context.Context - workQueue chan func() +type fixedPool struct { + workQueue chan func() } // TODO add metrics! // TODO unit test before merging! // Create a new work pool. -func NewPool( +func NewFixedPool( // The work pool shuts down when the context is done. ctx context.Context, // The name of the work pool. Used for metrics. 
@@ -30,13 +29,12 @@ func NewPool( ) Pool { workQueue := make(chan func(), queueSize) - workPool := &pool{ - ctx: ctx, + fp := &fixedPool{ workQueue: workQueue, } for i := 0; i < workers; i++ { - go workPool.worker() + go fp.worker() } // Shutdown the work pool when the context is done. @@ -50,27 +48,25 @@ func NewPool( } }() - return workPool + return fp } -func (wp *pool) Submit(ctx context.Context, task func()) (err error) { +func (fp *fixedPool) Submit(ctx context.Context, task func()) (err error) { defer func() { if recover() != nil { - err = fmt.Errorf("work pool is shut down") + err = fmt.Errorf("fixed pool is shut down") } }() select { case <-ctx.Done(): return ctx.Err() - case <-wp.ctx.Done(): - return fmt.Errorf("work pool is shut down") - case wp.workQueue <- task: + case fp.workQueue <- task: return nil } } -func (wp *pool) worker() { - for task := range wp.workQueue { +func (fp *fixedPool) worker() { + for task := range fp.workQueue { task() } } diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 0ea01c4dea..1a51f1ef42 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -125,8 +125,8 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) - readPool := threading.NewPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! - miscPool := threading.NewPool(ctx, "flatkv-misc", 20, 1024) // TODO this should be configurable! + readPool := threading.NewFixedPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! + miscPool := threading.NewFixedPool(ctx, "flatkv-misc", 20, 1024) // TODO this should be configurable! 
return &CommitStore{ ctx: ctx, From e53fefab8fe5d6902f4a0dfcb6a40e7069758ec4 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 13:30:42 -0500 Subject: [PATCH 025/119] use pool --- sei-db/state_db/sc/flatkv/store.go | 8 ++- sei-db/state_db/sc/flatkv/store_read.go | 91 ------------------------ sei-db/state_db/sc/flatkv/store_write.go | 79 +++++++++++--------- 3 files changed, 50 insertions(+), 128 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 1a51f1ef42..c641ab63d6 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -103,10 +103,14 @@ type CommitStore struct { // Used to track time spent in various phases of execution. phaseTimer *metrics.PhaseTimer - // A work pool for reading from the DB. + // A work pool for reading from the DBs. + // + // Uses a fixed-size pool. readPool threading.Pool // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + // + // Uses an elastically-sized pool, so it is safe to submit tasks that have dependencies on other tasks in the pool. miscPool threading.Pool } @@ -126,7 +130,7 @@ func NewCommitStore( meter := otel.Meter(flatkvMeterName) readPool := threading.NewFixedPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! - miscPool := threading.NewFixedPool(ctx, "flatkv-misc", 20, 1024) // TODO this should be configurable! 
+ miscPool := threading.NewElasticPool(ctx, "flatkv-misc", 20) return &CommitStore{ ctx: ctx, diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index b2a4ecb847..9975f6e727 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -3,9 +3,7 @@ package flatkv import ( "bytes" "encoding/binary" - "fmt" - errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" ) @@ -200,92 +198,3 @@ func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { return &emptyIterator{} } } - -// ============================================================================= -// Internal Getters (used by ApplyChangeSets for LtHash computation) -// ============================================================================= - -// getAccountValue loads AccountValue from pending writes or DB. -// Returns zero AccountValue if not found (new account). -// Returns error if existing data is corrupted (decode fails) or I/O error occurs. -func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { - // Check pending writes first - if paw, ok := s.accountWrites[string(addr[:])]; ok { - return paw.value, nil - } - - // Read from accountDB - value, err := s.accountDB.Get(AccountKey(addr)) - if err != nil { - if errorutils.IsNotFound(err) { - return AccountValue{}, nil // New account - } - return AccountValue{}, fmt.Errorf("accountDB I/O error for addr %x: %w", addr, err) - } - - av, err := DecodeAccountValue(value) - if err != nil { - return AccountValue{}, fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) - } - return av, nil -} - -// getStorageValue returns the storage value from pending writes or DB. -// Returns (nil, nil) if not found. -// Returns (nil, error) if I/O error occurs. 
-func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { - if pw, ok := s.storageWrites[string(key)]; ok { - if pw.isDelete { - return nil, nil - } - return pw.value, nil - } - value, err := s.storageDB.Get(key) - if err != nil { - if errorutils.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("storageDB I/O error for key %x: %w", key, err) - } - return value, nil -} - -// getCodeValue returns the code value from pending writes or DB. -// Returns (nil, nil) if not found. -// Returns (nil, error) if I/O error occurs. -func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { - if pw, ok := s.codeWrites[string(key)]; ok { - if pw.isDelete { - return nil, nil - } - return pw.value, nil - } - value, err := s.codeDB.Get(key) - if err != nil { - if errorutils.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("codeDB I/O error for key %x: %w", key, err) - } - return value, nil -} - -// getLegacyValue returns the legacy value from pending writes or DB. -// Returns (nil, nil) if not found. -// Returns (nil, error) if I/O error occurs. 
-func (s *CommitStore) getLegacyValue(key []byte) ([]byte, error) { - if pw, ok := s.legacyWrites[string(key)]; ok { - if pw.isDelete { - return nil, nil - } - return pw.value, nil - } - value, err := s.legacyDB.Get(key) - if err != nil { - if errorutils.IsNotFound(err) { - return nil, nil - } - return nil, fmt.Errorf("legacyDB I/O error for key %x: %w", key, err) - } - return value, nil -} diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 506b0cd000..0ba97c650d 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -2,6 +2,7 @@ package flatkv import ( "encoding/binary" + "errors" "fmt" "sync" @@ -128,12 +129,14 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } else { if kind == evm.EVMKeyNonce { if len(pair.Value) != NonceLen { - return fmt.Errorf("invalid nonce value length: got %d, expected %d", len(pair.Value), NonceLen) + return fmt.Errorf("invalid nonce value length: got %d, expected %d", + len(pair.Value), NonceLen) } paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) } else { if len(pair.Value) != CodeHashLen { - return fmt.Errorf("invalid codehash value length: got %d, expected %d", len(pair.Value), CodeHashLen) + return fmt.Errorf("invalid codehash value length: got %d, expected %d", + len(pair.Value), CodeHashLen) } copy(paw.value.CodeHash[:], pair.Value) } @@ -284,10 +287,13 @@ func (s *CommitStore) flushAllDBs() error { var wg sync.WaitGroup wg.Add(4) for i, db := range []types.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} { - go func(idx int, db types.KeyValueDB) { - defer wg.Done() - errs[idx] = db.Flush() - }(i, db) + err := s.miscPool.Submit(s.ctx, func() { + errs[i] = db.Flush() + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit flush: %w", err) + } } wg.Wait() names := [4]string{"accountDB", "codeDB", "storageDB", "legacyDB"} @@ -446,10 +452,13 @@ func (s *CommitStore) 
commitBatches(version int64) error { var wg sync.WaitGroup wg.Add(len(pending)) for i, p := range pending { - go func(idx int, b types.Batch) { - defer wg.Done() - errs[idx] = b.Commit(syncOpt) - }(i, p.batch) + err := s.miscPool.Submit(s.ctx, func() { + errs[i] = p.batch.Commit(syncOpt) + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit commit: %w", err) + } } wg.Wait() @@ -508,56 +517,56 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( var wg sync.WaitGroup if len(storageOld) > 0 { wg.Add(1) - go func() { + err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() storageErr = s.storageDB.BatchGet(storageOld) - }() + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } } var accountErr error if len(accountOld) > 0 { wg.Add(1) - go func() { + err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() accountErr = s.accountDB.BatchGet(accountOld) - }() + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } } var codeErr error if len(codeOld) > 0 { wg.Add(1) - go func() { + err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() codeErr = s.codeDB.BatchGet(codeOld) - }() + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } } var legacyErr error if len(legacyOld) > 0 { wg.Add(1) - go func() { + err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() legacyErr = s.legacyDB.BatchGet(legacyOld) - }() + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } } wg.Wait() + err = errors.Join(storageErr, accountErr, codeErr, legacyErr) - if storageErr != nil || accountErr != nil || codeErr != nil || legacyErr != nil { - errString := "" - if storageErr != nil { - errString += fmt.Sprintf(", storageDB: %s\n", storageErr.Error()) - } - if accountErr != nil { - errString += fmt.Sprintf(", accountDB: %s\n", accountErr.Error()) - } - if codeErr != nil { - errString += fmt.Sprintf(", 
codeDB: %s\n", codeErr.Error()) - } - if legacyErr != nil { - errString += fmt.Sprintf(", legacyDB: %s\n", legacyErr.Error()) - } - err = fmt.Errorf("batch get: %s", errString) - return - } return } From 438fc8d87d9ac0e9b8514e6c5218703d1b98091b Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 13:50:22 -0500 Subject: [PATCH 026/119] fix ctx lifecycle --- .../pebbledb/flatcache/cache_impl.go | 1 + sei-db/state_db/bench/cryptosim/cryptosim.go | 20 +++++++++++++++++-- sei-db/state_db/sc/flatkv/store.go | 4 ++++ sei-db/state_db/sc/flatkv/store_lifecycle.go | 5 ++++- 4 files changed, 27 insertions(+), 3 deletions(-) diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go index eb8def56e4..a1d419d0fd 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/flatcache/cache_impl.go @@ -81,6 +81,7 @@ func NewCache( shardManager: shardManager, shards: shards, readPool: readPool, + miscPool: miscPool, garbageCollectionInterval: garbageCollectionInterval, } diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index 04fc441b1a..e970c14053 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -27,6 +27,11 @@ type CryptoSim struct { ctx context.Context cancel context.CancelFunc + // Cancels the DB infrastructure context. Only called after the database + // has been fully closed during teardown, so that pools, caches, and + // background goroutines remain functional throughout graceful shutdown. + dbCancel context.CancelFunc + // The configuration for the benchmark. config *CryptoSimConfig @@ -93,23 +98,31 @@ func NewCryptoSim( config.MinimumNumberOfDormantAccounts = 2 * config.TransactionsPerBlock } + // The workload context is cancelled on Ctrl-C (or programmatically) to + // stop the benchmark loop and executors. 
ctx, cancel := context.WithCancel(ctx) + // The DB context keeps pools, caches, and background goroutines alive + // until teardown has finished closing the database. + dbCtx, dbCancel := context.WithCancel(context.Background()) + dataDir, err := resolveAndCreateDataDir(config.DataDir) if err != nil { cancel() + dbCancel() return nil, fmt.Errorf("failed to resolve and create data directory: %w", err) } fmt.Printf("Running cryptosim benchmark from data directory: %s\n", dataDir) - db, err := wrappers.NewDBImpl(ctx, config.Backend, dataDir) + db, err := wrappers.NewDBImpl(dbCtx, config.Backend, dataDir) if err != nil { cancel() + dbCancel() return nil, fmt.Errorf("failed to create database: %w", err) } - metrics := NewCryptosimMetrics(ctx, db.GetPhaseTimer(), config) + metrics := NewCryptosimMetrics(dbCtx, db.GetPhaseTimer(), config) // Server start deferred until after DataGenerator loads DB state and sets gauges, // avoiding rate() spikes when restarting with a preserved DB. @@ -128,6 +141,7 @@ func NewCryptoSim( if closeErr := db.Close(); closeErr != nil { fmt.Printf("failed to close database during error recovery: %v\n", closeErr) } + dbCancel() return nil, fmt.Errorf("failed to create data generator: %w", err) } threadCount := int(config.ThreadsPerCore)*runtime.NumCPU() + config.ConstantThreadCount @@ -145,6 +159,7 @@ func NewCryptoSim( c := &CryptoSim{ ctx: ctx, cancel: cancel, + dbCancel: dbCancel, config: config, consoleUpdatePeriod: consoleUpdatePeriod, lastConsoleUpdateTime: start, @@ -408,6 +423,7 @@ func (c *CryptoSim) teardown() { fmt.Printf("failed to close database: %v\n", err) } + c.dbCancel() c.dataGenerator.Close() c.closeChan <- struct{}{} diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index c641ab63d6..594ec9d8dd 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -65,6 +65,7 @@ type pendingAccountWrite struct { // NOT thread-safe; callers must serialize all operations. 
type CommitStore struct { ctx context.Context + cancel context.CancelFunc log logger.Logger config Config dbDir string @@ -129,11 +130,14 @@ func NewCommitStore( } meter := otel.Meter(flatkvMeterName) + ctx, cancel := context.WithCancel(ctx) + readPool := threading.NewFixedPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! miscPool := threading.NewElasticPool(ctx, "flatkv-misc", 20) return &CommitStore{ ctx: ctx, + cancel: cancel, log: log, config: cfg, dbDir: dbDir, diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go index 4c73f50136..257b7bab25 100644 --- a/sei-db/state_db/sc/flatkv/store_lifecycle.go +++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go @@ -66,9 +66,12 @@ func (s *CommitStore) closeDBsOnly() error { return nil } -// Close closes all database instances and releases the file lock. +// Close closes all database instances, cancels the store's context to +// stop background goroutines (pools, caches, metrics), and releases the +// file lock. 
func (s *CommitStore) Close() error { err := s.closeDBsOnly() + s.cancel() if s.fileLock != nil { if lockErr := s.fileLock.Unlock(); lockErr != nil { From 23440f6e45f048af5b988e6851842e47816089cb Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 14:19:30 -0500 Subject: [PATCH 027/119] rename package --- sei-db/db_engine/pebbledb/batch.go | 12 ++++++------ sei-db/db_engine/pebbledb/db.go | 6 +++--- .../pebbledb/{flatcache => pebblecache}/cache.go | 2 +- .../{flatcache => pebblecache}/cache_impl.go | 4 +--- .../pebbledb/{flatcache => pebblecache}/lru_queue.go | 2 +- .../{flatcache => pebblecache}/lru_queue_test.go | 2 +- .../{flatcache => pebblecache}/read_scheduler.go | 2 +- .../pebbledb/{flatcache => pebblecache}/shard.go | 2 +- .../{flatcache => pebblecache}/shard_manager.go | 2 +- 9 files changed, 16 insertions(+), 18 deletions(-) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/cache.go (96%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/cache_impl.go (98%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/lru_queue.go (99%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/lru_queue_test.go (98%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/read_scheduler.go (99%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/shard.go (99%) rename sei-db/db_engine/pebbledb/{flatcache => pebblecache}/shard_manager.go (98%) diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index 4ea63ae598..c49d87c62a 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/cockroachdb/pebble/v2" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/pebblecache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -13,15 +13,15 @@ import ( // even if Commit() succeeds. Failure to Close() will leak memory. 
type pebbleBatch struct { b *pebble.Batch - cache flatcache.Cache + cache pebblecache.Cache // Writes are tracked so the cache can be updated after a successful commit. - pendingCacheUpdates []flatcache.CacheUpdate + pendingCacheUpdates []pebblecache.CacheUpdate } var _ types.Batch = (*pebbleBatch)(nil) -func newPebbleBatch(db *pebble.DB, cache flatcache.Cache) *pebbleBatch { +func newPebbleBatch(db *pebble.DB, cache pebblecache.Cache) *pebbleBatch { return &pebbleBatch{b: db.NewBatch(), cache: cache} } @@ -30,7 +30,7 @@ func (p *pebbleDB) NewBatch() types.Batch { } func (pb *pebbleBatch) Set(key, value []byte) error { - pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, flatcache.CacheUpdate{ + pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, pebblecache.CacheUpdate{ Key: key, Value: value, }) @@ -38,7 +38,7 @@ func (pb *pebbleBatch) Set(key, value []byte) error { } func (pb *pebbleBatch) Delete(key []byte) error { - pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, flatcache.CacheUpdate{ + pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, pebblecache.CacheUpdate{ Key: key, IsDelete: true, }) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index fbee058bca..0aa5f4cf4e 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -15,7 +15,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/flatcache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/pebblecache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -25,7 +25,7 @@ const metricsScrapeInterval = 10 * time.Second type pebbleDB struct { db *pebble.DB metricsCancel context.CancelFunc - cache flatcache.Cache + cache pebblecache.Cache } var _ types.KeyValueDB = (*pebbleDB)(nil) @@ -115,7 +115,7 @@ 
func Open( } // A high level cache per key. - cache, err := flatcache.NewCache( + cache, err := pebblecache.NewCache( ctx, readFunction, 8, diff --git a/sei-db/db_engine/pebbledb/flatcache/cache.go b/sei-db/db_engine/pebbledb/pebblecache/cache.go similarity index 96% rename from sei-db/db_engine/pebbledb/flatcache/cache.go rename to sei-db/db_engine/pebbledb/pebblecache/cache.go index fa0c67b545..a9ba30060d 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache.go @@ -1,4 +1,4 @@ -package flatcache // TODO rename the flatcache package! +package pebblecache import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" diff --git a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go similarity index 98% rename from sei-db/db_engine/pebbledb/flatcache/cache_impl.go rename to sei-db/db_engine/pebbledb/pebblecache/cache_impl.go index a1d419d0fd..592feeca12 100644 --- a/sei-db/db_engine/pebbledb/flatcache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import ( "context" @@ -205,5 +205,3 @@ func (c *cache) runGarbageCollection() { } } } - -// TODO create a warming mechanism diff --git a/sei-db/db_engine/pebbledb/flatcache/lru_queue.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go similarity index 99% rename from sei-db/db_engine/pebbledb/flatcache/lru_queue.go rename to sei-db/db_engine/pebbledb/pebblecache/lru_queue.go index 545e985946..2e0e516f79 100644 --- a/sei-db/db_engine/pebbledb/flatcache/lru_queue.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import "container/list" diff --git a/sei-db/db_engine/pebbledb/flatcache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go similarity index 98% rename from sei-db/db_engine/pebbledb/flatcache/lru_queue_test.go rename to 
sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index 9fa17a8ce9..38cd1000fb 100644 --- a/sei-db/db_engine/pebbledb/flatcache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import ( "bytes" diff --git a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go b/sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go similarity index 99% rename from sei-db/db_engine/pebbledb/flatcache/read_scheduler.go rename to sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go index d4a11f1e76..2002a435c9 100644 --- a/sei-db/db_engine/pebbledb/flatcache/read_scheduler.go +++ b/sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import ( "context" diff --git a/sei-db/db_engine/pebbledb/flatcache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go similarity index 99% rename from sei-db/db_engine/pebbledb/flatcache/shard.go rename to sei-db/db_engine/pebbledb/pebblecache/shard.go index 9d57529ec0..9bdc9bcbbd 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import ( "context" diff --git a/sei-db/db_engine/pebbledb/flatcache/shard_manager.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go similarity index 98% rename from sei-db/db_engine/pebbledb/flatcache/shard_manager.go rename to sei-db/db_engine/pebbledb/pebblecache/shard_manager.go index 473e8c3a4a..cb7e3c694c 100644 --- a/sei-db/db_engine/pebbledb/flatcache/shard_manager.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go @@ -1,4 +1,4 @@ -package flatcache +package pebblecache import ( "errors" From 4ecc8fd631058f4741b2d73a6a7cb98a8b792d54 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 14:32:07 -0500 Subject: [PATCH 028/119] Clean up string copies --- .../pebbledb/pebblecache/lru_queue.go | 16 +++++------ 
.../pebbledb/pebblecache/lru_queue_test.go | 27 +++++++++---------- .../db_engine/pebbledb/pebblecache/shard.go | 19 +++++++------ 3 files changed, 30 insertions(+), 32 deletions(-) diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go index 2e0e516f79..bea6b2e1d9 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go @@ -10,7 +10,7 @@ type lruQueue struct { } type lruQueueEntry struct { - key []byte + key string size int } @@ -29,8 +29,7 @@ func (lru *lruQueue) Push( // the size of the key + value size int, ) { - keyString := string(key) // TODO revisit and maybe do unsafe copies - if elem, ok := lru.entries[keyString]; ok { + if elem, ok := lru.entries[string(key)]; ok { entry := elem.Value.(*lruQueueEntry) lru.totalSize += size - entry.size entry.size = size @@ -38,12 +37,12 @@ func (lru *lruQueue) Push( return } - keyCopy := append([]byte(nil), key...) // TODO don't do this + keyStr := string(key) elem := lru.order.PushBack(&lruQueueEntry{ - key: keyCopy, + key: keyStr, size: size, }) - lru.entries[keyString] = elem + lru.entries[keyStr] = elem lru.totalSize += size } @@ -68,8 +67,9 @@ func (lru *lruQueue) GetCount() int { } // Pops a single element out of the queue. The element removed is the entry least recently passed to Update(). +// Returns the key in string form to avoid copying the key an additional time. // Panics if the queue is empty. 
-func (lru *lruQueue) PopLeastRecentlyUsed() []byte { +func (lru *lruQueue) PopLeastRecentlyUsed() string { elem := lru.order.Front() if elem == nil { panic("cannot pop from empty LRU queue") @@ -77,7 +77,7 @@ func (lru *lruQueue) PopLeastRecentlyUsed() []byte { lru.order.Remove(elem) entry := elem.Value.(*lruQueueEntry) - delete(lru.entries, string(entry.key)) + delete(lru.entries, entry.key) lru.totalSize -= entry.size return entry.key } diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index 38cd1000fb..3a96b55126 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -1,7 +1,6 @@ package pebblecache import ( - "bytes" "testing" ) @@ -22,16 +21,16 @@ func TestLRUQueueTracksSizeCountAndOrder(t *testing.T) { lru.Touch([]byte("a")) - if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("b")) { - t.Fatalf("first pop = %q, want %q", got, []byte("b")) + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("first pop = %q, want %q", got, "b") } - if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("c")) { - t.Fatalf("second pop = %q, want %q", got, []byte("c")) + if got := lru.PopLeastRecentlyUsed(); got != "c" { + t.Fatalf("second pop = %q, want %q", got, "c") } - if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { - t.Fatalf("third pop = %q, want %q", got, []byte("a")) + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("third pop = %q, want %q", got, "a") } if got := lru.GetCount(); got != 0 { @@ -58,24 +57,24 @@ func TestLRUQueuePushUpdatesExistingEntry(t *testing.T) { t.Fatalf("GetTotalSize() = %d, want 16", got) } - if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("b")) { - t.Fatalf("first pop = %q, want %q", got, []byte("b")) + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("first pop = %q, want %q", got, "b") } - if got 
:= lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { - t.Fatalf("second pop = %q, want %q", got, []byte("a")) + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("second pop = %q, want %q", got, "a") } } -func TestLRUQueueCopiesInsertedKey(t *testing.T) { +func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { lru := NewLRUQueue() key := []byte("a") lru.Push(key, 1) key[0] = 'z' - if got := lru.PopLeastRecentlyUsed(); !bytes.Equal(got, []byte("a")) { - t.Fatalf("pop after mutating caller key = %q, want %q", got, []byte("a")) + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("pop after mutating caller key = %q, want %q", got, "a") } } diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index 9bdc9bcbbd..375e24073b 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -9,8 +9,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -// TODO unsafe byte-> string conversion maybe? - // A single shard of a Cache. type shard struct { ctx context.Context @@ -168,14 +166,15 @@ func (se *shardEntry) injectValue(key []byte, value []byte) { // Get a shard entry for a given key. Caller is responsible for holding the shard's lock // when this method is called. 
func (s *shard) getEntry(key []byte) *shardEntry { - entry, ok := s.data[string(key)] - if !ok { - entry = &shardEntry{ - shard: s, - status: statusUnknown, - } - s.data[string(key)] = entry + if entry, ok := s.data[string(key)]; ok { + return entry + } + entry := &shardEntry{ + shard: s, + status: statusUnknown, } + keyStr := string(key) + s.data[keyStr] = entry return entry } @@ -329,7 +328,7 @@ func (s *shard) RunGarbageCollection() { // TODO maybe just do this after each u for s.gcQueue.GetTotalSize() > s.maxSize { next := s.gcQueue.PopLeastRecentlyUsed() - delete(s.data, string(next)) // TODO use unsafe copy + delete(s.data, next) } s.lock.Unlock() From 7a315c6e708cc9719a297d945d4b1ddbd69b59da Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 14:42:28 -0500 Subject: [PATCH 029/119] simplify gc --- sei-db/db_engine/pebbledb/db.go | 5 +- .../pebbledb/pebblecache/cache_impl.go | 57 +++---------------- .../db_engine/pebbledb/pebblecache/shard.go | 25 ++++---- 3 files changed, 22 insertions(+), 65 deletions(-) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 0aa5f4cf4e..1194197ec9 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -114,15 +114,14 @@ func Open( return cloned } - // A high level cache per key. + // A high level cache per key (as opposed to the low level pebble page cache). 
cache, err := pebblecache.NewCache( ctx, readFunction, 8, cacheSize, readPool, - miscPool, - 10*time.Second) + miscPool) if err != nil { return nil, fmt.Errorf("failed to create flatcache: %w", err) } diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go index 592feeca12..da572a3d46 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "sync" - "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -27,9 +26,6 @@ type cache struct { // A pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool - - // The interval at which to run garbage collection. - garbageCollectionInterval time.Duration } // Creates a new Cache. @@ -45,8 +41,6 @@ func NewCache( readPool threading.Pool, // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, - // The interval at which to run garbage collection. 
- garbageCollectionInterval time.Duration, ) (Cache, error) { if shardCount <= 0 || (shardCount&(shardCount-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo @@ -59,10 +53,6 @@ func NewCache( if err != nil { return nil, fmt.Errorf("failed to create shard manager: %w", err) } - if garbageCollectionInterval <= 0 { - return nil, fmt.Errorf("garbageCollectionInterval must be greater than 0") - } - sizePerShard := maxSize / shardCount if sizePerShard <= 0 { return nil, fmt.Errorf("maxSize must be greater than shardCount") @@ -76,18 +66,13 @@ func NewCache( } } - c := &cache{ - ctx: ctx, - shardManager: shardManager, - shards: shards, - readPool: readPool, - miscPool: miscPool, - garbageCollectionInterval: garbageCollectionInterval, - } - - go c.runGarbageCollection() - - return c, nil + return &cache{ + ctx: ctx, + shardManager: shardManager, + shards: shards, + readPool: readPool, + miscPool: miscPool, + }, nil } func (c *cache) BatchSet(updates []CacheUpdate) error { @@ -177,31 +162,3 @@ func (c *cache) Set(key []byte, value []byte) { shard := c.shards[shardIndex] shard.Set(key, value) } - -// TODO add GC metrics - -// Periodically runs garbage collection in the background. -func (c *cache) runGarbageCollection() { - - // Spread out work evenly across all shards, so that we visit each shard roughly once per interval. 
- gcSubInterval := c.garbageCollectionInterval / time.Duration(len(c.shards)) - if gcSubInterval == 0 { - // technically possible if the number of shards is very large and the interval is very small - gcSubInterval = 1 - } - ticker := time.NewTicker(gcSubInterval) - defer ticker.Stop() - - nextShardIndex := 0 - - for { - select { - case <-c.ctx.Done(): - return - case <-ticker.C: - shardIndex := nextShardIndex - nextShardIndex = (nextShardIndex + 1) % len(c.shards) - c.shards[shardIndex].RunGarbageCollection() - } - } -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index 375e24073b..3dc3c2f77f 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -156,6 +156,7 @@ func (se *shardEntry) injectValue(key []byte, value []byte) { se.value = value se.shard.gcQueue.Push(key, len(key)+len(value)) } + se.shard.evictUnlocked() } se.shard.lock.Unlock() @@ -274,9 +275,19 @@ func (s *shard) bulkInjectValues(reads []pendingRead) { s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)+len(reads[i].value)) } } + s.evictUnlocked() s.lock.Unlock() } +// Evicts least recently used entries until the cache is within its size budget. +// Caller is required to hold the lock. +func (s *shard) evictUnlocked() { + for s.gcQueue.GetTotalSize() > s.maxSize { + next := s.gcQueue.PopLeastRecentlyUsed() + delete(s.data, next) + } +} + // Set sets the value for the given key. func (s *shard) Set(key []byte, value []byte) { s.lock.Lock() @@ -291,6 +302,7 @@ func (s *shard) setUnlocked(key []byte, value []byte) { entry.value = value s.gcQueue.Push(key, len(key)+len(value)) + s.evictUnlocked() } // BatchSet sets the values for a batch of keys. @@ -320,16 +332,5 @@ func (s *shard) deleteUnlocked(key []byte) { entry.value = nil s.gcQueue.Push(key, len(key)) -} - -// RunGarbageCollection runs the garbage collection process. 
-func (s *shard) RunGarbageCollection() { // TODO maybe just do this after each update? - s.lock.Lock() - - for s.gcQueue.GetTotalSize() > s.maxSize { - next := s.gcQueue.PopLeastRecentlyUsed() - delete(s.data, next) - } - - s.lock.Unlock() + s.evictUnlocked() } From a3f390795fe915cc280256d9bcc6cb5f2314523a Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 15:09:21 -0500 Subject: [PATCH 030/119] better error handling --- sei-db/db_engine/pebbledb/db.go | 9 +- .../pebbledb/pebblecache/cache_impl.go | 2 +- .../pebbledb/pebblecache/read_scheduler.go | 80 ----------------- .../db_engine/pebbledb/pebblecache/shard.go | 88 ++++++++++++------- 4 files changed, 62 insertions(+), 117 deletions(-) delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 1194197ec9..a07cc6d94c 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -104,14 +104,17 @@ func Open( return nil, err } - readFunction := func(key []byte) []byte { // TODO error handling! + readFunction := func(key []byte) ([]byte, bool, error) { val, closer, err := db.Get(key) if err != nil { - return nil + if errors.Is(err, pebble.ErrNotFound) { + return nil, false, nil + } + return nil, false, fmt.Errorf("failed to read from pebble: %w", err) } cloned := bytes.Clone(val) _ = closer.Close() - return cloned + return cloned, true, nil } // A high level cache per key (as opposed to the low level pebble page cache). diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go index da572a3d46..19504c1519 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go @@ -32,7 +32,7 @@ type cache struct { func NewCache( ctx context.Context, // A function that reads a value from the database. 
- readFunc func(key []byte) []byte, + readFunc func(key []byte) ([]byte, bool, error), // The number of shards in the cache. Must be a power of two and greater than 0. shardCount int, // The maximum size of the cache, in bytes. diff --git a/sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go b/sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go deleted file mode 100644 index 2002a435c9..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/read_scheduler.go +++ /dev/null @@ -1,80 +0,0 @@ -package pebblecache - -import ( - "context" - "fmt" -) - -// A utility for scheduling asynchronous DB reads. -type readScheduler struct { - ctx context.Context - readFunc func(key []byte) []byte - requestChan chan *readRequest -} - -// A request to read a value from the database. -type readRequest struct { - // The key to read. - key []byte - - // The entry to write the result to. - entry *shardEntry - - // If true, the worker will send the value directly to entry.valueChan - // without calling InjectValue (which acquires the shard lock). - // Used by BatchGet to defer cache updates to a single bulk operation. - skipInject bool -} - -// Creates a new ReadScheduler. -func NewReadScheduler( - ctx context.Context, - readFunc func(key []byte) []byte, - // The number of background goroutines to read values from the database. - workerCount int, - // The max size of the read queue. - readQueueSize int, -) *readScheduler { - rs := &readScheduler{ - ctx: ctx, - readFunc: readFunc, - requestChan: make(chan *readRequest, readQueueSize), - } - - for i := 0; i < workerCount; i++ { - go rs.readWorker() - } - - return rs -} - -// ScheduleRead schedules a read for the given key within the given shard. -// This method returns immediately, and the read is performed asynchronously. -// When eventually completed, the read result is inserted into the provided shard entry. -// If skipInject is true, the worker sends the value directly to entry.valueChan -// without calling InjectValue. 
-func (r *readScheduler) ScheduleRead(key []byte, entry *shardEntry, skipInject bool) error { - select { - case <-r.ctx.Done(): - return fmt.Errorf("context done") - case r.requestChan <- &readRequest{key: key, entry: entry, skipInject: skipInject}: - return nil - } -} - -// A worker that reads values from the database. -func (r *readScheduler) readWorker() { - for { - select { - case <-r.ctx.Done(): - return - case request := <-r.requestChan: - value := r.readFunc(request.key) - if request.skipInject { - request.entry.valueChan <- value - } else { - request.entry.injectValue(request.key, value) - } - } - } -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index 3dc3c2f77f..a7e10823cc 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -26,12 +26,19 @@ type shard struct { readPool threading.Pool // A function that reads a value from the database. - readFunc func(key []byte) []byte + readFunc func(key []byte) ([]byte, bool, error) // The maximum size of this cache, in bytes. maxSize int } +// The result of a read from the underlying database. +type readResult struct { + value []byte + found bool + err error +} + // The status of a value in the cache. type valueStatus int @@ -59,14 +66,14 @@ type shardEntry struct { // If the value is not available when we request it, // it will be written to this channel when it is available. - valueChan chan []byte + valueChan chan readResult } // Creates a new Shard. func NewShard( ctx context.Context, readPool threading.Pool, - readFunc func(key []byte) []byte, + readFunc func(key []byte) ([]byte, bool, error), maxSize int, ) (*shard, error) { @@ -110,58 +117,66 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { // Another goroutine initiated a read, wait for that read to finish. 
valueChan := entry.valueChan s.lock.Unlock() - value, err := threading.InterruptiblePull(s.ctx, valueChan) + result, err := threading.InterruptiblePull(s.ctx, valueChan) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } - valueChan <- value // reload the channel in case there are other listeners - return value, value != nil, nil + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) + } + return result.value, result.found, nil case statusUnknown: // We are the first goroutine to read this value. entry.status = statusScheduled - valueChan := make(chan []byte, 1) + valueChan := make(chan readResult, 1) entry.valueChan = valueChan s.lock.Unlock() err := s.readPool.Submit(s.ctx, func() { - value := s.readFunc(key) - entry.injectValue(key, value) + value, found, readErr := s.readFunc(key) + entry.injectValue(key, readResult{value: value, found: found, err: readErr}) }) if err != nil { return nil, false, fmt.Errorf("failed to schedule read: %w", err) } - value, err := threading.InterruptiblePull(s.ctx, valueChan) + result, err := threading.InterruptiblePull(s.ctx, valueChan) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } - valueChan <- value // reload the channel in case there are other listeners - return value, value != nil, nil + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, result.err + } + return result.value, result.found, nil default: panic(fmt.Sprintf("unexpected status: %#v", entry.status)) } } // This method is called by the read scheduler when a value becomes available. 
-func (se *shardEntry) injectValue(key []byte, value []byte) { +func (se *shardEntry) injectValue(key []byte, result readResult) { se.shard.lock.Lock() if se.status == statusScheduled { - // In the time since the read was scheduled, nobody has written to this entry, - // so safe to overwrite the value. - if value == nil { + if result.err != nil { + // Don't cache errors — reset so the next caller retries. + delete(se.shard.data, string(key)) + } else if !result.found { se.status = statusDeleted se.value = nil se.shard.gcQueue.Push(key, len(key)) + se.shard.evictUnlocked() } else { se.status = statusAvailable - se.value = value - se.shard.gcQueue.Push(key, len(key)+len(value)) + se.value = result.value + se.shard.gcQueue.Push(key, len(key)+len(result.value)) + se.shard.evictUnlocked() } - se.shard.evictUnlocked() } se.shard.lock.Unlock() - se.valueChan <- value + se.valueChan <- result } // Get a shard entry for a given key. Caller is responsible for holding the shard's lock @@ -183,10 +198,10 @@ func (s *shard) getEntry(key []byte) *shardEntry { type pendingRead struct { key string entry *shardEntry - valueChan chan []byte + valueChan chan readResult needsSchedule bool // Populated after the read completes, used by bulkInjectValues. - value []byte + result readResult } // BatchGet reads a batch of keys from the shard. Results are written into the provided map. 
@@ -210,7 +225,7 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { }) case statusUnknown: entry.status = statusScheduled - valueChan := make(chan []byte, 1) + valueChan := make(chan readResult, 1) entry.valueChan = valueChan pending = append(pending, pendingRead{ key: key, @@ -229,9 +244,8 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { if pending[i].needsSchedule { p := &pending[i] err := s.readPool.Submit(s.ctx, func() { - value := s.readFunc([]byte(p.key)) - p.entry.valueChan <- value - // Intentionally do not call injectValue here, we want to defer the update to a single bulk operation. + value, found, readErr := s.readFunc([]byte(p.key)) + p.entry.valueChan <- readResult{value: value, found: found, err: readErr} }) if err != nil { return fmt.Errorf("failed to schedule read: %w", err) @@ -240,14 +254,18 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { } for i := range pending { - value, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan) + result, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan) if err != nil { return fmt.Errorf("failed to pull value from channel: %w", err) } - pending[i].valueChan <- value - pending[i].value = value + pending[i].valueChan <- result + pending[i].result = result - keys[pending[i].key] = types.BatchGetResult{Value: value, Found: value != nil} + if result.err != nil { + keys[pending[i].key] = types.BatchGetResult{Error: result.err} + } else { + keys[pending[i].key] = types.BatchGetResult{Value: result.value, Found: result.found} + } } if len(pending) > 0 { @@ -265,14 +283,18 @@ func (s *shard) bulkInjectValues(reads []pendingRead) { if entry.status != statusScheduled { continue } - if reads[i].value == nil { + result := reads[i].result + if result.err != nil { + // Don't cache errors — reset so the next caller retries. 
+ delete(s.data, reads[i].key) + } else if !result.found { entry.status = statusDeleted entry.value = nil s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)) } else { entry.status = statusAvailable - entry.value = reads[i].value - s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)+len(reads[i].value)) + entry.value = result.value + s.gcQueue.Push([]byte(reads[i].key), len(reads[i].key)+len(result.value)) } } s.evictUnlocked() From f255b870d1824d2331b1219c57d7a05342795ceb Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 16:38:21 -0500 Subject: [PATCH 031/119] use config to configure cache params --- sei-db/db_engine/pebbledb/db.go | 33 ++++---- sei-db/db_engine/pebbledb/db_test.go | 65 ++++++---------- sei-db/db_engine/pebbledb/pebbledb_config.go | 48 ++++++++++++ .../pebbledb/pebbledb_test_config.go | 20 +++++ sei-db/db_engine/types/types.go | 2 +- sei-db/state_db/sc/flatkv/config.go | 41 +++++++++- sei-db/state_db/sc/flatkv/snapshot.go | 9 ++- sei-db/state_db/sc/flatkv/snapshot_test.go | 14 ++-- sei-db/state_db/sc/flatkv/store.go | 76 +++++++++---------- sei-db/state_db/sc/flatkv/store_test.go | 8 +- 10 files changed, 197 insertions(+), 119 deletions(-) create mode 100644 sei-db/db_engine/pebbledb/pebbledb_config.go create mode 100644 sei-db/db_engine/pebbledb/pebbledb_test_config.go diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index a07cc6d94c..abef1d5311 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -35,33 +35,28 @@ var _ types.KeyValueDB = (*pebbleDB)(nil) // Open opens (or creates) a Pebble-backed DB at path, returning the DB interface. func Open( ctx context.Context, - path string, - opts types.OpenOptions, - enableMetrics bool, + config *PebbleDBConfig, + // Used to determine the ordering of keys in the database. + comparer *pebble.Comparer, // A work pool for reading from the DB. 
readPool threading.Pool, // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, - cacheSize int, - pageCacheSize int, ) (_ types.KeyValueDB, err error) { - // Validate options before allocating resources to avoid leaks on validation failure - var cmp *pebble.Comparer - if opts.Comparer != nil { - var ok bool - cmp, ok = opts.Comparer.(*pebble.Comparer) - if !ok { - return nil, fmt.Errorf("OpenOptions.Comparer must be *pebble.Comparer, got %T", opts.Comparer) - } + + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate config: %w", err) } // Internal pebbleDB cache, used to cache pages in memory. // TODO verify accuracy of this statement - pebbleCache := pebble.NewCache(int64(pageCacheSize)) + pebbleCache := pebble.NewCache(int64(config.PageCacheSize)) defer pebbleCache.Unref() + // TODO potentially expose more options here... + popts := &pebble.Options{ Cache: pebbleCache, - Comparer: cmp, + Comparer: comparer, // FormatMajorVersion is pinned to a specific version to prevent accidental // breaking changes when updating the pebble dependency. 
Using FormatNewest // would cause the on-disk format to silently upgrade when pebble is updated, @@ -99,7 +94,7 @@ func Open( // at the bottom level since most data lives there and false positive rate is low popts.Levels[6].FilterPolicy = nil - db, err := pebble.Open(path, popts) + db, err := pebble.Open(config.DataDir, popts) if err != nil { return nil, err } @@ -122,7 +117,7 @@ func Open( ctx, readFunction, 8, - cacheSize, + config.CacheSize, readPool, miscPool) if err != nil { @@ -130,8 +125,8 @@ func Open( } ctx, cancel := context.WithCancel(ctx) - if enableMetrics { - metrics.NewPebbleMetrics(ctx, db, filepath.Base(path), metricsScrapeInterval) + if config.EnableMetrics { + metrics.NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), metricsScrapeInterval) } return &pebbleDB{ diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 451cd925db..8ccc2ddea2 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -9,14 +9,12 @@ import ( "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) func TestDBGetSetDelete(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -53,9 +51,8 @@ func TestDBGetSetDelete(t *testing.T) { } func TestBatchAtomicWrite(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + 
db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -93,9 +90,8 @@ func TestBatchAtomicWrite(t *testing.T) { } func TestIteratorBounds(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -128,9 +124,8 @@ func TestIteratorBounds(t *testing.T) { } func TestIteratorPrev(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -194,9 +189,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { return append(dst, a...) 
} - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -229,19 +223,9 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } } -func TestOpenOptionsComparerTypeCheck(t *testing.T) { - dir := t.TempDir() - _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, - false, threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) - if err == nil { - t.Fatalf("expected error for invalid comparer type") - } -} - func TestErrNotFoundConsistency(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -265,9 +249,8 @@ func TestErrNotFoundConsistency(t *testing.T) { } func TestGetReturnsCopy(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -296,9 +279,8 @@ func TestGetReturnsCopy(t *testing.T) { } func TestBatchLenResetDelete(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, 
threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -353,9 +335,8 @@ func TestBatchLenResetDelete(t *testing.T) { } func TestIteratorSeekLTAndValue(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -391,9 +372,8 @@ func TestIteratorSeekLTAndValue(t *testing.T) { } func TestFlush(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } @@ -420,9 +400,8 @@ func TestFlush(t *testing.T) { } func TestCloseIsIdempotent(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := DefaultTestConfig(t) + db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go new file mode 100644 index 0000000000..67fc82435c --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -0,0 +1,48 @@ +package pebbledb + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +// Configuration for the PebbleDB database. +type PebbleDBConfig struct { + // The directory to store the database files. This has no default value and must be provided. 
+ DataDir string + // The size of key-value cache, in bytes. + CacheSize int + // The number of shards in the key-value cache. Must be a power of two and greater than 0. + CacheShardCount int + // The size of pebbleDB's internal page cache, in bytes. + PageCacheSize int + // Whether to enable metrics. + EnableMetrics bool +} + +// Default configuration for the PebbleDB database. +func DefaultConfig() PebbleDBConfig { + return PebbleDBConfig{ + CacheSize: 512 * unit.MB, + CacheShardCount: 8, + PageCacheSize: 512 * unit.MB, + EnableMetrics: true, + } +} + +// Validates the configuration (basic sanity checks). +func (c *PebbleDBConfig) Validate() error { + if c.DataDir == "" { + return fmt.Errorf("data dir is required") + } + if c.CacheShardCount <= 0 || (c.CacheShardCount&(c.CacheShardCount-1)) != 0 { + return fmt.Errorf("cache shard count must be a power of two and greater than 0") + } + if c.CacheSize <= 0 { + return fmt.Errorf("cache size must be greater than 0") + } + if c.PageCacheSize <= 0 { + return fmt.Errorf("page cache size must be greater than 0") + } + return nil +} diff --git a/sei-db/db_engine/pebbledb/pebbledb_test_config.go b/sei-db/db_engine/pebbledb/pebbledb_test_config.go new file mode 100644 index 0000000000..462dd97d3c --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebbledb_test_config.go @@ -0,0 +1,20 @@ +package pebbledb + +import ( + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +// Default configuration suitable for testing. Allocates much smaller cache sizes and disables metrics. +// DataDir defaults to t.TempDir(); callers that need a specific path can override it after calling. 
+func DefaultTestConfig(t *testing.T) PebbleDBConfig { + cfg := DefaultConfig() + + cfg.DataDir = t.TempDir() + cfg.CacheSize = 16 * unit.MB + cfg.PageCacheSize = 16 * unit.MB + cfg.EnableMetrics = false + + return cfg +} diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index be5a6ed029..e1c8c03e73 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -29,7 +29,7 @@ type IterOptions struct { // // Comparer is optional; when set it must be compatible with the underlying // engine (e.g. *pebble.Comparer for PebbleDB). -type OpenOptions struct { +type OpenOptions struct { // TODO remove if unused Comparer any } diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index ebe47e4c68..efd1618820 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -1,5 +1,10 @@ package flatkv +import ( + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" +) + const ( DefaultSnapshotInterval uint32 = 10000 DefaultSnapshotKeepRecent uint32 = 2 @@ -33,15 +38,49 @@ type Config struct { // EnablePebbleMetrics defines if the Pebble metrics should be enabled. // Default: true EnablePebbleMetrics bool `mapstructure:"enable-pebble-metrics"` + + // AccountDBConfig defines the configuration for the account database. + AccountDBConfig pebbledb.PebbleDBConfig + + // CodeDBConfig defines the configuration for the code database. + CodeDBConfig pebbledb.PebbleDBConfig + + // StorageDBConfig defines the configuration for the storage database. + StorageDBConfig pebbledb.PebbleDBConfig + + // LegacyDBConfig defines the configuration for the legacy database. + LegacyDBConfig pebbledb.PebbleDBConfig + + // MetadataDBConfig defines the configuration for the metadata database. + MetadataDBConfig pebbledb.PebbleDBConfig } // DefaultConfig returns Config with safe default values. 
func DefaultConfig() Config { - return Config{ + cfg := Config{ Fsync: false, AsyncWriteBuffer: 0, SnapshotInterval: DefaultSnapshotInterval, SnapshotKeepRecent: DefaultSnapshotKeepRecent, EnablePebbleMetrics: true, + AccountDBConfig: pebbledb.DefaultConfig(), + CodeDBConfig: pebbledb.DefaultConfig(), + StorageDBConfig: pebbledb.DefaultConfig(), + LegacyDBConfig: pebbledb.DefaultConfig(), + MetadataDBConfig: pebbledb.DefaultConfig(), } + + cfg.AccountDBConfig.CacheSize = unit.GB + cfg.StorageDBConfig.CacheSize = unit.GB * 4 + + return cfg } + +/* + + accountDBDir = "account" + codeDBDir = "code" + storageDBDir = "storage" + legacyDBDir = "legacy" + metadataDir = "metadata" +*/ diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index 91ba4e2129..f6ad259657 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -379,10 +379,11 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { // Determine version for the snapshot name. The metadata DB might still // be at the flat location or might have been moved in a prior attempt. 
var version int64 - metaPath := filepath.Join(flatkvDir, metadataDir) + metaCfg := s.config.MetadataDBConfig + metaCfg.DataDir = filepath.Join(flatkvDir, metadataDir) tmpMeta, err := pebbledb.Open( - s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics, - s.readPool, s.miscPool, unit.GB/2, unit.GB/2) + s.ctx, &metaCfg, pebble.DefaultComparer, + s.readPool, s.miscPool) if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 6da33bd988..f1d8cf4cdb 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -7,9 +7,9 @@ import ( "strings" "testing" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -257,8 +257,10 @@ func TestMigrationFromFlatLayout(t *testing.T) { dbPath := filepath.Join(flatkvDir, sub) require.NoError(t, os.MkdirAll(dbPath, 0750)) // Create an actual PebbleDB so Open works - db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := pebbledb.DefaultTestConfig(t) + cfg.DataDir = dbPath + db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) require.NoError(t, db.Close()) } @@ -315,8 +317,10 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, err) accountDBPath := filepath.Join(snapDir, accountDBDir) - db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false, - threading.NewAdHocPool(), 
threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + acctCfg := pebbledb.DefaultTestConfig(t) + acctCfg.DataDir = accountDBPath + db, err := pebbledb.Open(t.Context(), &acctCfg, pebble.DefaultComparer, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) lagMeta := &LocalMeta{CommittedVersion: 1} require.NoError(t, db.Set(DBLocalMetaKey, MarshalLocalMeta(lagMeta), types.WriteOptions{Sync: true})) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 594ec9d8dd..415293b7d4 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -8,10 +8,10 @@ import ( "path/filepath" "time" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/logger" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -311,28 +311,23 @@ func (s *CommitStore) acquireFileLock(dir string) error { return nil } +// openPebbleDB sets the DataDir on cfg, creates the directory, and opens a PebbleDB instance. +func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig, dir string) (seidbtypes.KeyValueDB, error) { + cfg.DataDir = dir + if err := os.MkdirAll(dir, 0750); err != nil { + return nil, fmt.Errorf("create directory %s: %w", dir, err) + } + db, err := pebbledb.Open(s.ctx, cfg, pebble.DefaultComparer, s.readPool, s.miscPool) + if err != nil { + return nil, fmt.Errorf("open %s: %w", dir, err) + } + return db, nil +} + // openAllDBs opens the 5 PebbleDBs from the snapshot directory, the changelog // WAL from the flatkv root, and loads per-DB local metadata. On failure all // already-opened handles are closed. 
func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { - type namedPath struct { - name string - path string - } - dbPaths := []namedPath{ - {accountDBDir, filepath.Join(snapDir, accountDBDir)}, - {codeDBDir, filepath.Join(snapDir, codeDBDir)}, - {storageDBDir, filepath.Join(snapDir, storageDBDir)}, - {legacyDBDir, filepath.Join(snapDir, legacyDBDir)}, - {metadataDir, filepath.Join(snapDir, metadataDir)}, - } - - for _, np := range dbPaths { - if err := os.MkdirAll(np.path, 0750); err != nil { - return fmt.Errorf("failed to create directory %s: %w", np.path, err) - } - } - var toClose []io.Closer defer func() { if retErr != nil { @@ -349,40 +344,37 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { } }() - openDB := func(np namedPath, cacheSize int, pageCacheSize int) (seidbtypes.KeyValueDB, error) { - db, err := pebbledb.Open( - s.ctx, - np.path, - seidbtypes.OpenOptions{}, - s.config.EnablePebbleMetrics, - s.readPool, - s.miscPool, - cacheSize, - pageCacheSize) - if err != nil { - return nil, fmt.Errorf("failed to open %s: %w", np.name, err) - } - toClose = append(toClose, db) - return db, nil - } - - // TODO don't hardcode the cache sizes! 
var err error - if s.accountDB, err = openDB(dbPaths[0], unit.GB/2, unit.GB/2); err != nil { + + s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig, filepath.Join(snapDir, accountDBDir)) + if err != nil { return err } - if s.codeDB, err = openDB(dbPaths[1], unit.GB/2, unit.GB/2); err != nil { + toClose = append(toClose, s.accountDB) + + s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig, filepath.Join(snapDir, codeDBDir)) + if err != nil { return err } - if s.storageDB, err = openDB(dbPaths[2], unit.GB*4, unit.GB/2); err != nil { + toClose = append(toClose, s.codeDB) + + s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig, filepath.Join(snapDir, storageDBDir)) + if err != nil { return err } - if s.legacyDB, err = openDB(dbPaths[3], unit.GB/2, unit.GB/2); err != nil { + toClose = append(toClose, s.storageDB) + + s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig, filepath.Join(snapDir, legacyDBDir)) + if err != nil { return err } - if s.metadataDB, err = openDB(dbPaths[4], unit.GB/2, unit.GB/2); err != nil { + toClose = append(toClose, s.legacyDB) + + s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig, filepath.Join(snapDir, metadataDir)) + if err != nil { return err } + toClose = append(toClose, s.metadataDB) changelogPath := filepath.Join(flatkvRoot, changelogDir) s.changelog, err = wal.NewChangelogWAL(s.log, changelogPath, wal.Config{ diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 33e2167808..37dd796867 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/require" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" 
"github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -59,9 +59,9 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { // setupTestDB creates a temporary PebbleDB for testing func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() - dir := t.TempDir() - db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false, - threading.NewAdHocPool(), threading.NewAdHocPool(), unit.MB*8, unit.MB*8) + cfg := pebbledb.DefaultTestConfig(t) + db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) return db } From cf0a73da353a7cb5a204361f660580bedc4c3978 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 9 Mar 2026 17:04:55 -0500 Subject: [PATCH 032/119] Allow flatkv config to be set in tests --- sei-db/config/sc_config.go | 2 +- sei-db/state_db/bench/cryptosim/cryptosim.go | 7 ++++++- .../state_db/bench/cryptosim/cryptosim_config.go | 9 ++++++++- sei-db/state_db/bench/helper.go | 2 +- .../state_db/bench/wrappers/db_implementations.go | 14 ++++++++------ sei-db/state_db/sc/flatkv/config.go | 13 ++----------- sei-db/state_db/sc/flatkv/snapshot_test.go | 13 +++++++------ sei-db/state_db/sc/flatkv/store.go | 4 ++-- sei-db/state_db/sc/flatkv/store_test.go | 2 +- sei-db/state_db/sc/flatkv/store_write_test.go | 10 +++++----- 10 files changed, 41 insertions(+), 35 deletions(-) diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index b32995a77d..e74ce29b3c 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -43,7 +43,7 @@ type StateCommitConfig struct { MemIAVLConfig memiavl.Config // FlatKVConfig is the configuration for the FlatKV (EVM) backend - FlatKVConfig flatkv.Config + FlatKVConfig *flatkv.Config // Max concurrent historical proof queries (RPC /store path). 
HistoricalProofMaxInFlight int `mapstructure:"historical-proof-max-inflight"` diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index e970c14053..2dc615f98d 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -115,7 +115,12 @@ func NewCryptoSim( fmt.Printf("Running cryptosim benchmark from data directory: %s\n", dataDir) - db, err := wrappers.NewDBImpl(dbCtx, config.Backend, dataDir) + var dbConfig any + if config.Backend == wrappers.FlatKV { + dbConfig = config.FlatKVConfig + } + + db, err := wrappers.NewDBImpl(dbCtx, config.Backend, dataDir, dbConfig) if err != nil { cancel() dbCancel() diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index 14c01cedc8..9e6807f38c 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -7,6 +7,7 @@ import ( "path/filepath" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" ) const ( @@ -138,6 +139,9 @@ type CryptoSimConfig struct { // If true, the data directory will be deleted on a clean shutdown. DeleteDataDirOnShutdown bool + + // Configures the FlatKV database. Ignored if Backend is not "FlatKV". + FlatKVConfig *flatkv.Config } // Returns the default configuration for the cryptosim benchmark. @@ -146,7 +150,7 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { // Note: if you add new fields or modify default values, be sure to keep config/basic-config.json in sync. // That file should contain every available config set to its default value, as a reference. 
- return &CryptoSimConfig{ + cfg := &CryptoSimConfig{ NumberOfHotAccounts: 100, MinimumNumberOfColdAccounts: 1_000_000, MinimumNumberOfDormantAccounts: 1_000_000, @@ -178,7 +182,10 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { EnableSuspension: true, DeleteDataDirOnStartup: false, DeleteDataDirOnShutdown: false, + FlatKVConfig: flatkv.DefaultConfig(), } + + return cfg } // StringifiedConfig returns the config as human-readable, multi-line JSON. diff --git a/sei-db/state_db/bench/helper.go b/sei-db/state_db/bench/helper.go index 63cc405a6b..f13de6bee3 100644 --- a/sei-db/state_db/bench/helper.go +++ b/sei-db/state_db/bench/helper.go @@ -395,7 +395,7 @@ func runBenchmark(b *testing.B, scenario TestScenario, withProgress bool) { func() { dbDir := b.TempDir() b.StopTimer() - cs, err := wrappers.NewDBImpl(b.Context(), scenario.Backend, dbDir) + cs, err := wrappers.NewDBImpl(b.Context(), scenario.Backend, dbDir, nil) require.NoError(b, err) // Load snapshot if available diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index 9e120cf5ec..c428958dcb 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -45,11 +45,13 @@ func newMemIAVLCommitStore(dbDir string) (DBWrapper, error) { return NewMemIAVLWrapper(cs), nil } -func newFlatKVCommitStore(ctx context.Context, dbDir string) (DBWrapper, error) { - cfg := flatkv.DefaultConfig() - cfg.Fsync = false +func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkv.Config) (DBWrapper, error) { + if config == nil { + config = flatkv.DefaultConfig() + } + fmt.Printf("Opening flatKV from directory %s\n", dbDir) - cs := flatkv.NewCommitStore(ctx, dbDir, logger.NewNopLogger(), cfg) + cs := flatkv.NewCommitStore(ctx, dbDir, logger.NewNopLogger(), config) _, err := cs.LoadVersion(0) if err != nil { if closeErr := cs.Close(); closeErr != nil { @@ -115,12 +117,12 @@ func 
newCombinedCompositeDualSSComposite(ctx context.Context, dbDir string) (DBW } // NewDBImpl instantiates a new empty DBWrapper based on the given DBType. -func NewDBImpl(ctx context.Context, dbType DBType, dataDir string) (DBWrapper, error) { +func NewDBImpl(ctx context.Context, dbType DBType, dataDir string, dbConfig any) (DBWrapper, error) { switch dbType { case MemIAVL: return newMemIAVLCommitStore(dataDir) case FlatKV: - return newFlatKVCommitStore(ctx, dataDir) + return newFlatKVCommitStore(ctx, dataDir, dbConfig.(*flatkv.Config)) case CompositeDual: return newCompositeCommitStore(ctx, dataDir, config.DualWrite) case CompositeSplit: diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index efd1618820..f623cef278 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -56,8 +56,8 @@ type Config struct { } // DefaultConfig returns Config with safe default values. -func DefaultConfig() Config { - cfg := Config{ +func DefaultConfig() *Config { + cfg := &Config{ Fsync: false, AsyncWriteBuffer: 0, SnapshotInterval: DefaultSnapshotInterval, @@ -75,12 +75,3 @@ func DefaultConfig() Config { return cfg } - -/* - - accountDBDir = "account" - codeDBDir = "code" - storageDBDir = "storage" - legacyDBDir = "legacy" - metadataDir = "metadata" -*/ diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index f1d8cf4cdb..f798f5f961 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -317,8 +317,9 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, err) accountDBPath := filepath.Join(snapDir, accountDBDir) - acctCfg := pebbledb.DefaultTestConfig(t) + acctCfg := pebbledb.DefaultConfig() acctCfg.DataDir = accountDBPath + acctCfg.EnableMetrics = false db, err := pebbledb.Open(t.Context(), &acctCfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, 
err) @@ -763,7 +764,7 @@ func TestCreateWorkingDirReclones(t *testing.T) { func TestPruneSnapshotsKeepsRecent(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{SnapshotKeepRecent: 1}) + s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 1}) _, err := s.LoadVersion(0) require.NoError(t, err) @@ -787,7 +788,7 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { func TestPruneSnapshotsKeepAll(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{SnapshotKeepRecent: 100}) + s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 100}) _, err := s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -921,7 +922,7 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. 
- s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{SnapshotKeepRecent: 0}) + s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 0}) _, err := s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -1065,7 +1066,7 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{SnapshotKeepRecent: 10}) + s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) _, err := s.LoadVersion(0) require.NoError(t, err) @@ -1079,7 +1080,7 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 1) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{SnapshotKeepRecent: 10}) + s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) _, err := s2.LoadVersion(ver) require.NoError(t, err) require.Equal(t, ver, s2.Version()) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 415293b7d4..f175233e8c 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -67,7 +67,7 @@ type CommitStore struct { ctx context.Context cancel context.CancelFunc log logger.Logger - config Config + config *Config dbDir string // Five separate PebbleDB instances @@ -123,7 +123,7 @@ func NewCommitStore( ctx context.Context, dbDir string, log logger.Logger, - cfg Config, + cfg *Config, ) *CommitStore { if log == nil { log = logger.NewNopLogger() diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 37dd796867..e0ed0aa49f 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -77,7 +77,7 @@ func setupTestStore(t *testing.T) *CommitStore { } // 
setupTestStoreWithConfig creates a test store with custom config -func setupTestStoreWithConfig(t *testing.T, cfg Config) *CommitStore { +func setupTestStoreWithConfig(t *testing.T, cfg *Config) *CommitStore { t.Helper() dir := t.TempDir() s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 04915daf51..300b78cf36 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -535,7 +535,7 @@ func TestStoreFsyncConfig(t *testing.T) { t.Run("FsyncDisabled", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, Config{ + store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{ Fsync: false, }) _, err := store.LoadVersion(0) @@ -571,7 +571,7 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { SnapshotInterval: 5, SnapshotKeepRecent: 2, } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &cfg) _, err := s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -591,7 +591,7 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { dir := t.TempDir() - cfg := Config{ + cfg := &Config{ SnapshotInterval: 10, SnapshotKeepRecent: 2, } @@ -621,7 +621,7 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { dir := t.TempDir() - cfg := Config{SnapshotInterval: 0} + cfg := &Config{SnapshotInterval: 0} s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) _, err := s.LoadVersion(0) require.NoError(t, err) @@ -837,7 +837,7 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { func TestStoreFsyncEnabled(t *testing.T) { dir := 
t.TempDir() - cfg := Config{Fsync: true} + cfg := &Config{Fsync: true} s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) _, err := s.LoadVersion(0) require.NoError(t, err) From 0b34737d775e70205c6cd1a6c76e3bedde78db1a Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 08:35:22 -0500 Subject: [PATCH 033/119] tweak config --- sei-db/common/threading/elastic_pool.go | 2 ++ sei-db/db_engine/pebbledb/db.go | 2 -- sei-db/db_engine/types/types.go | 13 ------------- .../state_db/bench/cryptosim/config/large.json | 7 ------- .../state_db/bench/cryptosim/config/medium.json | 7 ------- .../bench/cryptosim/config/standard-perf.json | 16 ++++++++++++++++ 6 files changed, 18 insertions(+), 29 deletions(-) delete mode 100644 sei-db/state_db/bench/cryptosim/config/large.json delete mode 100644 sei-db/state_db/bench/cryptosim/config/medium.json create mode 100644 sei-db/state_db/bench/cryptosim/config/standard-perf.json diff --git a/sei-db/common/threading/elastic_pool.go b/sei-db/common/threading/elastic_pool.go index 5621562d57..6a3fbe53b8 100644 --- a/sei-db/common/threading/elastic_pool.go +++ b/sei-db/common/threading/elastic_pool.go @@ -60,6 +60,8 @@ func (ep *elasticPool) Submit(ctx context.Context, task func()) (err error) { case ep.workQueue <- task: return nil default: + // We hit this case when all workers are busy. Under standard operation, this should + // be fairly rare, but it's not catastrophic if it happens. go task() return nil } diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index abef1d5311..afb39b906d 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -30,8 +30,6 @@ type pebbleDB struct { var _ types.KeyValueDB = (*pebbleDB)(nil) -// TODO create a config struct for this! - // Open opens (or creates) a Pebble-backed DB at path, returning the DB interface. 
func Open( ctx context.Context, diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index e1c8c03e73..d8f26db7c5 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -20,19 +20,6 @@ type IterOptions struct { UpperBound []byte } -// OpenOptions configures opening a DB. -// -// NOTE: This is intentionally minimal today. Most performance-critical knobs -// (cache size, memtable sizing, compaction settings, etc.) are currently owned by -// the backend implementations. If/when we need per-node tuning, we can extend -// this struct or add engine-specific options. -// -// Comparer is optional; when set it must be compatible with the underlying -// engine (e.g. *pebble.Comparer for PebbleDB). -type OpenOptions struct { // TODO remove if unused - Comparer any -} - // BatchGetResult describes the result of a single key lookup within a BatchGet call. type BatchGetResult struct { // The value for the given key. diff --git a/sei-db/state_db/bench/cryptosim/config/large.json b/sei-db/state_db/bench/cryptosim/config/large.json deleted file mode 100644 index a4cb063b00..0000000000 --- a/sei-db/state_db/bench/cryptosim/config/large.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Comment": "A large simulation. This is the largest sane simulation for local testing.", - "DataDir": "data", - "MinimumNumberOfColdAccounts": 1000000, - "MinimumNumberOfDormantAccounts": 100000000 -} - diff --git a/sei-db/state_db/bench/cryptosim/config/medium.json b/sei-db/state_db/bench/cryptosim/config/medium.json deleted file mode 100644 index 7687f2d4f1..0000000000 --- a/sei-db/state_db/bench/cryptosim/config/medium.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "Comment": "A medium-sized simulation. 
Takes a few minutes to set up, but is not extremely onerous to set up.", - "DataDir": "data", - "MinimumNumberOfColdAccounts": 1000000, - "MinimumNumberOfDormantAccounts": 10000000 -} - diff --git a/sei-db/state_db/bench/cryptosim/config/standard-perf.json b/sei-db/state_db/bench/cryptosim/config/standard-perf.json new file mode 100644 index 0000000000..fb0a736ab7 --- /dev/null +++ b/sei-db/state_db/bench/cryptosim/config/standard-perf.json @@ -0,0 +1,16 @@ +{ + "Comment": "The standardized parameters for performance and longevity testing.", + "DataDir": "data", + "MinimumNumberOfColdAccounts": 1000000, + "MinimumNumberOfDormantAccounts": 100000000, + "AccountDBConfig": { + "CacheSize": 1073741824 + }, + "CodeDBConfig": { + "CacheSize": 1073741824 + }, + "StorageDBConfig": { + "CacheSize": 4294967296 + } +} + From 452aa4d5cd4a462619ef66052b4bdfc56bee25ec Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 09:22:11 -0500 Subject: [PATCH 034/119] incremental progress --- sei-cosmos/storev2/rootmulti/store.go | 5 +- .../bench/wrappers/db_implementations.go | 12 +- sei-db/state_db/sc/composite/store.go | 10 +- sei-db/state_db/sc/composite/store_test.go | 33 ++-- sei-db/state_db/sc/flatkv/config.go | 83 +++++++-- .../sc/flatkv/lthash_correctness_test.go | 8 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 157 +++++++++++------- sei-db/state_db/sc/flatkv/store.go | 20 ++- sei-db/state_db/sc/flatkv/store_test.go | 93 +++++++---- sei-db/state_db/sc/flatkv/store_write_test.go | 45 +++-- 10 files changed, 315 insertions(+), 151 deletions(-) diff --git a/sei-cosmos/storev2/rootmulti/store.go b/sei-cosmos/storev2/rootmulti/store.go index 10d378b630..835892efb8 100644 --- a/sei-cosmos/storev2/rootmulti/store.go +++ b/sei-cosmos/storev2/rootmulti/store.go @@ -92,7 +92,10 @@ func NewStore( limiter = rate.NewLimiter(rate.Limit(scConfig.HistoricalProofRateLimit), burst) } ctx := context.Background() - scStore := composite.NewCompositeCommitStore(ctx, scDir, logger, 
scConfig) + scStore, err := composite.NewCompositeCommitStore(ctx, scDir, logger, scConfig) + if err != nil { + panic(err) + } store := &Store{ logger: logger, scStore: scStore, diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index c428958dcb..bf98c617d9 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -51,8 +51,11 @@ func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkv.Conf } fmt.Printf("Opening flatKV from directory %s\n", dbDir) - cs := flatkv.NewCommitStore(ctx, dbDir, logger.NewNopLogger(), config) - _, err := cs.LoadVersion(0) + cs, err := flatkv.NewCommitStore(ctx, dbDir, logger.NewNopLogger(), config) + if err != nil { + return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) + } + _, err = cs.LoadVersion(0) if err != nil { if closeErr := cs.Close(); closeErr != nil { fmt.Printf("failed to close commit store during error recovery: %v\n", closeErr) @@ -68,7 +71,10 @@ func newCompositeCommitStore(ctx context.Context, dbDir string, writeMode config cfg.MemIAVLConfig.AsyncCommitBuffer = 10 cfg.MemIAVLConfig.SnapshotInterval = 100 - cs := composite.NewCompositeCommitStore(ctx, dbDir, logger.NewNopLogger(), cfg) + cs, err := composite.NewCompositeCommitStore(ctx, dbDir, logger.NewNopLogger(), cfg) + if err != nil { + return nil, fmt.Errorf("failed to create Composite commit store: %w", err) + } cs.Initialize([]string{EVMStoreName}) loaded, err := cs.LoadVersion(0, false) diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 4d750d4437..17a50cc690 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -50,7 +50,7 @@ func NewCompositeCommitStore( homeDir string, logger logger.Logger, cfg config.StateCommitConfig, -) *CompositeCommitStore { +) (*CompositeCommitStore, error) { // Always 
initialize the Cosmos backend (creates struct only, not opened) cosmosCommitter := memiavl.NewCommitStore(homeDir, logger, cfg.MemIAVLConfig) @@ -65,10 +65,14 @@ func NewCompositeCommitStore( // Note: DB is NOT opened here, will be opened in LoadVersion if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { flatkvPath := filepath.Join(homeDir, "data", "flatkv") - store.evmCommitter = flatkv.NewCommitStore(ctx, flatkvPath, logger, cfg.FlatKVConfig) + var err error + store.evmCommitter, err = flatkv.NewCommitStore(ctx, flatkvPath, logger, cfg.FlatKVConfig) + if err != nil { + return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) + } } - return store + return store, nil } // Initialize initializes the store with the given store names diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index e4ed33ebe7..9d50634c91 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -15,10 +15,11 @@ func TestCompositeStoreBasicOperations(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test", EVMStoreName}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, cs.Close()) @@ -64,10 +65,11 @@ func TestEmptyChangesets(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test"}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, 
cs.Close()) @@ -85,10 +87,11 @@ func TestLoadVersionCopyExisting(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test"}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) err = cs.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -122,10 +125,11 @@ func TestWorkingAndLastCommitInfo(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test"}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, cs.Close()) @@ -157,10 +161,11 @@ func TestRollback(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test"}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) // Commit a few versions @@ -193,10 +198,11 @@ func TestGetVersions(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs.Initialize([]string{"test"}) - _, err := cs.LoadVersion(0, false) + _, err = cs.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -216,7 +222,8 @@ func TestGetVersions(t *testing.T) { } require.NoError(t, 
cs.Close()) - cs2 := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + cs2, err := NewCompositeCommitStore(t.Context(), dir, logger.NewNopLogger(), cfg) + require.NoError(t, err) cs2.Initialize([]string{"test"}) latestVersion, err := cs2.GetLatestVersion() diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index f623cef278..8c5b5a5041 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -1,6 +1,8 @@ package flatkv import ( + "fmt" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" ) @@ -53,21 +55,45 @@ type Config struct { // MetadataDBConfig defines the configuration for the metadata database. MetadataDBConfig pebbledb.PebbleDBConfig + + // Controls the number of goroutines in the DB read pool. The number of threads in this pool is equal to + // ReaderThreadsPerCore * runtime.NumCPU() + ReaderConstantThreadCount. + ReaderThreadsPerCore float64 + + // Controls the number of goroutines in the DB read pool. The number of threads in this pool is equal to + // ReaderThreadsPerCore * runtime.NumCPU() + ReaderConstantThreadCount. + ReaderConstantThreadCount int + + // Controls the size of the queue for work sent to the read pool. + ReaderPoolQueueSize int + + // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. + // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. + MiscPoolThreadsPerCore float64 + + // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. + // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. + MiscConstantThreadCount int } // DefaultConfig returns Config with safe default values. 
func DefaultConfig() *Config { cfg := &Config{ - Fsync: false, - AsyncWriteBuffer: 0, - SnapshotInterval: DefaultSnapshotInterval, - SnapshotKeepRecent: DefaultSnapshotKeepRecent, - EnablePebbleMetrics: true, - AccountDBConfig: pebbledb.DefaultConfig(), - CodeDBConfig: pebbledb.DefaultConfig(), - StorageDBConfig: pebbledb.DefaultConfig(), - LegacyDBConfig: pebbledb.DefaultConfig(), - MetadataDBConfig: pebbledb.DefaultConfig(), + Fsync: false, + AsyncWriteBuffer: 0, + SnapshotInterval: DefaultSnapshotInterval, + SnapshotKeepRecent: DefaultSnapshotKeepRecent, + EnablePebbleMetrics: true, + AccountDBConfig: pebbledb.DefaultConfig(), + CodeDBConfig: pebbledb.DefaultConfig(), + StorageDBConfig: pebbledb.DefaultConfig(), + LegacyDBConfig: pebbledb.DefaultConfig(), + MetadataDBConfig: pebbledb.DefaultConfig(), + ReaderThreadsPerCore: 2.0, + ReaderConstantThreadCount: 0, + ReaderPoolQueueSize: 1024, + MiscPoolThreadsPerCore: 4.0, + MiscConstantThreadCount: 0, } cfg.AccountDBConfig.CacheSize = unit.GB @@ -75,3 +101,40 @@ func DefaultConfig() *Config { return cfg } + +// Validate checks that the configuration is sane and returns an error if it is not. 
+func (c *Config) Validate() error { + if c.AccountDBConfig.Validate() != nil { + return fmt.Errorf("account db config is invalid: %w", c.AccountDBConfig.Validate()) + } + if c.CodeDBConfig.Validate() != nil { + return fmt.Errorf("code db config is invalid: %w", c.CodeDBConfig.Validate()) + } + if c.StorageDBConfig.Validate() != nil { + return fmt.Errorf("storage db config is invalid: %w", c.StorageDBConfig.Validate()) + } + if c.LegacyDBConfig.Validate() != nil { + return fmt.Errorf("legacy db config is invalid: %w", c.LegacyDBConfig.Validate()) + } + if c.MetadataDBConfig.Validate() != nil { + return fmt.Errorf("metadata db config is invalid: %w", c.MetadataDBConfig.Validate()) + } + + if c.ReaderThreadsPerCore < 0 { + return fmt.Errorf("reader threads per core must be greater than 0") + } + if c.ReaderConstantThreadCount < 0 { + return fmt.Errorf("reader constant thread count must be greater than 0") + } + if c.ReaderPoolQueueSize < 0 { + return fmt.Errorf("reader pool queue size must be greater than 0") + } + if c.MiscPoolThreadsPerCore < 0 { + return fmt.Errorf("misc threads per core must be greater than 0") + } + if c.MiscConstantThreadCount < 0 { + return fmt.Errorf("misc constant thread count must be greater than 0") + } + + return nil +} diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 597019e447..17a0c68060 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -640,8 +640,9 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() // Phase 1: create state and close - s1 := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) for i := 1; i <= 10; i++ { @@ -658,7 +659,8 @@ func TestLtHashPersistenceAfterReopen(t 
*testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen and verify - s2 := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index f798f5f961..fd43dbf2a4 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -34,8 +34,9 @@ func commitStorageEntry(t *testing.T, s *CommitStore, addr Address, slot Slot, v func TestSnapshotCreatesDir(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -61,8 +62,9 @@ func TestSnapshotCreatesDir(t *testing.T) { func TestSnapshotIdempotent(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -81,8 +83,9 @@ func TestOpenFromSnapshot(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit v1 and v2, snapshot at v2, commit v3 - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x10}, Slot{0x01}, []byte{0x01}) @@ -98,7 +101,8 @@ func 
TestOpenFromSnapshot(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen - should catchup from v2 snapshot + WAL entry for v3 - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -120,8 +124,9 @@ func TestOpenFromSnapshot(t *testing.T) { func TestCatchupUpdatesLtHash(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) // Commit 5 versions, snapshot at v2 @@ -138,7 +143,8 @@ func TestCatchupUpdatesLtHash(t *testing.T) { require.NoError(t, s1.Close()) // Reopen: catchup from v2 snapshot through v3,v4,v5 via WAL - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -152,8 +158,9 @@ func TestCatchupUpdatesLtHash(t *testing.T) { func TestRollbackRewindsState(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) // Commit v1..v5, snapshot at v3 @@ -189,8 +196,9 @@ func TestRollbackRewindsState(t *testing.T) { func TestRollbackToSnapshotExact(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), 
nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0x40}, Slot{0x01}, []byte{0x01}) @@ -210,8 +218,9 @@ func TestRollbackToSnapshotExact(t *testing.T) { func TestPartialSnapshotCleanup(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0x50}, Slot{0x01}, []byte{0x01}) @@ -270,7 +279,8 @@ func TestMigrationFromFlatLayout(t *testing.T) { require.True(t, os.IsNotExist(err)) // Open the store - should trigger migration - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -301,8 +311,9 @@ func TestOpenVersionValidation(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit some data - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x60}, Slot{0x01}, []byte{0x11}) @@ -328,7 +339,8 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, db.Close()) // Phase 3: reopen - should detect skew and catchup - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -397,8 +409,9 @@ func TestSeekSnapshot(t *testing.T) { func TestLoadVersionWithTarget(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x70}, Slot{0x01}, []byte{0x01}) @@ -410,7 +423,8 @@ func TestLoadVersionWithTarget(t *testing.T) { require.NoError(t, s1.Close()) // Reopen at specific version 3 - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(3) require.NoError(t, err) defer s2.Close() @@ -429,8 +443,9 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) // Phase 1: build baseline at v2 and snapshot it. - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s1, addr, slot, []byte{0x01}) // v1 @@ -450,7 +465,8 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { // Phase 3: reopen exactly at v2. If later commits had mutated the snapshot // baseline in place, we'd incorrectly read 0x04 here. 
- s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(2) require.NoError(t, err) gotV2, ok := s2.Get(key) @@ -459,7 +475,8 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s2.Close()) // Phase 4: reopen latest again to ensure catchup/replay still reaches v4. - s3 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s3, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s3.LoadVersion(0) require.NoError(t, err) defer s3.Close() @@ -479,8 +496,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { slot := Slot{0x81} key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, addr, slot, []byte{0x01}) @@ -494,7 +512,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s.Close()) // Round 1: load exactly v2 - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s1.LoadVersion(2) require.NoError(t, err) require.Equal(t, int64(2), s1.Version()) @@ -505,7 +524,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s1.Close()) // Round 2: load latest (catches up through v3, v4) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) require.Equal(t, int64(4), s2.Version()) @@ -516,7 +536,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s2.Close()) // Round 3: load v2 AGAIN — snapshot must still be clean. - s3 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s3, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s3.LoadVersion(2) require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") require.Equal(t, int64(2), s3.Version()) @@ -532,8 +553,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { func TestRollbackTargetBeforeWALStart(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) // Build: v1..v5, snapshot at v2 @@ -566,7 +588,8 @@ func TestRollbackTargetBeforeWALStart(t *testing.T) { // Simulate restart: should stay at v2. 
require.NoError(t, s.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -764,8 +787,9 @@ func TestCreateWorkingDirReclones(t *testing.T) { func TestPruneSnapshotsKeepsRecent(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 1}) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 1}) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) for i := 0; i < 5; i++ { @@ -788,8 +812,9 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { func TestPruneSnapshotsKeepAll(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 100}) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 100}) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -824,7 +849,8 @@ func TestOrphanSnapshotRecovery(t *testing.T) { _, err := os.Lstat(currentPath(flatkvDir)) require.True(t, os.IsNotExist(err), "no current symlink should exist") - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -886,8 +912,9 @@ func TestTraverseSnapshotsEarlyStop(t *testing.T) { func TestVerifyWALTailSuccess(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := 
s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -900,8 +927,9 @@ func TestVerifyWALTailSuccess(t *testing.T) { func TestVerifyWALTailMismatch(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -922,8 +950,9 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 0}) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 0}) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -947,8 +976,9 @@ func TestTryTruncateWAL(t *testing.T) { func TestTryTruncateWALNoSnapshot(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -968,8 +998,9 @@ func TestTryTruncateWALNoSnapshot(t *testing.T) { func TestRollbackRemovesPostTargetSnapshots(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -1066,8 +1097,9 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) var hashes [][]byte @@ -1080,8 +1112,9 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 1) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) - _, err := s2.LoadVersion(ver) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) + require.NoError(t, err) + _, err = s2.LoadVersion(ver) require.NoError(t, err) require.Equal(t, ver, s2.Version()) require.Equal(t, expectedHash, s2.RootHash(), "hash mismatch at version %d", ver) @@ -1095,8 +1128,9 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0xF0}, Slot{0x01}, []byte{0x01}) @@ -1121,7 +1155,8 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { // Reopen: working dir should be reused (SNAPSHOT_BASE matches current), // so committedVersion should be 5 (from working dir 
metadata), not 2 // (from the snapshot). Catchup should replay 0 entries. - s2 := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -1132,8 +1167,9 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { func TestSnapshotPreservesAllKeyTypes(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) addr := Address{0xAB} @@ -1153,7 +1189,8 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) require.NoError(t, s.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index f175233e8c..c9777a163e 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "runtime" "time" "github.com/cockroachdb/pebble/v2" @@ -124,7 +125,14 @@ func NewCommitStore( dbDir string, log logger.Logger, cfg *Config, -) *CommitStore { +) (*CommitStore, error) { + + // TODO pre-populate file paths in sub-configs + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate config: %w", err) + } + if log == nil { log = logger.NewNopLogger() } @@ -132,8 +140,12 @@ func NewCommitStore( ctx, cancel := 
context.WithCancel(ctx) - readPool := threading.NewFixedPool(ctx, "flatkv-read", 20, 1024) // TODO this should be configurable! - miscPool := threading.NewElasticPool(ctx, "flatkv-misc", 20) + coreCount := runtime.NumCPU() + readPoolSize := int(cfg.ReaderThreadsPerCore*float64(coreCount) + float64(cfg.ReaderConstantThreadCount)) + miscPoolSize := int(cfg.MiscPoolThreadsPerCore*float64(coreCount) + float64(cfg.MiscConstantThreadCount)) + + readPool := threading.NewFixedPool(ctx, "flatkv-read", readPoolSize, cfg.ReaderPoolQueueSize) + miscPool := threading.NewElasticPool(ctx, "flatkv-misc", miscPoolSize) return &CommitStore{ ctx: ctx, @@ -152,7 +164,7 @@ func NewCommitStore( phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), readPool: readPool, miscPool: miscPool, - } + }, nil } func (s *CommitStore) flatkvDir() string { diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index e0ed0aa49f..12fd44c639 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -70,8 +70,9 @@ func setupTestDB(t *testing.T) types.KeyValueDB { func setupTestStore(t *testing.T) *CommitStore { t.Helper() dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) return s } @@ -80,8 +81,9 @@ func setupTestStore(t *testing.T) *CommitStore { func setupTestStoreWithConfig(t *testing.T, cfg *Config) *CommitStore { t.Helper() dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) return s } @@ -100,8 +102,9 @@ func 
commitAndCheck(t *testing.T, s *CommitStore) int64 { func TestStoreOpenClose(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) require.NoError(t, s.Close()) @@ -109,8 +112,9 @@ func TestStoreOpenClose(t *testing.T) { func TestStoreClose(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) // Close should succeed @@ -304,8 +308,9 @@ func TestStorePersistence(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and close - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) cs := makeChangeSet(key, value, false) @@ -314,7 +319,8 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -431,11 +437,13 @@ func TestStoreRollbackNoSnapshot(t *testing.T) { func TestFileLockPreventsDoubleOpen(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, 
err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.Error(t, err, "second open on same dir should fail due to file lock") require.Contains(t, err.Error(), "file lock") @@ -453,8 +461,9 @@ func TestFileLockPreventsDoubleOpen(t *testing.T) { func TestClearChangelog(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -478,8 +487,9 @@ func TestClearChangelog(t *testing.T) { func TestCloseDBsOnlyIdempotent(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) require.NoError(t, s.closeDBsOnly()) @@ -496,8 +506,9 @@ func TestCloseDBsOnlyIdempotent(t *testing.T) { func TestLoadVersionTargetBeyondWALFails(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x01}, Slot{0x01}, []byte{0x01}) @@ -505,7 +516,8 @@ func 
TestLoadVersionTargetBeyondWALFails(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(100) require.Error(t, err, "loading version beyond WAL should fail") } @@ -517,8 +529,9 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { func TestReopenReusesWorkingDir(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0x01}) @@ -530,7 +543,8 @@ func TestReopenReusesWorkingDir(t *testing.T) { _, err = os.Stat(basePath) require.NoError(t, err, "SNAPSHOT_BASE should exist after close") - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -544,8 +558,9 @@ func TestReopenReusesWorkingDir(t *testing.T) { func TestWalOffsetForVersionFastPath(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -566,8 +581,9 @@ func TestWalOffsetForVersionFastPath(t *testing.T) { func TestWalOffsetForVersionBeforeWAL(t *testing.T) { dir := t.TempDir() - s := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -582,8 +598,9 @@ func TestWalOffsetForVersionBeforeWAL(t *testing.T) { func TestWalOffsetForVersionNotFound(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -600,8 +617,9 @@ func TestWalOffsetForVersionNotFound(t *testing.T) { func TestCatchupFromSpecificVersion(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) for i := 0; i < 10; i++ { @@ -612,7 +630,8 @@ func TestCatchupFromSpecificVersion(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() @@ -661,8 +680,9 @@ func TestPersistenceAllKeyTypes(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s1.LoadVersion(0) + s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + 
require.NoError(t, err) + _, err = s1.LoadVersion(0) require.NoError(t, err) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) @@ -680,7 +700,8 @@ func TestPersistenceAllKeyTypes(t *testing.T) { hash := s1.RootHash() require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 300b78cf36..c862fbccae 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -523,8 +523,9 @@ func TestStoreLegacyEmptyCommitLocalMeta(t *testing.T) { func TestStoreFsyncConfig(t *testing.T) { t.Run("DefaultConfig", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := store.LoadVersion(0) + store, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = store.LoadVersion(0) require.NoError(t, err) defer store.Close() @@ -535,10 +536,11 @@ func TestStoreFsyncConfig(t *testing.T) { t.Run("FsyncDisabled", func(t *testing.T) { dir := t.TempDir() - store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{ + store, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{ Fsync: false, }) - _, err := store.LoadVersion(0) + require.NoError(t, err) + _, err = store.LoadVersion(0) require.NoError(t, err) defer store.Close() @@ -571,8 +573,9 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { SnapshotInterval: 5, SnapshotKeepRecent: 2, } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &cfg) - _, err := 
s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -595,8 +598,9 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { SnapshotInterval: 10, SnapshotKeepRecent: 2, } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -622,8 +626,9 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { dir := t.TempDir() cfg := &Config{SnapshotInterval: 0} - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -714,8 +719,9 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { func TestLtHashDeterministicAcrossReopen(t *testing.T) { writeAndGetHash := func() []byte { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) @@ -838,8 +844,9 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { func TestStoreFsyncEnabled(t *testing.T) { dir := t.TempDir() cfg := &Config{Fsync: true} - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, 
flatkvRootDir), nil, cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -859,8 +866,9 @@ func TestStoreFsyncEnabled(t *testing.T) { func TestLastSnapshotTimeUpdated(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) defer s.Close() @@ -879,8 +887,9 @@ func TestLastSnapshotTimeUpdated(t *testing.T) { func TestWALRecordsChangesets(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) - _, err := s.LoadVersion(0) + s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + require.NoError(t, err) + _, err = s.LoadVersion(0) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) From 1c804a81cd3d0823954d071ddec0df284d5af9d4 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 10:02:18 -0500 Subject: [PATCH 035/119] move data dir into config --- .../bench/wrappers/db_implementations.go | 3 +- sei-db/state_db/sc/composite/store.go | 4 +- sei-db/state_db/sc/flatkv/config.go | 30 ++++ .../state_db/sc/flatkv/flatkv_test_config.go | 37 +++++ .../sc/flatkv/lthash_correctness_test.go | 8 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 140 ++++++++++++------ sei-db/state_db/sc/flatkv/store.go | 41 +++-- sei-db/state_db/sc/flatkv/store_test.go | 82 ++++++---- sei-db/state_db/sc/flatkv/store_write_test.go | 35 +++-- 9 files changed, 263 insertions(+), 117 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/flatkv_test_config.go diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index bf98c617d9..3449db3390 100644 --- 
a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -49,9 +49,10 @@ func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkv.Conf if config == nil { config = flatkv.DefaultConfig() } + config.DataDir = dbDir fmt.Printf("Opening flatKV from directory %s\n", dbDir) - cs, err := flatkv.NewCommitStore(ctx, dbDir, logger.NewNopLogger(), config) + cs, err := flatkv.NewCommitStore(ctx, logger.NewNopLogger(), config) if err != nil { return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) } diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 17a50cc690..c41ca4459c 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -64,9 +64,9 @@ func NewCompositeCommitStore( // Initialize FlatKV store struct if write mode requires it // Note: DB is NOT opened here, will be opened in LoadVersion if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { - flatkvPath := filepath.Join(homeDir, "data", "flatkv") + cfg.FlatKVConfig.DataDir = filepath.Join(homeDir, "data", "flatkv") var err error - store.evmCommitter, err = flatkv.NewCommitStore(ctx, flatkvPath, logger, cfg.FlatKVConfig) + store.evmCommitter, err = flatkv.NewCommitStore(ctx, logger, cfg.FlatKVConfig) if err != nil { return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 8c5b5a5041..9cc5d46194 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -2,6 +2,7 @@ package flatkv import ( "fmt" + "path/filepath" "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" @@ -14,6 +15,10 @@ const ( // Config defines configuration for the FlatKV (EVM) commit store. 
type Config struct { + // DataDir is the root directory for the FlatKV data files. + // Must be set before calling Validate(). + DataDir string + // Fsync controls whether PebbleDB writes (data DBs + metadataDB) use fsync. // WAL always uses NoSync (matching memiavl); crash recovery relies on // WAL catchup, which is idempotent. @@ -102,8 +107,33 @@ func DefaultConfig() *Config { return cfg } +// InitializeDataDirectories sets the DataDir for each nested PebbleDB config +// that does not already have one, using DataDir as the base path. The DBs live +// under the working directory: /working/. +func (c *Config) InitializeDataDirectories() { + workDir := filepath.Join(c.DataDir, workingDirName) + if c.AccountDBConfig.DataDir == "" { + c.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) + } + if c.CodeDBConfig.DataDir == "" { + c.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) + } + if c.StorageDBConfig.DataDir == "" { + c.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) + } + if c.LegacyDBConfig.DataDir == "" { + c.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) + } + if c.MetadataDBConfig.DataDir == "" { + c.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) + } +} + // Validate checks that the configuration is sane and returns an error if it is not. 
func (c *Config) Validate() error { + if c.DataDir == "" { + return fmt.Errorf("data dir is required") + } if c.AccountDBConfig.Validate() != nil { return fmt.Errorf("account db config is invalid: %w", c.AccountDBConfig.Validate()) } diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go new file mode 100644 index 0000000000..3a3c69068d --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -0,0 +1,37 @@ +package flatkv + +import ( + "path/filepath" + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" +) + +func smallTestPebbleConfig() pebbledb.PebbleDBConfig { + return pebbledb.PebbleDBConfig{ + CacheSize: 16 * unit.MB, + CacheShardCount: 8, + PageCacheSize: 16 * unit.MB, + EnableMetrics: false, + } +} + +// DefaultTestConfig returns a Config suitable for unit tests. It uses +// t.TempDir() as the DataDir root, small cache sizes, and disables metrics. 
+func DefaultTestConfig(t *testing.T) *Config { + t.Helper() + return &Config{ + DataDir: filepath.Join(t.TempDir(), flatkvRootDir), + SnapshotInterval: DefaultSnapshotInterval, + SnapshotKeepRecent: DefaultSnapshotKeepRecent, + AccountDBConfig: smallTestPebbleConfig(), + CodeDBConfig: smallTestPebbleConfig(), + StorageDBConfig: smallTestPebbleConfig(), + LegacyDBConfig: smallTestPebbleConfig(), + MetadataDBConfig: smallTestPebbleConfig(), + ReaderThreadsPerCore: 2.0, + ReaderPoolQueueSize: 1024, + MiscPoolThreadsPerCore: 4.0, + } +} diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 17a0c68060..fb10dd1344 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -640,7 +640,9 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() // Phase 1: create state and close - s1, err := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = dir + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -659,7 +661,9 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen and verify - s2, err := NewCommitStore(t.Context(), dir, nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = dir + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index fd43dbf2a4..69aac85600 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -34,7 +34,9 @@ func commitStorageEntry(t *testing.T, s *CommitStore, addr Address, slot Slot, v func TestSnapshotCreatesDir(t *testing.T) { dir := t.TempDir() - s, err := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -62,7 +64,9 @@ func TestSnapshotCreatesDir(t *testing.T) { func TestSnapshotIdempotent(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -83,7 +87,9 @@ func TestOpenFromSnapshot(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit v1 and v2, snapshot at v2, commit v3 - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -101,7 +107,9 @@ func TestOpenFromSnapshot(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen - should catchup from v2 snapshot + WAL entry for v3 - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -124,7 +132,9 @@ func TestOpenFromSnapshot(t *testing.T) { func TestCatchupUpdatesLtHash(t *testing.T) { dir := t.TempDir() - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) 
require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -143,7 +153,9 @@ func TestCatchupUpdatesLtHash(t *testing.T) { require.NoError(t, s1.Close()) // Reopen: catchup from v2 snapshot through v3,v4,v5 via WAL - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -156,9 +168,8 @@ func TestCatchupUpdatesLtHash(t *testing.T) { } func TestRollbackRewindsState(t *testing.T) { - dir := t.TempDir() - - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -194,9 +205,8 @@ func TestRollbackRewindsState(t *testing.T) { } func TestRollbackToSnapshotExact(t *testing.T) { - dir := t.TempDir() - - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -218,7 +228,9 @@ func TestRollbackToSnapshotExact(t *testing.T) { func TestPartialSnapshotCleanup(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -279,7 +291,9 @@ func TestMigrationFromFlatLayout(t *testing.T) { require.True(t, os.IsNotExist(err)) // Open the store - should trigger migration - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) 
+ cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -311,7 +325,9 @@ func TestOpenVersionValidation(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit some data - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -339,7 +355,9 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, db.Close()) // Phase 3: reopen - should detect skew and catchup - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -409,7 +427,9 @@ func TestSeekSnapshot(t *testing.T) { func TestLoadVersionWithTarget(t *testing.T) { dir := t.TempDir() - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -423,7 +443,9 @@ func TestLoadVersionWithTarget(t *testing.T) { require.NoError(t, s1.Close()) // Reopen at specific version 3 - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(3) require.NoError(t, err) @@ -443,7 +465,9 @@ func 
TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) // Phase 1: build baseline at v2 and snapshot it. - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -465,7 +489,9 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { // Phase 3: reopen exactly at v2. If later commits had mutated the snapshot // baseline in place, we'd incorrectly read 0x04 here. - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(2) require.NoError(t, err) @@ -475,7 +501,9 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s2.Close()) // Phase 4: reopen latest again to ensure catchup/replay still reaches v4. 
- s3, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s3, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s3.LoadVersion(0) require.NoError(t, err) @@ -496,7 +524,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { slot := Slot{0x81} key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -512,7 +542,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s.Close()) // Round 1: load exactly v2 - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(2) require.NoError(t, err) @@ -524,7 +556,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s1.Close()) // Round 2: load latest (catches up through v3, v4) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -536,7 +570,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s2.Close()) // Round 3: load v2 AGAIN — snapshot must still be clean. 
- s3, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s3, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s3.LoadVersion(2) require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") @@ -553,7 +589,9 @@ func TestLoadVersionMixedSequence(t *testing.T) { func TestRollbackTargetBeforeWALStart(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -588,7 +626,9 @@ func TestRollbackTargetBeforeWALStart(t *testing.T) { // Simulate restart: should stay at v2. require.NoError(t, s.Close()) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -787,7 +827,7 @@ func TestCreateWorkingDirReclones(t *testing.T) { func TestPruneSnapshotsKeepsRecent(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 1}) + s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 1}) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -812,7 +852,7 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { func TestPruneSnapshotsKeepAll(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 100}) + s, err := 
NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 100}) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -849,7 +889,9 @@ func TestOrphanSnapshotRecovery(t *testing.T) { _, err := os.Lstat(currentPath(flatkvDir)) require.True(t, os.IsNotExist(err), "no current symlink should exist") - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -911,8 +953,8 @@ func TestTraverseSnapshotsEarlyStop(t *testing.T) { // ============================================================================= func TestVerifyWALTailSuccess(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -926,8 +968,8 @@ func TestVerifyWALTailSuccess(t *testing.T) { } func TestVerifyWALTailMismatch(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -950,7 +992,7 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. 
- s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 0}) + s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 0}) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -975,8 +1017,8 @@ func TestTryTruncateWAL(t *testing.T) { } func TestTryTruncateWALNoSnapshot(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -998,7 +1040,9 @@ func TestTryTruncateWALNoSnapshot(t *testing.T) { func TestRollbackRemovesPostTargetSnapshots(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -1097,7 +1141,7 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) + s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 10}) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -1112,7 +1156,7 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 1) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{SnapshotKeepRecent: 10}) + s2, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 10}) require.NoError(t, err) _, err = 
s2.LoadVersion(ver) require.NoError(t, err) @@ -1128,7 +1172,9 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(context.Background(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -1155,7 +1201,9 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { // Reopen: working dir should be reused (SNAPSHOT_BASE matches current), // so committedVersion should be 5 (from working dir metadata), not 2 // (from the snapshot). Catchup should replay 0 entries. - s2, err := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(context.Background(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -1167,7 +1215,9 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { func TestSnapshotPreservesAllKeyTypes(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -1189,7 +1239,9 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) require.NoError(t, s.Close()) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) 
require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index c9777a163e..b7c355c539 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -69,7 +69,6 @@ type CommitStore struct { cancel context.CancelFunc log logger.Logger config *Config - dbDir string // Five separate PebbleDB instances metadataDB seidbtypes.KeyValueDB // Global version + LtHash watermark @@ -122,12 +121,11 @@ var _ Store = (*CommitStore)(nil) // Call LoadVersion to open and initialize. func NewCommitStore( ctx context.Context, - dbDir string, log logger.Logger, cfg *Config, ) (*CommitStore, error) { - // TODO pre-populate file paths in sub-configs + cfg.InitializeDataDirectories() if err := cfg.Validate(); err != nil { return nil, fmt.Errorf("failed to validate config: %w", err) @@ -152,7 +150,6 @@ func NewCommitStore( cancel: cancel, log: log, config: cfg, - dbDir: dbDir, localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), codeWrites: make(map[string]*pendingKVWrite), @@ -168,7 +165,7 @@ func NewCommitStore( } func (s *CommitStore) flatkvDir() string { - return s.dbDir + return s.config.DataDir } // LoadVersion loads the specified version of the database. @@ -291,7 +288,7 @@ func (s *CommitStore) open() (retErr error) { return fmt.Errorf("create working dir: %w", err) } - if err := s.openAllDBs(workDir, dir); err != nil { + if err := s.openAllDBs(); err != nil { return err } @@ -323,23 +320,22 @@ func (s *CommitStore) acquireFileLock(dir string) error { return nil } -// openPebbleDB sets the DataDir on cfg, creates the directory, and opens a PebbleDB instance. 
-func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig, dir string) (seidbtypes.KeyValueDB, error) { - cfg.DataDir = dir - if err := os.MkdirAll(dir, 0750); err != nil { - return nil, fmt.Errorf("create directory %s: %w", dir, err) +// openPebbleDB creates the directory at cfg.DataDir and opens a PebbleDB instance. +func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig) (seidbtypes.KeyValueDB, error) { + if err := os.MkdirAll(cfg.DataDir, 0750); err != nil { + return nil, fmt.Errorf("create directory %s: %w", cfg.DataDir, err) } db, err := pebbledb.Open(s.ctx, cfg, pebble.DefaultComparer, s.readPool, s.miscPool) if err != nil { - return nil, fmt.Errorf("open %s: %w", dir, err) + return nil, fmt.Errorf("open %s: %w", cfg.DataDir, err) } return db, nil } -// openAllDBs opens the 5 PebbleDBs from the snapshot directory, the changelog -// WAL from the flatkv root, and loads per-DB local metadata. On failure all -// already-opened handles are closed. -func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { +// openAllDBs opens the 5 PebbleDBs using the paths in the config, the +// changelog WAL from the flatkv root, and loads per-DB local metadata. +// On failure all already-opened handles are closed. 
+func (s *CommitStore) openAllDBs() (retErr error) { var toClose []io.Closer defer func() { if retErr != nil { @@ -358,37 +354,37 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { var err error - s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig, filepath.Join(snapDir, accountDBDir)) + s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig) if err != nil { return err } toClose = append(toClose, s.accountDB) - s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig, filepath.Join(snapDir, codeDBDir)) + s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig) if err != nil { return err } toClose = append(toClose, s.codeDB) - s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig, filepath.Join(snapDir, storageDBDir)) + s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig) if err != nil { return err } toClose = append(toClose, s.storageDB) - s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig, filepath.Join(snapDir, legacyDBDir)) + s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig) if err != nil { return err } toClose = append(toClose, s.legacyDB) - s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig, filepath.Join(snapDir, metadataDir)) + s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig) if err != nil { return err } toClose = append(toClose, s.metadataDB) - changelogPath := filepath.Join(flatkvRoot, changelogDir) + changelogPath := filepath.Join(s.flatkvDir(), changelogDir) s.changelog, err = wal.NewChangelogWAL(s.log, changelogPath, wal.Config{ WriteBufferSize: 0, KeepRecent: 0, @@ -399,7 +395,6 @@ func (s *CommitStore) openAllDBs(snapDir, flatkvRoot string) (retErr error) { } toClose = append(toClose, s.changelog) - // Load per-DB local metadata (or initialize if not present) dataDBs := map[string]seidbtypes.KeyValueDB{ accountDBDir: s.accountDB, codeDBDir: s.codeDB, diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 
12fd44c639..dc87f30585 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -69,8 +69,7 @@ func setupTestDB(t *testing.T) types.KeyValueDB { // setupTestStore creates a minimal test store func setupTestStore(t *testing.T) *CommitStore { t.Helper() - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + s, err := NewCommitStore(t.Context(), nil, DefaultTestConfig(t)) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -81,7 +80,8 @@ func setupTestStore(t *testing.T) *CommitStore { func setupTestStoreWithConfig(t *testing.T, cfg *Config) *CommitStore { t.Helper() dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -101,8 +101,8 @@ func commitAndCheck(t *testing.T, s *CommitStore) int64 { // ============================================================================= func TestStoreOpenClose(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -111,8 +111,8 @@ func TestStoreOpenClose(t *testing.T) { } func TestStoreClose(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -308,7 +308,9 @@ func TestStorePersistence(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and close - s1, err := NewCommitStore(t.Context(), 
filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -319,7 +321,9 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -437,12 +441,16 @@ func TestStoreRollbackNoSnapshot(t *testing.T) { func TestFileLockPreventsDoubleOpen(t *testing.T) { dir := t.TempDir() - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.Error(t, err, "second open on same dir should fail due to file lock") @@ -460,8 +468,8 @@ func TestFileLockPreventsDoubleOpen(t *testing.T) { // ============================================================================= func TestClearChangelog(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -486,8 +494,8 @@ func TestClearChangelog(t 
*testing.T) { // ============================================================================= func TestCloseDBsOnlyIdempotent(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -506,7 +514,9 @@ func TestCloseDBsOnlyIdempotent(t *testing.T) { func TestLoadVersionTargetBeyondWALFails(t *testing.T) { dir := t.TempDir() - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -516,7 +526,9 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(100) require.Error(t, err, "loading version beyond WAL should fail") @@ -529,7 +541,9 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { func TestReopenReusesWorkingDir(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -543,7 +557,9 @@ func TestReopenReusesWorkingDir(t *testing.T) { _, err = os.Stat(basePath) require.NoError(t, err, "SNAPSHOT_BASE should exist after close") - s2, err := 
NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -557,8 +573,8 @@ func TestReopenReusesWorkingDir(t *testing.T) { // ============================================================================= func TestWalOffsetForVersionFastPath(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -580,8 +596,8 @@ func TestWalOffsetForVersionFastPath(t *testing.T) { } func TestWalOffsetForVersionBeforeWAL(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -597,8 +613,8 @@ func TestWalOffsetForVersionBeforeWAL(t *testing.T) { } func TestWalOffsetForVersionNotFound(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -617,7 +633,9 @@ func TestWalOffsetForVersionNotFound(t *testing.T) { func TestCatchupFromSpecificVersion(t *testing.T) { dir := t.TempDir() - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) 
require.NoError(t, err) @@ -630,7 +648,9 @@ func TestCatchupFromSpecificVersion(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) @@ -680,7 +700,9 @@ func TestPersistenceAllKeyTypes(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - s1, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s1.LoadVersion(0) require.NoError(t, err) @@ -700,7 +722,9 @@ func TestPersistenceAllKeyTypes(t *testing.T) { hash := s1.RootHash() require.NoError(t, s1.Close()) - s2, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s2.LoadVersion(0) require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index c862fbccae..e2f46b4e98 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -522,8 +522,8 @@ func TestStoreLegacyEmptyCommitLocalMeta(t *testing.T) { func TestStoreFsyncConfig(t *testing.T) { t.Run("DefaultConfig", func(t *testing.T) { - dir := t.TempDir() - store, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + store, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = store.LoadVersion(0) require.NoError(t, 
err) @@ -536,8 +536,9 @@ func TestStoreFsyncConfig(t *testing.T) { t.Run("FsyncDisabled", func(t *testing.T) { dir := t.TempDir() - store, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &Config{ - Fsync: false, + store, err := NewCommitStore(t.Context(), nil, &Config{ + DataDir: filepath.Join(dir, flatkvRootDir), + Fsync: false, }) require.NoError(t, err) _, err = store.LoadVersion(0) @@ -570,10 +571,11 @@ func TestStoreFsyncConfig(t *testing.T) { func TestAutoSnapshotTriggeredByInterval(t *testing.T) { dir := t.TempDir() cfg := Config{ + DataDir: filepath.Join(dir, flatkvRootDir), SnapshotInterval: 5, SnapshotKeepRecent: 2, } - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, &cfg) + s, err := NewCommitStore(t.Context(), nil, &cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -595,10 +597,11 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { dir := t.TempDir() cfg := &Config{ + DataDir: filepath.Join(dir, flatkvRootDir), SnapshotInterval: 10, SnapshotKeepRecent: 2, } - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -625,8 +628,8 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { dir := t.TempDir() - cfg := &Config{SnapshotInterval: 0} - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + cfg := &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotInterval: 0} + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -718,8 +721,8 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { func TestLtHashDeterministicAcrossReopen(t *testing.T) { 
writeAndGetHash := func() []byte { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -843,8 +846,8 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { func TestStoreFsyncEnabled(t *testing.T) { dir := t.TempDir() - cfg := &Config{Fsync: true} - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, cfg) + cfg := &Config{DataDir: filepath.Join(dir, flatkvRootDir), Fsync: true} + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -865,8 +868,8 @@ func TestStoreFsyncEnabled(t *testing.T) { // ============================================================================= func TestLastSnapshotTimeUpdated(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) @@ -886,8 +889,8 @@ func TestLastSnapshotTimeUpdated(t *testing.T) { // ============================================================================= func TestWALRecordsChangesets(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), nil, DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0) require.NoError(t, err) From 663b2eacac80e0a062267e043f71b3ffb833aa99 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 10:04:44 -0500 Subject: [PATCH 036/119] fix config file --- .../bench/cryptosim/config/standard-perf.json | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git 
a/sei-db/state_db/bench/cryptosim/config/standard-perf.json b/sei-db/state_db/bench/cryptosim/config/standard-perf.json index fb0a736ab7..a233241242 100644 --- a/sei-db/state_db/bench/cryptosim/config/standard-perf.json +++ b/sei-db/state_db/bench/cryptosim/config/standard-perf.json @@ -3,14 +3,16 @@ "DataDir": "data", "MinimumNumberOfColdAccounts": 1000000, "MinimumNumberOfDormantAccounts": 100000000, - "AccountDBConfig": { - "CacheSize": 1073741824 - }, - "CodeDBConfig": { - "CacheSize": 1073741824 - }, - "StorageDBConfig": { - "CacheSize": 4294967296 + "FlatKVConfig": { + "AccountDBConfig": { + "CacheSize": 1073741824 + }, + "CodeDBConfig": { + "CacheSize": 1073741824 + }, + "StorageDBConfig": { + "CacheSize": 4294967296 + } } } From bb530b56fef4670373030ae2ee1501a3e3d68fcd Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 10:36:58 -0500 Subject: [PATCH 037/119] cleanup --- sei-db/db_engine/pebbledb/db.go | 8 +++----- sei-db/db_engine/pebbledb/pebbledb_config.go | 10 +++++----- sei-db/db_engine/pebbledb/pebbledb_test_config.go | 2 +- sei-db/state_db/sc/flatkv/flatkv_test_config.go | 2 +- 4 files changed, 10 insertions(+), 12 deletions(-) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index afb39b906d..cf43c11d50 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -46,12 +46,10 @@ func Open( return nil, fmt.Errorf("failed to validate config: %w", err) } - // Internal pebbleDB cache, used to cache pages in memory. // TODO verify accuracy of this statement - pebbleCache := pebble.NewCache(int64(config.PageCacheSize)) + // Internal pebbleDB block cache, used to cache uncompressed SSTable data blocks in memory. + pebbleCache := pebble.NewCache(int64(config.BlockCacheSize)) defer pebbleCache.Unref() - // TODO potentially expose more options here... 
- popts := &pebble.Options{ Cache: pebbleCache, Comparer: comparer, @@ -110,7 +108,7 @@ func Open( return cloned, true, nil } - // A high level cache per key (as opposed to the low level pebble page cache). + // A high level cache per key (as opposed to the low level pebble block cache). cache, err := pebblecache.NewCache( ctx, readFunction, diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index 67fc82435c..feedb176d4 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -14,8 +14,8 @@ type PebbleDBConfig struct { CacheSize int // The number of shards in the key-value cache. Must be a power of two and greater than 0. CacheShardCount int - // The size of pebbleDB's internal page cache, in bytes. - PageCacheSize int + // The size of pebbleDB's internal block cache, in bytes. + BlockCacheSize int // Whether to enable metrics. EnableMetrics bool } @@ -25,7 +25,7 @@ func DefaultConfig() PebbleDBConfig { return PebbleDBConfig{ CacheSize: 512 * unit.MB, CacheShardCount: 8, - PageCacheSize: 512 * unit.MB, + BlockCacheSize: 512 * unit.MB, EnableMetrics: true, } } @@ -41,8 +41,8 @@ func (c *PebbleDBConfig) Validate() error { if c.CacheSize <= 0 { return fmt.Errorf("cache size must be greater than 0") } - if c.PageCacheSize <= 0 { - return fmt.Errorf("page cache size must be greater than 0") + if c.BlockCacheSize <= 0 { + return fmt.Errorf("block cache size must be greater than 0") } return nil } diff --git a/sei-db/db_engine/pebbledb/pebbledb_test_config.go b/sei-db/db_engine/pebbledb/pebbledb_test_config.go index 462dd97d3c..ef39ed299c 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_test_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_test_config.go @@ -13,7 +13,7 @@ func DefaultTestConfig(t *testing.T) PebbleDBConfig { cfg.DataDir = t.TempDir() cfg.CacheSize = 16 * unit.MB - cfg.PageCacheSize = 16 * unit.MB + cfg.BlockCacheSize = 16 * unit.MB 
cfg.EnableMetrics = false return cfg diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go index 3a3c69068d..05328cac81 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -12,7 +12,7 @@ func smallTestPebbleConfig() pebbledb.PebbleDBConfig { return pebbledb.PebbleDBConfig{ CacheSize: 16 * unit.MB, CacheShardCount: 8, - PageCacheSize: 16 * unit.MB, + BlockCacheSize: 16 * unit.MB, EnableMetrics: false, } } From 04daf755d07d3b3836f7d3c16f33508dc1eab240 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 10:39:27 -0500 Subject: [PATCH 038/119] move pebble metrics to proper location --- sei-db/db_engine/pebbledb/db.go | 3 +-- .../{common/metrics => db_engine/pebbledb}/pebble_metrics.go | 5 +---- sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go | 1 + 3 files changed, 3 insertions(+), 6 deletions(-) rename sei-db/{common/metrics => db_engine/pebbledb}/pebble_metrics.go (99%) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index cf43c11d50..cc19d587e0 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -13,7 +13,6 @@ import ( "github.com/cockroachdb/pebble/v2/sstable" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/pebblecache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -122,7 +121,7 @@ func Open( ctx, cancel := context.WithCancel(ctx) if config.EnableMetrics { - metrics.NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), metricsScrapeInterval) + NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), metricsScrapeInterval) } return &pebbleDB{ diff --git 
a/sei-db/common/metrics/pebble_metrics.go b/sei-db/db_engine/pebbledb/pebble_metrics.go similarity index 99% rename from sei-db/common/metrics/pebble_metrics.go rename to sei-db/db_engine/pebbledb/pebble_metrics.go index d41e55d807..3096462fa9 100644 --- a/sei-db/common/metrics/pebble_metrics.go +++ b/sei-db/db_engine/pebbledb/pebble_metrics.go @@ -1,7 +1,4 @@ -// Package metrics provides OpenTelemetry instruments and scrapers for Pebble DB metrics, -// allowing any Pebble instance to export compaction, flush, cache, and storage metrics -// to OTel-compatible backends (e.g., Prometheus). -package metrics +package pebbledb import ( "context" diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go new file mode 100644 index 0000000000..eca272475e --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go @@ -0,0 +1 @@ +package pebblecache \ No newline at end of file From 354818ee35f0c9860762bbbae87afc020d588865 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 12:06:14 -0500 Subject: [PATCH 039/119] clean up metrics --- sei-db/db_engine/pebbledb/db.go | 23 +-- .../pebbledb/pebblecache/cache_impl.go | 30 +++- .../pebbledb/pebblecache/cache_metrics.go | 134 +++++++++++++++++- .../db_engine/pebbledb/pebblecache/shard.go | 37 ++++- sei-db/db_engine/pebbledb/pebbledb_config.go | 15 +- 5 files changed, 219 insertions(+), 20 deletions(-) diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index cc19d587e0..840ac9e29c 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "path/filepath" - "time" "github.com/cockroachdb/pebble/v2" "github.com/cockroachdb/pebble/v2/bloom" @@ -18,8 +17,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -const metricsScrapeInterval = 10 * time.Second - // pebbleDB implements the db_engine.DB interface using PebbleDB. 
type pebbleDB struct { db *pebble.DB @@ -107,6 +104,16 @@ func Open( return cloned, true, nil } + ctx, cancel := context.WithCancel(ctx) + if config.EnableMetrics { + NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), config.MetricsScrapeInterval) + } + + var cacheName string + if config.EnableMetrics { + cacheName = filepath.Base(config.DataDir) + } + // A high level cache per key (as opposed to the low level pebble block cache). cache, err := pebblecache.NewCache( ctx, @@ -114,16 +121,14 @@ func Open( 8, config.CacheSize, readPool, - miscPool) + miscPool, + cacheName, + config.MetricsScrapeInterval) if err != nil { + cancel() return nil, fmt.Errorf("failed to create flatcache: %w", err) } - ctx, cancel := context.WithCancel(ctx) - if config.EnableMetrics { - NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), metricsScrapeInterval) - } - return &pebbleDB{ db: db, metricsCancel: cancel, diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go index 19504c1519..edebe382c7 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sync" + "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -28,7 +29,8 @@ type cache struct { miscPool threading.Pool } -// Creates a new Cache. +// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the +// background size scrape runs every metricsScrapeInterval. func NewCache( ctx context.Context, // A function that reads a value from the database. @@ -41,6 +43,10 @@ func NewCache( readPool threading.Pool, // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, + // Name used as the "cache" attribute on metrics. Empty string disables metrics. 
+ cacheName string, + // How often to scrape cache size for metrics. Ignored if cacheName is empty. + metricsScrapeInterval time.Duration, ) (Cache, error) { if shardCount <= 0 || (shardCount&(shardCount-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo @@ -66,13 +72,31 @@ func NewCache( } } - return &cache{ + c := &cache{ ctx: ctx, shardManager: shardManager, shards: shards, readPool: readPool, miscPool: miscPool, - }, nil + } + + if cacheName != "" { + metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) + for _, s := range c.shards { + s.metrics = metrics + } + } + + return c, nil +} + +func (c *cache) getCacheSizeInfo() (bytes int64, entries int64) { + for _, s := range c.shards { + b, e := s.getSizeInfo() + bytes += int64(b) + entries += int64(e) + } + return bytes, entries } func (c *cache) BatchSet(updates []CacheUpdate) error { diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go index eca272475e..fd5b077bca 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go @@ -1 +1,133 @@ -package pebblecache \ No newline at end of file +package pebblecache + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" +) + +const cacheMeterName = "seidb_pebblecache" + +// CacheMetrics records OTel metrics for a pebblecache instance. +// All report methods are nil-safe: if the receiver is nil, they are no-ops, +// allowing the cache to call them unconditionally regardless of whether metrics +// are enabled. +// +// The cacheName is used as the "cache" attribute on all recorded metrics, +// enabling multiple cache instances to be distinguished in dashboards. +type CacheMetrics struct { + // Pre-computed attribute option reused on every recording to avoid + // per-call allocations on the hot path. 
+ attrs metric.MeasurementOption + + sizeBytes metric.Int64Gauge + sizeEntries metric.Int64Gauge + hits metric.Int64Counter + misses metric.Int64Counter + missLatency metric.Float64Histogram +} + +// newCacheMetrics creates a CacheMetrics that records cache statistics via OTel. +// A background goroutine scrapes cache size every scrapeInterval until ctx is +// cancelled. The cacheName is attached as the "cache" attribute to all recorded +// metrics, enabling multiple cache instances to be distinguished in dashboards. +// +// Multiple instances are safe: OTel instrument registration is idempotent, so each +// call receives references to the same underlying instruments. The "cache" attribute +// distinguishes series (e.g. pebblecache_hits{cache="state"}). +func newCacheMetrics( + ctx context.Context, + cacheName string, + scrapeInterval time.Duration, + getSize func() (bytes int64, entries int64), +) *CacheMetrics { + meter := otel.Meter(cacheMeterName) + + sizeBytes, _ := meter.Int64Gauge( + "pebblecache_size_bytes", + metric.WithDescription("Current cache size in bytes"), + metric.WithUnit("By"), + ) + sizeEntries, _ := meter.Int64Gauge( + "pebblecache_size_entries", + metric.WithDescription("Current number of entries in the cache"), + metric.WithUnit("{count}"), + ) + hits, _ := meter.Int64Counter( + "pebblecache_hits", + metric.WithDescription("Total number of cache hits"), + metric.WithUnit("{count}"), + ) + misses, _ := meter.Int64Counter( + "pebblecache_misses", + metric.WithDescription("Total number of cache misses"), + metric.WithUnit("{count}"), + ) + missLatency, _ := meter.Float64Histogram( + "pebblecache_miss_latency", + metric.WithDescription("Time taken to resolve a cache miss from the backing store"), + metric.WithUnit("s"), + ) + + cm := &CacheMetrics{ + attrs: metric.WithAttributes(attribute.String("cache", cacheName)), + sizeBytes: sizeBytes, + sizeEntries: sizeEntries, + hits: hits, + misses: misses, + missLatency: missLatency, + } + + go 
cm.collectLoop(ctx, scrapeInterval, getSize) + + return cm +} + +func (cm *CacheMetrics) reportCacheHits(count int64) { + if cm == nil { + return + } + cm.hits.Add(context.Background(), count, cm.attrs) +} + +func (cm *CacheMetrics) reportCacheMisses(count int64) { + if cm == nil { + return + } + cm.misses.Add(context.Background(), count, cm.attrs) +} + +func (cm *CacheMetrics) reportCacheMissLatency(latency time.Duration) { + if cm == nil { + return + } + cm.missLatency.Record(context.Background(), latency.Seconds(), cm.attrs) +} + +// collectLoop periodically scrapes cache size from the provided function +// and records it as gauge values. It exits when ctx is cancelled. +func (cm *CacheMetrics) collectLoop( + ctx context.Context, + interval time.Duration, + getSize func() (bytes int64, entries int64), +) { + + if cm == nil { + return + } + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + bytes, entries := getSize() + cm.sizeBytes.Record(ctx, bytes, cm.attrs) + cm.sizeEntries.Record(ctx, entries, cm.attrs) + } + } +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index a7e10823cc..937faf447f 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "sync" + "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -30,6 +31,9 @@ type shard struct { // The maximum size of this cache, in bytes. maxSize int + + // Cache-level metrics. Nil-safe; if nil, no metrics are recorded. + metrics *CacheMetrics } // The result of a read from the underlying database. 
@@ -106,18 +110,23 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { s.gcQueue.Touch(key) } s.lock.Unlock() + s.metrics.reportCacheHits(1) return value, true, nil case statusDeleted: if updateLru { s.gcQueue.Touch(key) } s.lock.Unlock() + s.metrics.reportCacheHits(1) return nil, false, nil case statusScheduled: // Another goroutine initiated a read, wait for that read to finish. valueChan := entry.valueChan s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } @@ -132,6 +141,8 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { valueChan := make(chan readResult, 1) entry.valueChan = valueChan s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() err := s.readPool.Submit(s.ctx, func() { value, found, readErr := s.readFunc(key) entry.injectValue(key, readResult{value: value, found: found, err: readErr}) @@ -140,6 +151,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { return nil, false, fmt.Errorf("failed to schedule read: %w", err) } result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) if err != nil { return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) } @@ -207,6 +219,7 @@ type pendingRead struct { // BatchGet reads a batch of keys from the shard. Results are written into the provided map. 
func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { pending := make([]pendingRead, 0, len(keys)) + var hits int64 s.lock.Lock() for key := range keys { @@ -215,8 +228,10 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { switch entry.status { case statusAvailable: keys[key] = types.BatchGetResult{Value: entry.value, Found: true} + hits++ case statusDeleted: keys[key] = types.BatchGetResult{Found: false} + hits++ case statusScheduled: pending = append(pending, pendingRead{ key: key, @@ -240,6 +255,16 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { } s.lock.Unlock() + if hits > 0 { + s.metrics.reportCacheHits(hits) + } + if len(pending) == 0 { + return nil + } + + s.metrics.reportCacheMisses(int64(len(pending))) + startTime := time.Now() + for i := range pending { if pending[i].needsSchedule { p := &pending[i] @@ -268,9 +293,8 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { } } - if len(pending) > 0 { - go s.bulkInjectValues(pending) - } + s.metrics.reportCacheMissLatency(time.Since(startTime)) + go s.bulkInjectValues(pending) return nil } @@ -310,6 +334,13 @@ func (s *shard) evictUnlocked() { } } +// getSizeInfo returns the current size (bytes) and entry count under the shard lock. +func (s *shard) getSizeInfo() (bytes int, entries int) { + s.lock.Lock() + defer s.lock.Unlock() + return s.gcQueue.GetTotalSize(), s.gcQueue.GetCount() +} + // Set sets the value for the given key. 
func (s *shard) Set(key []byte, value []byte) { s.lock.Lock() diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index feedb176d4..005d3c3c71 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -2,6 +2,7 @@ package pebbledb import ( "fmt" + "time" "github.com/sei-protocol/sei-chain/sei-db/common/unit" ) @@ -18,15 +19,18 @@ type PebbleDBConfig struct { BlockCacheSize int // Whether to enable metrics. EnableMetrics bool + // How often to scrape metrics (pebble internals + cache size). + MetricsScrapeInterval time.Duration } // Default configuration for the PebbleDB database. func DefaultConfig() PebbleDBConfig { return PebbleDBConfig{ - CacheSize: 512 * unit.MB, - CacheShardCount: 8, - BlockCacheSize: 512 * unit.MB, - EnableMetrics: true, + CacheSize: 512 * unit.MB, + CacheShardCount: 8, + BlockCacheSize: 512 * unit.MB, + EnableMetrics: true, + MetricsScrapeInterval: 10 * time.Second, } } @@ -44,5 +48,8 @@ func (c *PebbleDBConfig) Validate() error { if c.BlockCacheSize <= 0 { return fmt.Errorf("block cache size must be greater than 0") } + if c.EnableMetrics && c.MetricsScrapeInterval <= 0 { + return fmt.Errorf("metrics scrape interval must be positive when metrics are enabled") + } return nil } From b1574ac33f870a9afcfbe72d75fae3b963e40cba Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 12:28:08 -0500 Subject: [PATCH 040/119] updated dashboard --- .../dashboards/cryptosim-dashboard.json | 1114 ++++++++++++++--- .../pebbledb/pebblecache/cache_metrics.go | 5 + 2 files changed, 950 insertions(+), 169 deletions(-) diff --git a/docker/monitornode/dashboards/cryptosim-dashboard.json b/docker/monitornode/dashboards/cryptosim-dashboard.json index 12d6542a09..d93513cc27 100644 --- a/docker/monitornode/dashboards/cryptosim-dashboard.json +++ b/docker/monitornode/dashboards/cryptosim-dashboard.json @@ -436,7 +436,7 @@ "targets": [ { "editorMode": 
"code", - "expr": "rate(cryptosim_transactions_processed_total[5m])", + "expr": "rate(cryptosim_transactions_processed_total[$__rate_interval])", "legendFormat": "__auto", "range": true, "refId": "A" @@ -453,6 +453,782 @@ "x": 0, "y": 33 }, + "id": 277, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 34 + }, + "id": 279, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "pebblecache_size_entries", + "legendFormat": "{{cache}}", + "range": true, + "refId": "A" + } + ], + "title": "Cache Entry Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": 
false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "id": 278, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "pebblecache_size_bytes", + "legendFormat": "{{cache}}", + "range": true, + "refId": "A" + } + ], + "title": "Cache Size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", 
+ "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "id": 280, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_hits_total[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Hits / Second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 42 + }, + "id": 
281, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_misses_total[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Misses / Second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 50 + }, + "id": 282, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_hits_total[$__rate_interval]))\n/\nclamp_min(\n sum by (cache) 
(rate(pebblecache_hits_total[$__rate_interval]))\n +\n sum by (cache) (rate(pebblecache_misses_total[$__rate_interval])),\n 1e-10\n)\n* 100", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Hit Percentage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 50 + }, + "id": 285, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_miss_latency_seconds_sum[$__rate_interval]))\n/\nclamp_min(sum by (cache) (rate(pebblecache_miss_latency_seconds_count[$__rate_interval])), 1e-10)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Average Cache Miss Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": 
"prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 58 + }, + "id": 284, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "histogram_quantile(0.5, sum by (cache, le) (rate(pebblecache_miss_latency_seconds_bucket[$__rate_interval])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Miss Latency (p50)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 58 + }, + "id": 283, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum by (cache, le) (rate(pebblecache_miss_latency_seconds_bucket[$__rate_interval])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Miss Latency (p99)", + "type": "timeseries" + } + ], + "title": "Cache", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, "id": 9, "panels": [ { @@ -521,7 +1297,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18 + "y": 66 }, "id": 6, "options": { @@ -651,7 +1427,7 @@ "h": 8, "w": 12, "x": 12, - "y": 18 + "y": 66 }, "id": 7, "options": { @@ -746,7 +1522,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3370 + "y": 74 }, "id": 10, "options": { @@ -840,7 +1616,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3370 + "y": 74 }, "id": 11, "options": { @@ -879,7 +1655,7 @@ "h": 1, "w": 24, "x": 0, - "y": 34 + "y": 35 }, "id": 13, "panels": [ @@ -909,7 +1685,7 @@ "h": 16, "w": 12, "x": 0, - "y": 19 + "y": 6867 }, "id": 20, "options": { @@ -1012,7 +1788,7 @@ "h": 8, "w": 12, "x": 12, - "y": 19 + "y": 6867 }, 
"id": 19, "options": { @@ -1143,7 +1919,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3243 + "y": 6883 }, "id": 21, "options": { @@ -1238,7 +2014,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3291 + "y": 6891 }, "id": 22, "options": { @@ -1369,7 +2145,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3291 + "y": 6891 }, "id": 23, "options": { @@ -1500,7 +2276,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3299 + "y": 6899 }, "id": 24, "options": { @@ -1631,7 +2407,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3299 + "y": 6899 }, "id": 25, "options": { @@ -1762,7 +2538,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3307 + "y": 6907 }, "id": 26, "options": { @@ -1893,7 +2669,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3307 + "y": 6907 }, "id": 27, "options": { @@ -2024,7 +2800,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3315 + "y": 6915 }, "id": 28, "options": { @@ -2099,7 +2875,7 @@ "h": 1, "w": 24, "x": 0, - "y": 35 + "y": 36 }, "id": 8, "panels": [ @@ -2168,7 +2944,7 @@ "h": 8, "w": 12, "x": 0, - "y": 44 + "y": 6868 }, "id": 1, "options": { @@ -2267,7 +3043,7 @@ "h": 8, "w": 12, "x": 12, - "y": 44 + "y": 6868 }, "id": 18, "options": { @@ -2342,7 +3118,7 @@ "h": 1, "w": 24, "x": 0, - "y": 36 + "y": 37 }, "id": 12, "panels": [ @@ -2411,7 +3187,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7478 + "y": 17750 }, "id": 3, "options": { @@ -2510,7 +3286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7478 + "y": 17750 }, "id": 4, "options": { @@ -2585,7 +3361,7 @@ "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 38 }, "id": 29, "panels": [ @@ -2655,7 +3431,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3382 + "y": 13654 }, "id": 31, "options": { @@ -2750,7 +3526,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3382 + "y": 13654 }, "id": 36, "options": { @@ -2845,7 +3621,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3390 + "y": 13662 }, "id": 38, "options": { @@ -2884,7 +3660,7 @@ "h": 1, "w": 24, "x": 0, - "y": 38 + "y": 39 }, "id": 35, "panels": [ @@ -2953,7 +3729,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8089 + "y": 18361 }, "id": 30, "options": { @@ -3048,7 +3824,7 @@ "h": 8, "w": 12, "x": 12, - 
"y": 8089 + "y": 18361 }, "id": 33, "options": { @@ -3143,7 +3919,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8129 + "y": 18401 }, "id": 34, "options": { @@ -3182,7 +3958,7 @@ "h": 1, "w": 24, "x": 0, - "y": 39 + "y": 40 }, "id": 37, "panels": [ @@ -3252,7 +4028,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7482 + "y": 10248 }, "id": 39, "options": { @@ -3347,7 +4123,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7482 + "y": 10248 }, "id": 40, "options": { @@ -3442,7 +4218,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7490 + "y": 10296 }, "id": 41, "options": { @@ -3537,7 +4313,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7490 + "y": 10296 }, "id": 42, "options": { @@ -3632,7 +4408,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7498 + "y": 10304 }, "id": 32, "options": { @@ -3671,7 +4447,7 @@ "h": 1, "w": 24, "x": 0, - "y": 40 + "y": 41 }, "id": 44, "panels": [ @@ -3741,7 +4517,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7283 + "y": 17555 }, "id": 43, "options": { @@ -3780,7 +4556,7 @@ "h": 1, "w": 24, "x": 0, - "y": 41 + "y": 42 }, "id": 117, "panels": [ @@ -4040,7 +4816,7 @@ "h": 8, "w": 12, "x": 0, - "y": 218 + "y": 10490 }, "id": 261, "options": { @@ -4135,7 +4911,7 @@ "h": 8, "w": 12, "x": 12, - "y": 218 + "y": 10490 }, "id": 263, "options": { @@ -4230,7 +5006,7 @@ "h": 8, "w": 12, "x": 0, - "y": 226 + "y": 10498 }, "id": 262, "options": { @@ -4325,7 +5101,7 @@ "h": 8, "w": 12, "x": 12, - "y": 226 + "y": 10498 }, "id": 264, "options": { @@ -4364,7 +5140,7 @@ "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 43 }, "id": 191, "panels": [ @@ -4433,7 +5209,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3403 + "y": 13675 }, "id": 155, "options": { @@ -4528,7 +5304,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3403 + "y": 13675 }, "id": 111, "options": { @@ -4623,7 +5399,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3467 + "y": 13739 }, "id": 175, "options": { @@ -4718,7 +5494,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3467 + "y": 13739 }, "id": 173, "options": { @@ -4812,7 +5588,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3475 + "y": 13747 }, "id": 138, "options": { 
@@ -4907,7 +5683,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3475 + "y": 13747 }, "id": 172, "options": { @@ -5002,7 +5778,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3483 + "y": 13755 }, "id": 236, "options": { @@ -5041,7 +5817,7 @@ "h": 1, "w": 24, "x": 0, - "y": 43 + "y": 44 }, "id": 118, "panels": [ @@ -5110,7 +5886,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1556 + "y": 11828 }, "id": 127, "options": { @@ -5205,7 +5981,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1556 + "y": 11828 }, "id": 120, "options": { @@ -5299,7 +6075,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1564 + "y": 11836 }, "id": 128, "options": { @@ -5394,7 +6170,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1564 + "y": 11836 }, "id": 121, "options": { @@ -5488,7 +6264,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1572 + "y": 11844 }, "id": 129, "options": { @@ -5583,7 +6359,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1572 + "y": 11844 }, "id": 122, "options": { @@ -5677,7 +6453,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1580 + "y": 11852 }, "id": 130, "options": { @@ -5772,7 +6548,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1580 + "y": 11852 }, "id": 123, "options": { @@ -5866,7 +6642,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1588 + "y": 11860 }, "id": 131, "options": { @@ -5961,7 +6737,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1588 + "y": 11860 }, "id": 124, "options": { @@ -6055,7 +6831,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1596 + "y": 11868 }, "id": 132, "options": { @@ -6150,7 +6926,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1596 + "y": 11868 }, "id": 125, "options": { @@ -6244,7 +7020,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1604 + "y": 11876 }, "id": 119, "options": { @@ -6339,7 +7115,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1604 + "y": 11876 }, "id": 126, "options": { @@ -6378,7 +7154,7 @@ "h": 1, "w": 24, "x": 0, - "y": 44 + "y": 45 }, "id": 115, "panels": [ @@ -6447,7 +7223,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 10253 }, "id": 101, "options": { @@ -6541,7 +7317,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 10253 }, "id": 187, "options": { @@ -6636,7 +7412,7 @@ "h": 
8, "w": 12, "x": 0, - "y": 61 + "y": 10261 }, "id": 113, "options": { @@ -6730,7 +7506,7 @@ "h": 8, "w": 12, "x": 12, - "y": 61 + "y": 10261 }, "id": 103, "options": { @@ -6825,7 +7601,7 @@ "h": 8, "w": 12, "x": 0, - "y": 69 + "y": 10269 }, "id": 102, "options": { @@ -6920,7 +7696,7 @@ "h": 8, "w": 12, "x": 12, - "y": 69 + "y": 10269 }, "id": 116, "options": { @@ -7014,7 +7790,7 @@ "h": 8, "w": 12, "x": 0, - "y": 77 + "y": 10277 }, "id": 135, "options": { @@ -7109,7 +7885,7 @@ "h": 8, "w": 12, "x": 12, - "y": 77 + "y": 10277 }, "id": 134, "options": { @@ -7203,7 +7979,7 @@ "h": 8, "w": 12, "x": 0, - "y": 85 + "y": 10285 }, "id": 136, "options": { @@ -7298,7 +8074,7 @@ "h": 8, "w": 12, "x": 12, - "y": 85 + "y": 10285 }, "id": 159, "options": { @@ -7337,7 +8113,7 @@ "h": 1, "w": 24, "x": 0, - "y": 45 + "y": 46 }, "id": 193, "panels": [ @@ -7406,7 +8182,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5130 + "y": 15402 }, "id": 141, "options": { @@ -7500,7 +8276,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5130 + "y": 15402 }, "id": 148, "options": { @@ -7594,7 +8370,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5138 + "y": 15410 }, "id": 142, "options": { @@ -7688,7 +8464,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5138 + "y": 15410 }, "id": 149, "options": { @@ -7782,7 +8558,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5146 + "y": 15418 }, "id": 143, "options": { @@ -7876,7 +8652,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5146 + "y": 15418 }, "id": 150, "options": { @@ -7970,7 +8746,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5154 + "y": 15426 }, "id": 144, "options": { @@ -8064,7 +8840,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5154 + "y": 15426 }, "id": 151, "options": { @@ -8158,7 +8934,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5162 + "y": 15434 }, "id": 145, "options": { @@ -8252,7 +9028,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5162 + "y": 15434 }, "id": 152, "options": { @@ -8346,7 +9122,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5170 + "y": 15442 }, "id": 146, "options": { @@ -8440,7 +9216,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5170 
+ "y": 15442 }, "id": 153, "options": { @@ -8534,7 +9310,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5178 + "y": 15450 }, "id": 147, "options": { @@ -8628,7 +9404,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5178 + "y": 15450 }, "id": 154, "options": { @@ -8667,7 +9443,7 @@ "h": 1, "w": 24, "x": 0, - "y": 46 + "y": 47 }, "id": 192, "panels": [ @@ -8736,7 +9512,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4906 + "y": 15178 }, "id": 190, "options": { @@ -8831,7 +9607,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4906 + "y": 15178 }, "id": 184, "options": { @@ -8925,7 +9701,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4914 + "y": 15186 }, "id": 188, "options": { @@ -9020,7 +9796,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4914 + "y": 15186 }, "id": 186, "options": { @@ -9115,7 +9891,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4922 + "y": 15194 }, "id": 185, "options": { @@ -9209,7 +9985,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4922 + "y": 15194 }, "id": 189, "options": { @@ -9303,7 +10079,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4930 + "y": 15202 }, "id": 181, "options": { @@ -9398,7 +10174,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4930 + "y": 15202 }, "id": 182, "options": { @@ -9437,7 +10213,7 @@ "h": 1, "w": 24, "x": 0, - "y": 47 + "y": 48 }, "id": 194, "panels": [ @@ -9506,7 +10282,7 @@ "h": 8, "w": 12, "x": 0, - "y": 856 + "y": 11128 }, "id": 170, "options": { @@ -9601,7 +10377,7 @@ "h": 8, "w": 12, "x": 12, - "y": 856 + "y": 11128 }, "id": 171, "options": { @@ -9695,7 +10471,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1256 + "y": 11528 }, "id": 162, "options": { @@ -9790,7 +10566,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1256 + "y": 11528 }, "id": 108, "options": { @@ -9885,7 +10661,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1264 + "y": 11536 }, "id": 169, "options": { @@ -9979,7 +10755,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1264 + "y": 11536 }, "id": 166, "options": { @@ -10073,7 +10849,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1272 + "y": 11544 }, "id": 157, "options": { @@ -10168,7 +10944,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1272 + "y": 
11544 }, "id": 158, "options": { @@ -10262,7 +11038,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1280 + "y": 11552 }, "id": 167, "options": { @@ -10357,7 +11133,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1280 + "y": 11552 }, "id": 168, "options": { @@ -10451,7 +11227,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1288 + "y": 11560 }, "id": 137, "options": { @@ -10546,7 +11322,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1288 + "y": 11560 }, "id": 183, "options": { @@ -10641,7 +11417,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1296 + "y": 11568 }, "id": 241, "options": { @@ -10736,7 +11512,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1296 + "y": 11568 }, "id": 242, "options": { @@ -10831,7 +11607,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1304 + "y": 11576 }, "id": 243, "options": { @@ -10926,7 +11702,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1304 + "y": 11576 }, "id": 244, "options": { @@ -11021,7 +11797,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1312 + "y": 11584 }, "id": 245, "options": { @@ -11116,7 +11892,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1312 + "y": 11584 }, "id": 246, "options": { @@ -11211,7 +11987,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1320 + "y": 11592 }, "id": 247, "options": { @@ -11306,7 +12082,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1320 + "y": 11592 }, "id": 248, "options": { @@ -11345,7 +12121,7 @@ "h": 1, "w": 24, "x": 0, - "y": 48 + "y": 49 }, "id": 195, "panels": [ @@ -11414,7 +12190,7 @@ "h": 8, "w": 12, "x": 0, - "y": 81 + "y": 10353 }, "id": 161, "options": { @@ -11508,7 +12284,7 @@ "h": 8, "w": 12, "x": 12, - "y": 81 + "y": 10353 }, "id": 104, "options": { @@ -11603,7 +12379,7 @@ "h": 8, "w": 12, "x": 0, - "y": 89 + "y": 10361 }, "id": 105, "options": { @@ -11697,7 +12473,7 @@ "h": 8, "w": 12, "x": 12, - "y": 89 + "y": 10361 }, "id": 164, "options": { @@ -11791,7 +12567,7 @@ "h": 8, "w": 12, "x": 0, - "y": 97 + "y": 10369 }, "id": 163, "options": { @@ -11886,7 +12662,7 @@ "h": 8, "w": 12, "x": 12, - "y": 97 + "y": 10369 }, "id": 165, "options": { @@ -11981,7 +12757,7 @@ "h": 8, "w": 12, "x": 0, - "y": 
105 + "y": 10377 }, "id": 223, "options": { @@ -12020,7 +12796,7 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 50 }, "id": 210, "panels": [ @@ -12090,7 +12866,7 @@ "h": 8, "w": 12, "x": 0, - "y": 858 + "y": 11130 }, "id": 211, "options": { @@ -12185,7 +12961,7 @@ "h": 8, "w": 12, "x": 12, - "y": 858 + "y": 11130 }, "id": 212, "options": { @@ -12280,7 +13056,7 @@ "h": 8, "w": 12, "x": 0, - "y": 938 + "y": 11210 }, "id": 213, "options": { @@ -12375,7 +13151,7 @@ "h": 8, "w": 12, "x": 12, - "y": 938 + "y": 11210 }, "id": 214, "options": { @@ -12470,7 +13246,7 @@ "h": 8, "w": 12, "x": 0, - "y": 946 + "y": 11218 }, "id": 215, "options": { @@ -12565,7 +13341,7 @@ "h": 8, "w": 12, "x": 12, - "y": 946 + "y": 11218 }, "id": 216, "options": { @@ -12660,7 +13436,7 @@ "h": 8, "w": 12, "x": 0, - "y": 954 + "y": 11226 }, "id": 217, "options": { @@ -12755,7 +13531,7 @@ "h": 8, "w": 12, "x": 12, - "y": 954 + "y": 11226 }, "id": 218, "options": { @@ -12794,7 +13570,7 @@ "h": 1, "w": 24, "x": 0, - "y": 50 + "y": 51 }, "id": 230, "panels": [ @@ -12864,7 +13640,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3700 + "y": 13972 }, "id": 231, "options": { @@ -12958,7 +13734,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3700 + "y": 13972 }, "id": 178, "options": { @@ -13053,7 +13829,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3708 + "y": 13980 }, "id": 179, "options": { @@ -13147,7 +13923,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3708 + "y": 13980 }, "id": 156, "options": { @@ -13186,7 +13962,7 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 52 }, "id": 250, "panels": [ @@ -13256,7 +14032,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3492 + "y": 13764 }, "id": 251, "options": { @@ -13351,7 +14127,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3492 + "y": 13764 }, "id": 252, "options": { @@ -13446,7 +14222,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3604 + "y": 13876 }, "id": 253, "options": { @@ -13541,7 +14317,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3604 + "y": 13876 }, "id": 254, "options": { @@ -13636,7 +14412,7 @@ "h": 8, "w": 12, "x": 0, - 
"y": 3612 + "y": 13884 }, "id": 273, "options": { @@ -13675,7 +14451,7 @@ "h": 1, "w": 24, "x": 0, - "y": 52 + "y": 53 }, "id": 100, "panels": [ @@ -13744,7 +14520,7 @@ "h": 8, "w": 12, "x": 0, - "y": 861 + "y": 11133 }, "id": 107, "options": { @@ -13839,7 +14615,7 @@ "h": 8, "w": 12, "x": 12, - "y": 861 + "y": 11133 }, "id": 110, "options": { @@ -13934,7 +14710,7 @@ "h": 8, "w": 12, "x": 0, - "y": 869 + "y": 11141 }, "id": 180, "options": { @@ -14028,7 +14804,7 @@ "h": 8, "w": 12, "x": 12, - "y": 869 + "y": 11141 }, "id": 160, "options": { @@ -14122,7 +14898,7 @@ "h": 8, "w": 12, "x": 0, - "y": 877 + "y": 11149 }, "id": 139, "options": { @@ -14216,7 +14992,7 @@ "h": 8, "w": 12, "x": 12, - "y": 877 + "y": 11149 }, "id": 176, "options": { @@ -14310,7 +15086,7 @@ "h": 8, "w": 12, "x": 0, - "y": 885 + "y": 11157 }, "id": 133, "options": { @@ -14405,7 +15181,7 @@ "h": 8, "w": 12, "x": 12, - "y": 885 + "y": 11157 }, "id": 221, "options": { @@ -14499,7 +15275,7 @@ "h": 8, "w": 12, "x": 0, - "y": 893 + "y": 11165 }, "id": 177, "options": { @@ -14594,7 +15370,7 @@ "h": 8, "w": 12, "x": 12, - "y": 893 + "y": 11165 }, "id": 271, "options": { @@ -14689,7 +15465,7 @@ "h": 8, "w": 12, "x": 0, - "y": 901 + "y": 11173 }, "id": 274, "options": { @@ -14784,7 +15560,7 @@ "h": 8, "w": 12, "x": 12, - "y": 901 + "y": 11173 }, "id": 272, "options": { @@ -14879,7 +15655,7 @@ "h": 8, "w": 12, "x": 0, - "y": 909 + "y": 11181 }, "id": 232, "options": { @@ -14974,7 +15750,7 @@ "h": 8, "w": 12, "x": 12, - "y": 909 + "y": 11181 }, "id": 233, "options": { @@ -15069,7 +15845,7 @@ "h": 8, "w": 12, "x": 0, - "y": 917 + "y": 11189 }, "id": 234, "options": { @@ -15164,7 +15940,7 @@ "h": 8, "w": 12, "x": 12, - "y": 917 + "y": 11189 }, "id": 235, "options": { @@ -15259,7 +16035,7 @@ "h": 8, "w": 12, "x": 0, - "y": 925 + "y": 11197 }, "id": 222, "options": { @@ -15308,6 +16084,6 @@ "timezone": "browser", "title": "CryptoSim", "uid": "adnqfm4", - "version": 29, + "version": 31, "weekStart": "" } \ No 
newline at end of file diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go index fd5b077bca..9b7d73aa7f 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go @@ -70,6 +70,11 @@ func newCacheMetrics( "pebblecache_miss_latency", metric.WithDescription("Time taken to resolve a cache miss from the backing store"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries( + 0.00001, 0.000025, 0.00005, 0.0001, 0.00025, 0.0005, // 10μs–500μs + 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, // 1ms–50ms + 0.1, 0.25, 0.5, 1, // 100ms–1s + ), ) cm := &CacheMetrics{ From 07e071c9e4f5583fcdb6070ea1999802ee2e1563 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 12:36:51 -0500 Subject: [PATCH 041/119] fix histograms --- sei-db/common/metrics/buckets.go | 23 +++++++++++++++++++ sei-db/common/metrics/phase_timer.go | 1 + sei-db/db_engine/pebbledb/pebble_metrics.go | 12 ++++++++++ .../pebbledb/pebblecache/cache_metrics.go | 8 +++---- 4 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 sei-db/common/metrics/buckets.go diff --git a/sei-db/common/metrics/buckets.go b/sei-db/common/metrics/buckets.go new file mode 100644 index 0000000000..42977fd032 --- /dev/null +++ b/sei-db/common/metrics/buckets.go @@ -0,0 +1,23 @@ +package metrics + +// Shared histogram bucket boundaries for use across the codebase. +// The OTel defaults are too coarse for meaningful percentile queries in Grafana. + +// LatencyBuckets covers 10μs to 5 minutes — wide enough for both fast key +// lookups and slow compactions/flushes without needing per-metric tuning. 
+var LatencyBuckets = []float64{ + 0.00001, 0.000025, 0.00005, 0.0001, 0.00025, 0.0005, // 10μs–500μs + 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, // 1ms–50ms + 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60, 120, 300, // 100ms–5min +} + +// ByteSizeBuckets covers 256B to 1GB for data size histograms. +var ByteSizeBuckets = []float64{ + 256, 1024, 4096, 16384, 65536, 262144, // 256B–256KB + 1 << 20, 4 << 20, 16 << 20, 64 << 20, 256 << 20, 1 << 30, // 1MB–1GB +} + +// CountBuckets covers 1 to 1M for per-operation step/iteration counts. +var CountBuckets = []float64{ + 1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000, +} diff --git a/sei-db/common/metrics/phase_timer.go b/sei-db/common/metrics/phase_timer.go index 06700257db..3a4c5eaadb 100644 --- a/sei-db/common/metrics/phase_timer.go +++ b/sei-db/common/metrics/phase_timer.go @@ -30,6 +30,7 @@ func NewPhaseTimerFactory(meter metric.Meter, timerName string) *PhaseTimerFacto timerName+"_phase_latency_seconds", metric.WithDescription("Latency per phase (seconds); use for p99, p95, etc."), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(LatencyBuckets...), ) return &PhaseTimerFactory{ phaseDurationTotal: phaseDurationTotal, diff --git a/sei-db/db_engine/pebbledb/pebble_metrics.go b/sei-db/db_engine/pebbledb/pebble_metrics.go index 3096462fa9..54f66c589e 100644 --- a/sei-db/db_engine/pebbledb/pebble_metrics.go +++ b/sei-db/db_engine/pebbledb/pebble_metrics.go @@ -10,6 +10,8 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/cockroachdb/pebble/v2" + + smetrics "github.com/sei-protocol/sei-chain/sei-db/common/metrics" ) const pebbleMeterName = "seidb_pebble" @@ -263,31 +265,37 @@ func NewPebbleMetrics( "pebble_get_latency", metric.WithDescription("Time taken to get a key from PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) applyChangesetLatency, _ := meter.Float64Histogram( "pebble_apply_changeset_latency", metric.WithDescription("Time taken to apply 
changeset to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) applyChangesetAsyncLatency, _ := meter.Float64Histogram( "pebble_apply_changeset_async_latency", metric.WithDescription("Time taken to queue changeset for async write"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) pruneLatency, _ := meter.Float64Histogram( "pebble_prune_latency", metric.WithDescription("Time taken to prune old versions from PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) importLatency, _ := meter.Float64Histogram( "pebble_import_latency", metric.WithDescription("Time taken to import snapshot data to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) batchWriteLatency, _ := meter.Float64Histogram( "pebble_batch_write_latency", metric.WithDescription("Time taken to write a batch to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) compactionCount, _ := meter.Int64Counter( @@ -299,6 +307,7 @@ func NewPebbleMetrics( "pebble_compaction_duration", metric.WithDescription("Duration of compaction operations"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) compactionBytesRead, _ := meter.Int64Counter( "pebble_compaction_bytes_read", @@ -421,6 +430,7 @@ func NewPebbleMetrics( "pebble_flush_duration", metric.WithDescription("Duration of memtable flush operations"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) flushBytesWritten, _ := meter.Int64Counter( "pebble_flush_bytes_written", @@ -976,6 +986,7 @@ func NewPebbleMetrics( "pebble_batch_size", metric.WithDescription("Size of batches written to PebbleDB"), metric.WithUnit("By"), + metric.WithExplicitBucketBoundaries(smetrics.ByteSizeBuckets...), ) pendingChangesQueueDepth, _ := meter.Int64Gauge( 
"pebble_pending_changes_queue_depth", @@ -986,6 +997,7 @@ func NewPebbleMetrics( "pebble_iterator_iterations", metric.WithDescription("Number of iterations per iterator"), metric.WithUnit("{count}"), + metric.WithExplicitBucketBoundaries(smetrics.CountBuckets...), ) pm := &PebbleMetrics{ diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go index 9b7d73aa7f..c853cb5724 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go @@ -7,6 +7,8 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" + + smetrics "github.com/sei-protocol/sei-chain/sei-db/common/metrics" ) const cacheMeterName = "seidb_pebblecache" @@ -70,11 +72,7 @@ func newCacheMetrics( "pebblecache_miss_latency", metric.WithDescription("Time taken to resolve a cache miss from the backing store"), metric.WithUnit("s"), - metric.WithExplicitBucketBoundaries( - 0.00001, 0.000025, 0.00005, 0.0001, 0.00025, 0.0005, // 10μs–500μs - 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, // 1ms–50ms - 0.1, 0.25, 0.5, 1, // 100ms–1s - ), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) cm := &CacheMetrics{ From d09079689d50e1cdee3d05ee32da0da57aa25b28 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 14:50:32 -0500 Subject: [PATCH 042/119] threading tests --- sei-db/common/threading/chan_utils_test.go | 61 +++++ sei-db/common/threading/fixed_pool.go | 1 - sei-db/common/threading/pool_test.go | 251 +++++++++++++++++++++ 3 files changed, 312 insertions(+), 1 deletion(-) create mode 100644 sei-db/common/threading/chan_utils_test.go create mode 100644 sei-db/common/threading/pool_test.go diff --git a/sei-db/common/threading/chan_utils_test.go b/sei-db/common/threading/chan_utils_test.go new file mode 100644 index 0000000000..0b1bd6d070 --- /dev/null +++ b/sei-db/common/threading/chan_utils_test.go @@ 
-0,0 +1,61 @@ +package threading + +import ( + "context" + "testing" +) + +func TestInterruptiblePush_Success(t *testing.T) { + ch := make(chan int, 1) + err := InterruptiblePush(t.Context(), ch, 42) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v := <-ch; v != 42 { + t.Errorf("expected 42, got %d", v) + } +} + +func TestInterruptiblePush_ContextCancelled(t *testing.T) { + ch := make(chan int) + ctx, cancel := context.WithCancel(t.Context()) + cancel() + + err := InterruptiblePush(ctx, ch, 42) + if err == nil { + t.Error("expected error from InterruptiblePush with cancelled context") + } +} + +func TestInterruptiblePull_Success(t *testing.T) { + ch := make(chan int, 1) + ch <- 42 + v, err := InterruptiblePull(t.Context(), ch) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if v != 42 { + t.Errorf("expected 42, got %d", v) + } +} + +func TestInterruptiblePull_ContextCancelled(t *testing.T) { + ch := make(chan int) + ctx, cancel := context.WithCancel(t.Context()) + cancel() + + _, err := InterruptiblePull(ctx, ch) + if err == nil { + t.Error("expected error from InterruptiblePull with cancelled context") + } +} + +func TestInterruptiblePull_ChannelClosed(t *testing.T) { + ch := make(chan int) + close(ch) + + _, err := InterruptiblePull(t.Context(), ch) + if err == nil { + t.Error("expected error from InterruptiblePull on closed channel") + } +} diff --git a/sei-db/common/threading/fixed_pool.go b/sei-db/common/threading/fixed_pool.go index 5044d30cd2..13921ac883 100644 --- a/sei-db/common/threading/fixed_pool.go +++ b/sei-db/common/threading/fixed_pool.go @@ -13,7 +13,6 @@ type fixedPool struct { workQueue chan func() } -// TODO add metrics! // TODO unit test before merging! // Create a new work pool. 
diff --git a/sei-db/common/threading/pool_test.go b/sei-db/common/threading/pool_test.go new file mode 100644 index 0000000000..605237b8ed --- /dev/null +++ b/sei-db/common/threading/pool_test.go @@ -0,0 +1,251 @@ +package threading + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" +) + +const testTimeout = 10 * time.Second + +func waitOrFail(t *testing.T, wg *sync.WaitGroup) { + t.Helper() + ch := make(chan struct{}) + go func() { + wg.Wait() + close(ch) + }() + select { + case <-ch: + case <-time.After(testTimeout): + t.Fatal("timed out waiting for tasks to complete") + } +} + +func createPools(ctx context.Context) []struct { + name string + pool Pool +} { + return []struct { + name string + pool Pool + }{ + {"FixedPool", NewFixedPool(ctx, "test-fixed", 4, 16)}, + {"ElasticPool", NewElasticPool(ctx, "test-elastic", 4)}, + {"AdHocPool", NewAdHocPool()}, + } +} + +func TestPool_AllTasksComplete(t *testing.T) { + for _, tc := range createPools(t.Context()) { + t.Run(tc.name, func(t *testing.T) { + const n = 100 + var counter atomic.Int64 + var wg sync.WaitGroup + wg.Add(n) + + for i := 0; i < n; i++ { + err := tc.pool.Submit(t.Context(), func() { + counter.Add(1) + wg.Done() + }) + if err != nil { + t.Fatalf("Submit failed: %v", err) + } + } + + waitOrFail(t, &wg) + if got := counter.Load(); got != n { + t.Errorf("expected %d tasks completed, got %d", n, got) + } + }) + } +} + +func TestPool_BlockedTasksDontCompleteUntilUnblocked(t *testing.T) { + for _, tc := range createPools(t.Context()) { + t.Run(tc.name, func(t *testing.T) { + blocker := make(chan struct{}) + var counter atomic.Int64 + var wg sync.WaitGroup + wg.Add(2) + + for i := 0; i < 2; i++ { + err := tc.pool.Submit(t.Context(), func() { + defer wg.Done() + <-blocker + counter.Add(1) + }) + if err != nil { + t.Fatalf("Submit failed: %v", err) + } + } + + time.Sleep(10 * time.Millisecond) + if got := counter.Load(); got != 0 { + t.Errorf("expected counter=0 while blocked, got %d", got) + 
} + + close(blocker) + waitOrFail(t, &wg) + + if got := counter.Load(); got != 2 { + t.Errorf("expected counter=2 after unblock, got %d", got) + } + }) + } +} + +func TestFixedPool_SubmitBlocksWhenFull(t *testing.T) { + const workers = 2 + const queueSize = 2 + pool := NewFixedPool(t.Context(), "test-fixed-block", workers, queueSize) + + blocker := make(chan struct{}) + var completed atomic.Int64 + var wg sync.WaitGroup + + // Phase 1: occupy all workers with blocking tasks. + wg.Add(workers) + for i := 0; i < workers; i++ { + err := pool.Submit(t.Context(), func() { + defer wg.Done() + <-blocker + completed.Add(1) + }) + if err != nil { + t.Fatalf("Submit failed: %v", err) + } + } + time.Sleep(10 * time.Millisecond) + + // Phase 2: fill the queue buffer. + wg.Add(queueSize) + for i := 0; i < queueSize; i++ { + err := pool.Submit(t.Context(), func() { + defer wg.Done() + <-blocker + completed.Add(1) + }) + if err != nil { + t.Fatalf("Submit failed: %v", err) + } + } + + // Phase 3: the next Submit must block — queue full, all workers busy. 
+ wg.Add(1) + submitDone := make(chan struct{}) + start := time.Now() + go func() { + _ = pool.Submit(t.Context(), func() { + defer wg.Done() + <-blocker + completed.Add(1) + }) + close(submitDone) + }() + + time.Sleep(20 * time.Millisecond) + select { + case <-submitDone: + t.Fatalf("Submit returned after only %v; expected it to block", time.Since(start)) + default: + } + + close(blocker) + select { + case <-submitDone: + case <-time.After(testTimeout): + t.Fatal("timed out waiting for blocked submit to complete") + } + waitOrFail(t, &wg) + + expected := int64(workers + queueSize + 1) + if got := completed.Load(); got != expected { + t.Errorf("expected %d tasks completed, got %d", expected, got) + } +} + +func TestElasticPool_ScalesBeyondWarmWorkers(t *testing.T) { + const warmWorkers = 2 + const totalTasks = 10 + pool := NewElasticPool(t.Context(), "test-elastic-scale", warmWorkers) + + blocker := make(chan struct{}) + var started atomic.Int64 + var wg sync.WaitGroup + wg.Add(totalTasks) + + for i := 0; i < totalTasks; i++ { + err := pool.Submit(t.Context(), func() { + defer wg.Done() + started.Add(1) + <-blocker + }) + if err != nil { + t.Fatalf("Submit failed: %v", err) + } + } + + // All tasks should start promptly — elastic pool spawns extra goroutines. + time.Sleep(50 * time.Millisecond) + if got := started.Load(); got <= int64(warmWorkers) { + t.Errorf("expected started > %d (warm workers), got %d", warmWorkers, got) + } + if got := started.Load(); got != totalTasks { + t.Errorf("expected all %d tasks started, got %d", totalTasks, got) + } + + close(blocker) + waitOrFail(t, &wg) +} + +func TestFixedPool_SubmitReturnsErrorOnCancelledContext(t *testing.T) { + poolCtx, poolCancel := context.WithCancel(t.Context()) + defer poolCancel() + + // Use a zero-buffer queue so submit blocks once the worker is busy. 
+ pool := NewFixedPool(poolCtx, "test-ctx", 1, 0) + + blocker := make(chan struct{}) + defer close(blocker) + + _ = pool.Submit(poolCtx, func() { <-blocker }) + time.Sleep(10 * time.Millisecond) + + submitCtx, submitCancel := context.WithCancel(t.Context()) + submitCancel() + + err := pool.Submit(submitCtx, func() {}) + if err == nil { + t.Error("expected error from Submit with cancelled context") + } +} + +func TestFixedPool_SubmitAfterShutdown(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + pool := NewFixedPool(ctx, "test-shutdown", 2, 4) + + cancel() + time.Sleep(10 * time.Millisecond) + + err := pool.Submit(t.Context(), func() {}) + if err == nil { + t.Error("expected error from Submit after pool shutdown") + } +} + +func TestElasticPool_SubmitAfterShutdown(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + pool := NewElasticPool(ctx, "test-shutdown", 2) + + cancel() + time.Sleep(10 * time.Millisecond) + + // Should not panic to the caller. Due to the default branch in the elastic + // pool's select, Submit may either return an error or silently run the task + // in a new goroutine — both outcomes are acceptable. 
+ _ = pool.Submit(t.Context(), func() {}) +} From dfd92c1f7db2bcef4d6c27809383cdf622506d67 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 14:56:28 -0500 Subject: [PATCH 043/119] test lru queue --- .../pebbledb/pebblecache/lru_queue_test.go | 433 ++++++++++++++++-- 1 file changed, 403 insertions(+), 30 deletions(-) diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index 3a96b55126..ca361afea1 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -1,81 +1,454 @@ package pebblecache import ( + "fmt" "testing" ) -func TestLRUQueueTracksSizeCountAndOrder(t *testing.T) { +func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { lru := NewLRUQueue() - lru.Push([]byte("a"), 3) - lru.Push([]byte("b"), 5) - lru.Push([]byte("c"), 7) + key := []byte("a") + lru.Push(key, 1) + key[0] = 'z' - if got := lru.GetCount(); got != 3 { - t.Fatalf("GetCount() = %d, want 3", got) + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("pop after mutating caller key = %q, want %q", got, "a") } +} + +func TestNewLRUQueueStartsEmpty(t *testing.T) { + lru := NewLRUQueue() - if got := lru.GetTotalSize(); got != 15 { - t.Fatalf("GetTotalSize() = %d, want 15", got) + if got := lru.GetCount(); got != 0 { + t.Fatalf("GetCount() = %d, want 0", got) + } + if got := lru.GetTotalSize(); got != 0 { + t.Fatalf("GetTotalSize() = %d, want 0", got) } +} - lru.Touch([]byte("a")) +func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { + lru := NewLRUQueue() - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("first pop = %q, want %q", got, "b") + defer func() { + r := recover() + if r == nil { + t.Fatal("expected panic on pop from empty queue, but none occurred") + } + }() + + lru.PopLeastRecentlyUsed() +} + +func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { + lru := NewLRUQueue() + 
lru.Push([]byte("x"), 1) + lru.PopLeastRecentlyUsed() + + defer func() { + r := recover() + if r == nil { + t.Fatal("expected panic on pop from drained queue, but none occurred") + } + }() + + lru.PopLeastRecentlyUsed() +} + +func TestPushSingleElement(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("only"), 42) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.GetTotalSize(); got != 42 { + t.Fatalf("GetTotalSize() = %d, want 42", got) } + if got := lru.PopLeastRecentlyUsed(); got != "only" { + t.Fatalf("pop = %q, want %q", got, "only") + } +} - if got := lru.PopLeastRecentlyUsed(); got != "c" { - t.Fatalf("second pop = %q, want %q", got, "c") +func TestPushDuplicateDecreasesSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("k"), 100) + lru.Push([]byte("k"), 30) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) } + if got := lru.GetTotalSize(); got != 30 { + t.Fatalf("GetTotalSize() = %d, want 30", got) + } +} + +func TestPushDuplicateMovesToBack(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) + + // Re-push "a" — should move it behind "b" and "c" + lru.Push([]byte("a"), 1) + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("pop = %q, want %q", got, "b") + } + if got := lru.PopLeastRecentlyUsed(); got != "c" { + t.Fatalf("pop = %q, want %q", got, "c") + } if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("third pop = %q, want %q", got, "a") + t.Fatalf("pop = %q, want %q", got, "a") } +} - if got := lru.GetCount(); got != 0 { - t.Fatalf("GetCount() after pops = %d, want 0", got) +func TestPushZeroSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("z"), 0) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.GetTotalSize(); got != 0 { + t.Fatalf("GetTotalSize() = %d, want 0", got) } + if got := 
lru.PopLeastRecentlyUsed(); got != "z" { + t.Fatalf("pop = %q, want %q", got, "z") + } if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() after pops = %d, want 0", got) + t.Fatalf("GetTotalSize() after pop = %d, want 0", got) } } -func TestLRUQueuePushUpdatesExistingEntry(t *testing.T) { +func TestPushEmptyKey(t *testing.T) { lru := NewLRUQueue() + lru.Push([]byte(""), 5) + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "" { + t.Fatalf("pop = %q, want %q", got, "") + } +} + +func TestPushRepeatedUpdatesToSameKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("k"), 1) + lru.Push([]byte("k"), 2) + lru.Push([]byte("k"), 3) + lru.Push([]byte("k"), 4) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.GetTotalSize(); got != 4 { + t.Fatalf("GetTotalSize() = %d, want 4 (last push)", got) + } +} + +func TestTouchNonexistentKeyIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + + // Should not panic or change state. 
+ lru.Touch([]byte("missing")) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("pop = %q, want %q", got, "a") + } +} + +func TestTouchOnEmptyQueueIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Touch([]byte("ghost")) + + if got := lru.GetCount(); got != 0 { + t.Fatalf("GetCount() = %d, want 0", got) + } +} + +func TestTouchSingleElement(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("solo"), 10) + lru.Touch([]byte("solo")) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "solo" { + t.Fatalf("pop = %q, want %q", got, "solo") + } +} + +func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { + lru := NewLRUQueue() lru.Push([]byte("a"), 3) - lru.Push([]byte("b"), 5) - lru.Push([]byte("a"), 11) + lru.Push([]byte("b"), 7) + + lru.Touch([]byte("a")) if got := lru.GetCount(); got != 2 { t.Fatalf("GetCount() = %d, want 2", got) } + if got := lru.GetTotalSize(); got != 10 { + t.Fatalf("GetTotalSize() = %d, want 10", got) + } +} + +func TestMultipleTouchesChangeOrder(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) - if got := lru.GetTotalSize(); got != 16 { - t.Fatalf("GetTotalSize() = %d, want 16", got) + // Order: a, b, c + lru.Touch([]byte("a")) // Order: b, c, a + lru.Touch([]byte("b")) // Order: c, a, b + + if got := lru.PopLeastRecentlyUsed(); got != "c" { + t.Fatalf("pop = %q, want %q", got, "c") } + if got := lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("pop = %q, want %q", got, "a") + } + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("pop = %q, want %q", got, "b") + } +} + +func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Touch([]byte("b")) // "b" is already at back + + if got := 
lru.PopLeastRecentlyUsed(); got != "a" { + t.Fatalf("pop = %q, want %q", got, "a") + } if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("first pop = %q, want %q", got, "b") + t.Fatalf("pop = %q, want %q", got, "b") } +} + +func TestPopDecrementsCountAndSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("c"), 30) + + lru.PopLeastRecentlyUsed() + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + if got := lru.GetTotalSize(); got != 50 { + t.Fatalf("GetTotalSize() = %d, want 50", got) + } + + lru.PopLeastRecentlyUsed() + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.GetTotalSize(); got != 30 { + t.Fatalf("GetTotalSize() = %d, want 30", got) + } +} + +func TestPopFIFOOrderWithoutTouches(t *testing.T) { + lru := NewLRUQueue() + keys := []string{"first", "second", "third", "fourth"} + for _, k := range keys { + lru.Push([]byte(k), 1) + } + + for _, want := range keys { + if got := lru.PopLeastRecentlyUsed(); got != want { + t.Fatalf("pop = %q, want %q", got, want) + } + } +} + +func TestPushAfterDrain(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 5) + lru.PopLeastRecentlyUsed() + + // Queue is empty; push new entries. 
+ lru.Push([]byte("x"), 10) + lru.Push([]byte("y"), 20) + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + if got := lru.GetTotalSize(); got != 30 { + t.Fatalf("GetTotalSize() = %d, want 30", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "x" { + t.Fatalf("pop = %q, want %q", got, "x") + } +} + +func TestPushPreviouslyPoppedKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("recycled"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("recycled"), 99) + + if got := lru.GetCount(); got != 1 { + t.Fatalf("GetCount() = %d, want 1", got) + } + if got := lru.GetTotalSize(); got != 99 { + t.Fatalf("GetTotalSize() = %d, want 99", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "recycled" { + t.Fatalf("pop = %q, want %q", got, "recycled") + } +} + +func TestInterleavedPushAndPop(t *testing.T) { + lru := NewLRUQueue() + + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 2) if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("second pop = %q, want %q", got, "a") + t.Fatalf("pop = %q, want %q", got, "a") + } + + lru.Push([]byte("c"), 3) + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + if got := lru.GetTotalSize(); got != 5 { + t.Fatalf("GetTotalSize() = %d, want 5", got) + } + + // "b" was pushed before "c" + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("pop = %q, want %q", got, "b") + } + if got := lru.PopLeastRecentlyUsed(); got != "c" { + t.Fatalf("pop = %q, want %q", got, "c") } } -func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { +func TestTouchThenPushSameKey(t *testing.T) { lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) - key := []byte("a") + lru.Touch([]byte("a")) // order: b, a + lru.Push([]byte("a"), 50) // updates size, stays at back + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + if got := lru.GetTotalSize(); got != 51 { + t.Fatalf("GetTotalSize() = 
%d, want 51", got) + } + if got := lru.PopLeastRecentlyUsed(); got != "b" { + t.Fatalf("pop = %q, want %q", got, "b") + } +} + +func TestBinaryKeyData(t *testing.T) { + lru := NewLRUQueue() + k1 := []byte{0x00, 0xFF, 0x01} + k2 := []byte{0x00, 0xFF, 0x02} + + lru.Push(k1, 10) + lru.Push(k2, 20) + + if got := lru.GetCount(); got != 2 { + t.Fatalf("GetCount() = %d, want 2", got) + } + if got := lru.PopLeastRecentlyUsed(); got != string(k1) { + t.Fatalf("pop = %q, want %q", got, string(k1)) + } + + lru.Touch(k2) + if got := lru.PopLeastRecentlyUsed(); got != string(k2) { + t.Fatalf("pop = %q, want %q", got, string(k2)) + } +} + +func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { + lru := NewLRUQueue() + key := []byte("abc") lru.Push(key, 1) - key[0] = 'z' - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop after mutating caller key = %q, want %q", got, "a") + key[0] = 'Z' + lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op + + if got := lru.PopLeastRecentlyUsed(); got != "abc" { + t.Fatalf("pop = %q, want %q", got, "abc") } } -// TODO expand these tests +func TestManyEntries(t *testing.T) { + lru := NewLRUQueue() + n := 1000 + totalSize := 0 + + for i := 0; i < n; i++ { + k := fmt.Sprintf("key-%04d", i) + lru.Push([]byte(k), i+1) + totalSize += i + 1 + } + + if got := lru.GetCount(); got != n { + t.Fatalf("GetCount() = %d, want %d", got, n) + } + if got := lru.GetTotalSize(); got != totalSize { + t.Fatalf("GetTotalSize() = %d, want %d", got, totalSize) + } + + // FIFO order should be maintained. 
+ for i := 0; i < n; i++ { + want := fmt.Sprintf("key-%04d", i) + if got := lru.PopLeastRecentlyUsed(); got != want { + t.Fatalf("pop %d = %q, want %q", i, got, want) + } + } + + if got := lru.GetCount(); got != 0 { + t.Fatalf("GetCount() after drain = %d, want 0", got) + } + if got := lru.GetTotalSize(); got != 0 { + t.Fatalf("GetTotalSize() after drain = %d, want 0", got) + } +} + +func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 + + // total = 5 + 20 = 25 + if got := lru.GetTotalSize(); got != 25 { + t.Fatalf("GetTotalSize() = %d, want 25", got) + } + + // Pop "b" (it's the LRU since "a" was re-pushed to back). + lru.PopLeastRecentlyUsed() + if got := lru.GetTotalSize(); got != 5 { + t.Fatalf("GetTotalSize() after popping b = %d, want 5", got) + } + + lru.PopLeastRecentlyUsed() + if got := lru.GetTotalSize(); got != 0 { + t.Fatalf("GetTotalSize() after popping a = %d, want 0", got) + } +} From f751a9b65a341141f773b7420311030276f5f49d Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 15:22:14 -0500 Subject: [PATCH 044/119] unit tests for shard --- .../pebbledb/pebblecache/lru_queue_test.go | 276 ++---- .../pebbledb/pebblecache/shard_manager.go | 5 - .../pebbledb/pebblecache/shard_test.go | 821 ++++++++++++++++++ 3 files changed, 887 insertions(+), 215 deletions(-) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_test.go diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index ca361afea1..70da01315a 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -3,6 +3,8 @@ package pebblecache import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { @@ -12,33 
+14,19 @@ func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { lru.Push(key, 1) key[0] = 'z' - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop after mutating caller key = %q, want %q", got, "a") - } + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) } func TestNewLRUQueueStartsEmpty(t *testing.T) { lru := NewLRUQueue() - if got := lru.GetCount(); got != 0 { - t.Fatalf("GetCount() = %d, want 0", got) - } - if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() = %d, want 0", got) - } + require.Equal(t, 0, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) } func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { lru := NewLRUQueue() - - defer func() { - r := recover() - if r == nil { - t.Fatal("expected panic on pop from empty queue, but none occurred") - } - }() - - lru.PopLeastRecentlyUsed() + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) } func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { @@ -46,29 +34,16 @@ func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { lru.Push([]byte("x"), 1) lru.PopLeastRecentlyUsed() - defer func() { - r := recover() - if r == nil { - t.Fatal("expected panic on pop from drained queue, but none occurred") - } - }() - - lru.PopLeastRecentlyUsed() + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) } func TestPushSingleElement(t *testing.T) { lru := NewLRUQueue() lru.Push([]byte("only"), 42) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 42 { - t.Fatalf("GetTotalSize() = %d, want 42", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "only" { - t.Fatalf("pop = %q, want %q", got, "only") - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 42, lru.GetTotalSize()) + require.Equal(t, "only", lru.PopLeastRecentlyUsed()) } func TestPushDuplicateDecreasesSize(t *testing.T) { @@ -76,12 +51,8 @@ func TestPushDuplicateDecreasesSize(t *testing.T) { 
lru.Push([]byte("k"), 100) lru.Push([]byte("k"), 30) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 30 { - t.Fatalf("GetTotalSize() = %d, want 30", got) - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) } func TestPushDuplicateMovesToBack(t *testing.T) { @@ -93,46 +64,27 @@ func TestPushDuplicateMovesToBack(t *testing.T) { // Re-push "a" — should move it behind "b" and "c" lru.Push([]byte("a"), 1) - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("pop = %q, want %q", got, "b") - } - if got := lru.PopLeastRecentlyUsed(); got != "c" { - t.Fatalf("pop = %q, want %q", got, "c") - } - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop = %q, want %q", got, "a") - } + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) } func TestPushZeroSize(t *testing.T) { lru := NewLRUQueue() lru.Push([]byte("z"), 0) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() = %d, want 0", got) - } - - if got := lru.PopLeastRecentlyUsed(); got != "z" { - t.Fatalf("pop = %q, want %q", got, "z") - } - if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() after pop = %d, want 0", got) - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, "z", lru.PopLeastRecentlyUsed()) + require.Equal(t, 0, lru.GetTotalSize()) } func TestPushEmptyKey(t *testing.T) { lru := NewLRUQueue() lru.Push([]byte(""), 5) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "" { - t.Fatalf("pop = %q, want %q", got, "") - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "", lru.PopLeastRecentlyUsed()) } func 
TestPushRepeatedUpdatesToSameKey(t *testing.T) { @@ -142,36 +94,25 @@ func TestPushRepeatedUpdatesToSameKey(t *testing.T) { lru.Push([]byte("k"), 3) lru.Push([]byte("k"), 4) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 4 { - t.Fatalf("GetTotalSize() = %d, want 4 (last push)", got) - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 4, lru.GetTotalSize()) } func TestTouchNonexistentKeyIsNoop(t *testing.T) { lru := NewLRUQueue() lru.Push([]byte("a"), 1) - // Should not panic or change state. lru.Touch([]byte("missing")) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop = %q, want %q", got, "a") - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) } func TestTouchOnEmptyQueueIsNoop(t *testing.T) { lru := NewLRUQueue() lru.Touch([]byte("ghost")) - if got := lru.GetCount(); got != 0 { - t.Fatalf("GetCount() = %d, want 0", got) - } + require.Equal(t, 0, lru.GetCount()) } func TestTouchSingleElement(t *testing.T) { @@ -179,12 +120,8 @@ func TestTouchSingleElement(t *testing.T) { lru.Push([]byte("solo"), 10) lru.Touch([]byte("solo")) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "solo" { - t.Fatalf("pop = %q, want %q", got, "solo") - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) } func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { @@ -194,12 +131,8 @@ func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { lru.Touch([]byte("a")) - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.GetTotalSize(); got != 10 { - t.Fatalf("GetTotalSize() = %d, want 10", got) - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 10, lru.GetTotalSize()) } func 
TestMultipleTouchesChangeOrder(t *testing.T) { @@ -212,15 +145,9 @@ func TestMultipleTouchesChangeOrder(t *testing.T) { lru.Touch([]byte("a")) // Order: b, c, a lru.Touch([]byte("b")) // Order: c, a, b - if got := lru.PopLeastRecentlyUsed(); got != "c" { - t.Fatalf("pop = %q, want %q", got, "c") - } - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop = %q, want %q", got, "a") - } - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("pop = %q, want %q", got, "b") - } + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) } func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { @@ -230,12 +157,8 @@ func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { lru.Touch([]byte("b")) // "b" is already at back - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop = %q, want %q", got, "a") - } - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("pop = %q, want %q", got, "b") - } + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) } func TestPopDecrementsCountAndSize(t *testing.T) { @@ -246,21 +169,13 @@ func TestPopDecrementsCountAndSize(t *testing.T) { lru.PopLeastRecentlyUsed() - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.GetTotalSize(); got != 50 { - t.Fatalf("GetTotalSize() = %d, want 50", got) - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 50, lru.GetTotalSize()) lru.PopLeastRecentlyUsed() - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 30 { - t.Fatalf("GetTotalSize() = %d, want 30", got) - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) } func TestPopFIFOOrderWithoutTouches(t *testing.T) { @@ -271,9 +186,7 @@ func TestPopFIFOOrderWithoutTouches(t *testing.T) { } for _, want := range keys { 
- if got := lru.PopLeastRecentlyUsed(); got != want { - t.Fatalf("pop = %q, want %q", got, want) - } + require.Equal(t, want, lru.PopLeastRecentlyUsed()) } } @@ -282,19 +195,12 @@ func TestPushAfterDrain(t *testing.T) { lru.Push([]byte("a"), 5) lru.PopLeastRecentlyUsed() - // Queue is empty; push new entries. lru.Push([]byte("x"), 10) lru.Push([]byte("y"), 20) - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.GetTotalSize(); got != 30 { - t.Fatalf("GetTotalSize() = %d, want 30", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "x" { - t.Fatalf("pop = %q, want %q", got, "x") - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) + require.Equal(t, "x", lru.PopLeastRecentlyUsed()) } func TestPushPreviouslyPoppedKey(t *testing.T) { @@ -304,15 +210,9 @@ func TestPushPreviouslyPoppedKey(t *testing.T) { lru.Push([]byte("recycled"), 99) - if got := lru.GetCount(); got != 1 { - t.Fatalf("GetCount() = %d, want 1", got) - } - if got := lru.GetTotalSize(); got != 99 { - t.Fatalf("GetTotalSize() = %d, want 99", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "recycled" { - t.Fatalf("pop = %q, want %q", got, "recycled") - } + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 99, lru.GetTotalSize()) + require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) } func TestInterleavedPushAndPop(t *testing.T) { @@ -321,26 +221,14 @@ func TestInterleavedPushAndPop(t *testing.T) { lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 2) - if got := lru.PopLeastRecentlyUsed(); got != "a" { - t.Fatalf("pop = %q, want %q", got, "a") - } + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) lru.Push([]byte("c"), 3) - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.GetTotalSize(); got != 5 { - t.Fatalf("GetTotalSize() = %d, want 5", got) - } - - // "b" was pushed before "c" - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("pop = %q, 
want %q", got, "b") - } - if got := lru.PopLeastRecentlyUsed(); got != "c" { - t.Fatalf("pop = %q, want %q", got, "c") - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 5, lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) } func TestTouchThenPushSameKey(t *testing.T) { @@ -351,15 +239,9 @@ func TestTouchThenPushSameKey(t *testing.T) { lru.Touch([]byte("a")) // order: b, a lru.Push([]byte("a"), 50) // updates size, stays at back - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.GetTotalSize(); got != 51 { - t.Fatalf("GetTotalSize() = %d, want 51", got) - } - if got := lru.PopLeastRecentlyUsed(); got != "b" { - t.Fatalf("pop = %q, want %q", got, "b") - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 51, lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) } func TestBinaryKeyData(t *testing.T) { @@ -370,17 +252,11 @@ func TestBinaryKeyData(t *testing.T) { lru.Push(k1, 10) lru.Push(k2, 20) - if got := lru.GetCount(); got != 2 { - t.Fatalf("GetCount() = %d, want 2", got) - } - if got := lru.PopLeastRecentlyUsed(); got != string(k1) { - t.Fatalf("pop = %q, want %q", got, string(k1)) - } + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) lru.Touch(k2) - if got := lru.PopLeastRecentlyUsed(); got != string(k2) { - t.Fatalf("pop = %q, want %q", got, string(k2)) - } + require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) } func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { @@ -391,9 +267,7 @@ func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { key[0] = 'Z' lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op - if got := lru.PopLeastRecentlyUsed(); got != "abc" { - t.Fatalf("pop = %q, want %q", got, "abc") - } + require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) } func TestManyEntries(t *testing.T) { @@ -407,27 
+281,16 @@ func TestManyEntries(t *testing.T) { totalSize += i + 1 } - if got := lru.GetCount(); got != n { - t.Fatalf("GetCount() = %d, want %d", got, n) - } - if got := lru.GetTotalSize(); got != totalSize { - t.Fatalf("GetTotalSize() = %d, want %d", got, totalSize) - } + require.Equal(t, n, lru.GetCount()) + require.Equal(t, totalSize, lru.GetTotalSize()) - // FIFO order should be maintained. for i := 0; i < n; i++ { want := fmt.Sprintf("key-%04d", i) - if got := lru.PopLeastRecentlyUsed(); got != want { - t.Fatalf("pop %d = %q, want %q", i, got, want) - } + require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) } - if got := lru.GetCount(); got != 0 { - t.Fatalf("GetCount() after drain = %d, want 0", got) - } - if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() after drain = %d, want 0", got) - } + require.Equal(t, 0, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) } func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { @@ -436,19 +299,12 @@ func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { lru.Push([]byte("b"), 20) lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 - // total = 5 + 20 = 25 - if got := lru.GetTotalSize(); got != 25 { - t.Fatalf("GetTotalSize() = %d, want 25", got) - } + require.Equal(t, 25, lru.GetTotalSize()) // Pop "b" (it's the LRU since "a" was re-pushed to back). 
lru.PopLeastRecentlyUsed() - if got := lru.GetTotalSize(); got != 5 { - t.Fatalf("GetTotalSize() after popping b = %d, want 5", got) - } + require.Equal(t, 5, lru.GetTotalSize()) lru.PopLeastRecentlyUsed() - if got := lru.GetTotalSize(); got != 0 { - t.Fatalf("GetTotalSize() after popping a = %d, want 0", got) - } + require.Equal(t, 0, lru.GetTotalSize()) } diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go index cb7e3c694c..4f3bbdb41d 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go @@ -36,11 +36,6 @@ func NewShardManager(numShards uint64) (*shardManager, error) { // Shard returns a shard index in [0, numShards). // addr should be the raw address bytes (e.g., 20-byte ETH address). func (s *shardManager) Shard(addr []byte) uint64 { - - // Temporary to measure impact of hash function - // x := binary.BigEndian.Uint64(addr) - // return x & s.mask - h := s.pool.Get().(*maphash.Hash) h.SetSeed(s.seed) h.Reset() diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_test.go b/sei-db/db_engine/pebbledb/pebblecache/shard_test.go new file mode 100644 index 0000000000..969a6c9d33 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_test.go @@ -0,0 +1,821 @@ +package pebblecache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +// newTestShard creates a shard backed by a simple in-memory map. +// The returned readFunc map can be populated before calling Get. 
+func newTestShard(t *testing.T, maxSize int, store map[string][]byte) *shard { + t.Helper() + readFunc := func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + } + s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, maxSize) + require.NoError(t, err) + return s +} + +// --------------------------------------------------------------------------- +// NewShard +// --------------------------------------------------------------------------- + +func TestNewShardValid(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 1024) + require.NoError(t, err) + require.NotNil(t, s) +} + +func TestNewShardZeroMaxSize(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + _, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 0) + require.Error(t, err) +} + +func TestNewShardNegativeMaxSize(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + _, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, -10) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — cache miss flows +// --------------------------------------------------------------------------- + +func TestGetCacheMissFoundInDB(t *testing.T) { + store := map[string][]byte{"hello": []byte("world")} + s := newTestShard(t, 4096, store) + + val, found, err := s.Get([]byte("hello"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "world", string(val)) +} + +func TestGetCacheMissNotFoundInDB(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + val, found, err := s.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + 
+func TestGetCacheMissDBError(t *testing.T) { + dbErr := errors.New("disk on fire") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + _, _, err := s.Get([]byte("boom"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestGetDBErrorDoesNotCacheResult(t *testing.T) { + var calls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient") + } + return []byte("recovered"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + _, _, err := s.Get([]byte("key"), true) + require.Error(t, err, "first call should fail") + + val, found, err := s.Get([]byte("key"), true) + require.NoError(t, err, "second call should succeed") + require.True(t, found) + require.Equal(t, "recovered", string(val)) + require.Equal(t, int64(2), calls.Load(), "error should not be cached") +} + +// --------------------------------------------------------------------------- +// Get — cache hit flows +// --------------------------------------------------------------------------- + +func TestGetCacheHitAvailable(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{"k": []byte("v")}) + + s.Get([]byte("k"), true) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestGetCacheHitDeleted(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Get([]byte("gone"), true) + + val, found, err := s.Get([]byte("gone"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestGetAfterSet(t *testing.T) { + var readCalls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return nil, false, nil + } + s, _ := NewShard(context.Background(), 
threading.NewAdHocPool(), readFunc, 4096) + + s.Set([]byte("k"), []byte("from-set")) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "from-set", string(val)) + require.Equal(t, int64(0), readCalls.Load(), "readFunc should not be called for Set-populated entry") +} + +func TestGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + s := newTestShard(t, 4096, store) + + s.Delete([]byte("k")) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Get — concurrent reads on the same key +// --------------------------------------------------------------------------- + +func TestGetConcurrentSameKey(t *testing.T) { + var readCalls atomic.Int64 + gate := make(chan struct{}) + + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + <-gate + return []byte("value"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + const n = 10 + var wg sync.WaitGroup + errs := make([]error, n) + vals := make([]string, n) + founds := make([]bool, n) + + for i := 0; i < n; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + v, f, e := s.Get([]byte("shared"), true) + vals[idx] = string(v) + founds[idx] = f + errs[idx] = e + }(i) + } + + time.Sleep(50 * time.Millisecond) + close(gate) + wg.Wait() + + for i := 0; i < n; i++ { + require.NoError(t, errs[i], "goroutine %d", i) + require.True(t, founds[i], "goroutine %d", i) + require.Equal(t, "value", vals[i], "goroutine %d", i) + } + + require.Equal(t, int64(1), readCalls.Load(), "readFunc should be called exactly once") +} + +// --------------------------------------------------------------------------- +// Get — context cancellation +// --------------------------------------------------------------------------- + +func 
TestGetContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + readFunc := func(key []byte) ([]byte, bool, error) { + time.Sleep(time.Second) + return []byte("late"), true, nil + } + s, _ := NewShard(ctx, threading.NewAdHocPool(), readFunc, 4096) + + cancel() + + _, _, err := s.Get([]byte("k"), true) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — updateLru flag +// --------------------------------------------------------------------------- + +func TestGetUpdateLruTrue(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s := newTestShard(t, 4096, store) + + s.Get([]byte("a"), true) + s.Get([]byte("b"), true) + + // Touch "a" via Get with updateLru=true, making "b" the LRU. + s.Get([]byte("a"), true) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "b", lru) +} + +func TestGetUpdateLruFalse(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s := newTestShard(t, 4096, store) + + s.Get([]byte("a"), true) + s.Get([]byte("b"), true) + + // Access "a" without updating LRU — "a" should remain the LRU entry. 
+ s.Get([]byte("a"), false) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "a", lru, "updateLru=false should not move entry") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestSetNewKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestSetOverwritesExistingKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("old")) + s.Set([]byte("k"), []byte("new")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestSetOverwritesDeletedKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("revived")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "revived", string(val)) +} + +func TestSetNilValue(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), nil) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +func TestSetEmptyKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte(""), []byte("empty-key-val")) + + val, found, err := s.Get([]byte(""), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "empty-key-val", string(val)) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestDeleteExistingKey(t *testing.T) { + 
s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteNonexistentKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("ghost")) + + val, found, err := s.Get([]byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteThenSetThenGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v1")) + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("v2")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestBatchSetSetsMultiple(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + {Key: []byte("c"), Value: []byte("3")}, + }) + + for _, tc := range []struct { + key, want string + }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := s.Get([]byte(tc.key), false) + require.NoError(t, err, "Get(%q)", tc.key) + require.True(t, found, "Get(%q)", tc.key) + require.Equal(t, tc.want, string(val), "Get(%q)", tc.key) + } +} + +func TestBatchSetMixedSetAndDelete(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("keep"), []byte("v")) + s.Set([]byte("remove"), []byte("v")) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), IsDelete: true}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + + val, found, _ := s.Get([]byte("keep"), false) + require.True(t, found) 
+ require.Equal(t, "updated", string(val)) + + _, found, _ = s.Get([]byte("remove"), false) + require.False(t, found, "expected remove to be deleted") + + val, found, _ = s.Get([]byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestBatchSetEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + s.BatchSet(nil) + s.BatchSet([]CacheUpdate{}) + + bytes, entries := s.getSizeInfo() + require.Equal(t, 0, bytes) + require.Equal(t, 0, entries) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestBatchGetAllCached(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1")) + s.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{ + "a": {}, + "b": {}, + } + require.NoError(t, s.BatchGet(keys)) + + for k, want := range map[string]string{"a": "1", "b": "2"} { + r := keys[k] + require.True(t, r.Found, "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} + +func TestBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + s := newTestShard(t, 4096, store) + + keys := map[string]types.BatchGetResult{ + "x": {}, + "y": {}, + } + require.NoError(t, s.BatchGet(keys)) + + for k, want := range map[string]string{"x": "10", "y": "20"} { + r := keys[k] + require.True(t, r.Found, "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} + +func TestBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + s := newTestShard(t, 4096, store) + + s.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{ + "cached": {}, + "db-key": {}, + } + require.NoError(t, s.BatchGet(keys)) + + require.True(t, keys["cached"].Found) + require.Equal(t, "from-cache", 
string(keys["cached"].Value)) + require.True(t, keys["db-key"].Found) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestBatchGetNotFoundKeys(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{ + "nope": {}, + } + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["nope"].Found) +} + +func TestBatchGetDeletedKeys(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("del"), []byte("v")) + s.Delete([]byte("del")) + + keys := map[string]types.BatchGetResult{ + "del": {}, + } + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["del"].Found) +} + +func TestBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{ + "fail": {}, + } + require.NoError(t, s.BatchGet(keys), "BatchGet itself should not fail") + require.Error(t, keys["fail"].Error, "expected per-key error") +} + +func TestBatchGetEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, s.BatchGet(keys)) +} + +func TestBatchGetCachesResults(t *testing.T) { + var readCalls atomic.Int64 + store := map[string][]byte{"k": []byte("v")} + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + v, ok := store[string(key)] + return v, ok, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(keys) + + // bulkInjectValues runs in a goroutine — give it a moment. 
+ time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) + require.Equal(t, int64(1), readCalls.Load(), "result should be cached") +} + +// --------------------------------------------------------------------------- +// Eviction +// --------------------------------------------------------------------------- + +func TestEvictionRespectMaxSize(t *testing.T) { + s := newTestShard(t, 30, map[string][]byte{}) + + // key="a" (1 byte) + value="aaaaaaaaaa" (10 bytes) = 11 bytes per entry + s.Set([]byte("a"), []byte("aaaaaaaaaa")) + s.Set([]byte("b"), []byte("bbbbbbbbbb")) + + _, entries := s.getSizeInfo() + require.Equal(t, 2, entries) + + // Third entry pushes to 33 bytes, exceeding maxSize=30 → evict "a". + s.Set([]byte("c"), []byte("cccccccccc")) + + bytes, entries := s.getSizeInfo() + require.LessOrEqual(t, bytes, 30, "shard size should not exceed maxSize") + require.Equal(t, 2, entries) +} + +func TestEvictionOrderIsLRU(t *testing.T) { + // Each entry: key(1) + value(4) = 5 bytes. maxSize=15 → fits 3. + s := newTestShard(t, 15, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1111")) + s.Set([]byte("b"), []byte("2222")) + s.Set([]byte("c"), []byte("3333")) + + // Touch "a" so "b" becomes the LRU. + s.Get([]byte("a"), true) + + // Insert "d" → total 20 > 15 → must evict. "b" is LRU. 
+ s.Set([]byte("d"), []byte("4444")) + + s.lock.Lock() + _, bExists := s.data["b"] + _, aExists := s.data["a"] + s.lock.Unlock() + + require.False(t, bExists, "expected 'b' to be evicted (it was LRU)") + require.True(t, aExists, "expected 'a' to survive (it was recently touched)") +} + +func TestEvictionOnDelete(t *testing.T) { + s := newTestShard(t, 10, map[string][]byte{}) + + s.Set([]byte("a"), []byte("val")) // size 4 + s.Delete([]byte("longkey1")) // size 8 + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 10, "size should not exceed maxSize") +} + +func TestEvictionOnGetFromDB(t *testing.T) { + store := map[string][]byte{ + "x": []byte("12345678901234567890"), + } + s := newTestShard(t, 25, store) + + s.Set([]byte("a"), []byte("small")) + + // Reading "x" brings in 1+20=21 bytes, total becomes 6+21=27 > 25 → eviction. + s.Get([]byte("x"), true) + + time.Sleep(50 * time.Millisecond) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 25, "size should not exceed maxSize after DB read") +} + +// --------------------------------------------------------------------------- +// getSizeInfo +// --------------------------------------------------------------------------- + +func TestGetSizeInfoEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + bytes, entries := s.getSizeInfo() + require.Equal(t, 0, bytes) + require.Equal(t, 0, entries) +} + +func TestGetSizeInfoAfterSets(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("ab"), []byte("cd")) // 2+2 = 4 + s.Set([]byte("efg"), []byte("hi")) // 3+2 = 5 + + bytes, entries := s.getSizeInfo() + require.Equal(t, 2, entries) + require.Equal(t, 9, bytes) +} + +// --------------------------------------------------------------------------- +// injectValue — edge cases +// --------------------------------------------------------------------------- + +func TestInjectValueNotFound(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + 
val, found, err := s.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) + + s.lock.Lock() + entry, ok := s.data["missing"] + s.lock.Unlock() + require.True(t, ok, "entry should exist in map") + require.Equal(t, statusDeleted, entry.status) +} + +// --------------------------------------------------------------------------- +// Concurrent Set and Get +// --------------------------------------------------------------------------- + +func TestConcurrentSetAndGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + const n = 100 + var wg sync.WaitGroup + + for i := 0; i < n; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go func() { + defer wg.Done() + s.Set(key, val) + }() + go func() { + defer wg.Done() + s.Get(key, true) + }() + } + + wg.Wait() +} + +func TestConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + s := newTestShard(t, 100_000, store) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 20) + for i := 0; i < 20; i++ { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + s.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + s.BatchGet(keys) + }() + + wg.Wait() +} + +// --------------------------------------------------------------------------- +// Pool submission failure +// --------------------------------------------------------------------------- + +type failPool struct{} + +func (fp *failPool) Submit(_ context.Context, _ func()) error { + return errors.New("pool exhausted") +} + +func TestGetPoolSubmitFailure(t *testing.T) { + 
readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } + s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + + _, _, err := s.Get([]byte("k"), true) + require.Error(t, err) +} + +func TestBatchGetPoolSubmitFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } + s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + err := s.BatchGet(keys) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Large values +// --------------------------------------------------------------------------- + +func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { + s := newTestShard(t, 100, map[string][]byte{}) + + s.Set([]byte("a"), []byte("small")) + + bigVal := make([]byte, 95) + for i := range bigVal { + bigVal[i] = 'X' + } + s.Set([]byte("b"), bigVal) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 100, "size should not exceed maxSize after large set") +} + +// --------------------------------------------------------------------------- +// bulkInjectValues — error entries are not cached +// --------------------------------------------------------------------------- + +func TestBatchGetDBErrorNotCached(t *testing.T) { + var calls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient db error") + } + return []byte("ok"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(keys) + + // Wait for bulkInjectValues goroutine. 
+ time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err, "retry should succeed") + require.True(t, found) + require.Equal(t, "ok", string(val)) +} + +// --------------------------------------------------------------------------- +// Edge: Set then Delete then BatchGet +// --------------------------------------------------------------------------- + +func TestSetDeleteThenBatchGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["k"].Found) +} From 7b5538e9c985c5e492ff0145243e0bfae032de14 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 15:28:23 -0500 Subject: [PATCH 045/119] cache tests --- .../pebbledb/pebblecache/cache_impl_test.go | 689 ++++++++++++++++++ 1 file changed, 689 insertions(+) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go new file mode 100644 index 0000000000..f14ba892cc --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go @@ -0,0 +1,689 @@ +package pebblecache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func noopRead(key []byte) ([]byte, bool, error) { return nil, false, nil } + +func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize int) Cache { + t.Helper() + readFunc := func(key []byte) ([]byte, bool, 
error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + } + pool := threading.NewAdHocPool() + c, err := NewCache(context.Background(), readFunc, shardCount, maxSize, pool, pool, "", 0) + require.NoError(t, err) + return c +} + +// --------------------------------------------------------------------------- +// NewCache — validation +// --------------------------------------------------------------------------- + +func TestNewCacheValid(t *testing.T) { + pool := threading.NewAdHocPool() + c, err := NewCache(context.Background(), noopRead, 4, 1024, pool, pool, "", 0) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNewCacheSingleShard(t *testing.T) { + pool := threading.NewAdHocPool() + c, err := NewCache(context.Background(), noopRead, 1, 1024, pool, pool, "", 0) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNewCacheShardCountZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewCache(context.Background(), noopRead, 0, 1024, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewCacheShardCountNegative(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewCache(context.Background(), noopRead, -1, 1024, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewCacheShardCountNotPowerOfTwo(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []int{3, 5, 6, 7, 9, 10} { + _, err := NewCache(context.Background(), noopRead, n, 1024, pool, pool, "", 0) + require.Error(t, err, "shardCount=%d", n) + } +} + +func TestNewCacheMaxSizeZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewCache(context.Background(), noopRead, 4, 0, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewCacheMaxSizeNegative(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewCache(context.Background(), noopRead, 4, -100, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewCacheMaxSizeLessThanShardCount(t *testing.T) { 
+ pool := threading.NewAdHocPool() + // shardCount=4, maxSize=3 → sizePerShard=0 + _, err := NewCache(context.Background(), noopRead, 4, 3, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewCacheWithMetrics(t *testing.T) { + pool := threading.NewAdHocPool() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c, err := NewCache(ctx, noopRead, 2, 1024, pool, pool, "test-cache", time.Hour) + require.NoError(t, err) + require.NotNil(t, c) +} + +// --------------------------------------------------------------------------- +// Get +// --------------------------------------------------------------------------- + +func TestCacheGetFromDB(t *testing.T) { + store := map[string][]byte{"foo": []byte("bar")} + c := newTestCache(t, store, 4, 4096) + + val, found, err := c.Get([]byte("foo"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "bar", string(val)) +} + +func TestCacheGetNotFound(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + val, found, err := c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetAfterSet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestCacheGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + c := newTestCache(t, store, 4, 4096) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetDBError(t *testing.T) { + dbErr := errors.New("db fail") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + + _, _, err 
:= c.Get([]byte("k"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestCacheGetSameKeyConsistentShard(t *testing.T) { + var readCalls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return []byte("val"), true, nil + } + pool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 4, 4096, pool, pool, "", 0) + + // First call populates cache in a specific shard. + val1, _, _ := c.Get([]byte("key"), true) + // Second call should hit cache in the same shard. + val2, _, _ := c.Get([]byte("key"), true) + + require.Equal(t, string(val1), string(val2)) + require.Equal(t, int64(1), readCalls.Load(), "second Get should hit cache") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestCacheSetNewKey(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + + val, found, err := c.Get([]byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "1", string(val)) +} + +func TestCacheSetOverwrite(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("old")) + c.Set([]byte("a"), []byte("new")) + + val, found, err := c.Get([]byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestCacheSetNilValue(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), nil) + + val, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestCacheDeleteExistingKey(t *testing.T) { + c := newTestCache(t, 
map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + _, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteNonexistent(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Delete([]byte("ghost")) + + _, found, err := c.Get([]byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteThenSet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v1")) + c.Delete([]byte("k")) + c.Set([]byte("k"), []byte("v2")) + + val, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestCacheBatchSetMultipleKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + {Key: []byte("c"), Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := c.Get([]byte(tc.key), false) + require.NoError(t, err, "key=%q", tc.key) + require.True(t, found, "key=%q", tc.key) + require.Equal(t, tc.want, string(val), "key=%q", tc.key) + } +} + +func TestCacheBatchSetMixedSetAndDelete(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("keep"), []byte("v")) + c.Set([]byte("remove"), []byte("v")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), IsDelete: true}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + require.NoError(t, err) + + val, found, _ := c.Get([]byte("keep"), false) + 
require.True(t, found) + require.Equal(t, "updated", string(val)) + + _, found, _ = c.Get([]byte("remove"), false) + require.False(t, found) + + val, found, _ = c.Get([]byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestCacheBatchSetEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestCacheBatchSetPoolFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + readPool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("k"), Value: []byte("v")}, + }) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAllCached(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].Found) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].Found) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestCacheBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + c := newTestCache(t, store, 4, 4096) + + keys := map[string]types.BatchGetResult{"x": {}, "y": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["x"].Found) + require.Equal(t, "10", string(keys["x"].Value)) + require.True(t, keys["y"].Found) + require.Equal(t, "20", string(keys["y"].Value)) +} + +func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + c 
:= newTestCache(t, store, 4, 4096) + + c.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{"cached": {}, "db-key": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["cached"].Found) + require.Equal(t, "from-cache", string(keys["cached"].Value)) + require.True(t, keys["db-key"].Found) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestCacheBatchGetNotFoundKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + keys := map[string]types.BatchGetResult{"nope": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["nope"].Found) +} + +func TestCacheBatchGetDeletedKey(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["k"].Found) +} + +func TestCacheBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + + keys := map[string]types.BatchGetResult{"fail": {}} + require.NoError(t, c.BatchGet(keys), "BatchGet itself should not fail") + require.Error(t, keys["fail"].Error) +} + +func TestCacheBatchGetEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} + +func TestCacheBatchGetPoolFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + readPool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + + keys := map[string]types.BatchGetResult{"k": {}} + err := c.BatchGet(keys) + require.Error(t, err) +} + +func TestCacheBatchGetShardReadPoolFailure(t 
*testing.T) { + // miscPool succeeds (goroutine runs), but readPool fails inside shard.BatchGet, + // causing the per-key error branch to be hit. + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + miscPool := threading.NewAdHocPool() + c, _ := NewCache(context.Background(), readFunc, 1, 4096, &failPool{}, miscPool, "", 0) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + for k, r := range keys { + require.Error(t, r.Error, "key=%q should have per-key error", k) + } +} + +// --------------------------------------------------------------------------- +// Cross-shard distribution +// --------------------------------------------------------------------------- + +func TestCacheDistributesAcrossShards(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + // Insert enough distinct keys that at least 2 shards get entries. + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("v")) + } + + nonEmpty := 0 + for _, s := range impl.shards { + _, entries := s.getSizeInfo() + if entries > 0 { + nonEmpty++ + } + } + require.GreaterOrEqual(t, nonEmpty, 2, "keys should distribute across multiple shards") +} + +func TestCacheGetRoutesToSameShard(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + c.Set([]byte("key"), []byte("val")) + + idx := impl.shardManager.Shard([]byte("key")) + _, entries := impl.shards[idx].getSizeInfo() + require.Equal(t, 1, entries, "key should be in the shard determined by shardManager") +} + +// --------------------------------------------------------------------------- +// getCacheSizeInfo +// --------------------------------------------------------------------------- + +func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, int64(0), 
bytes) + require.Equal(t, int64(0), entries) +} + +func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + for i := 0; i < 20; i++ { + c.Set([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, int64(20), entries) + require.Greater(t, bytes, int64(0)) +} + +// --------------------------------------------------------------------------- +// Many keys — BatchGet/BatchSet spanning all shards +// --------------------------------------------------------------------------- + +func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 100_000) + + updates := make([]CacheUpdate, 200) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("key-%03d", i)), + Value: []byte(fmt.Sprintf("val-%03d", i)), + } + } + require.NoError(t, c.BatchSet(updates)) + + keys := make(map[string]types.BatchGetResult, 200) + for i := 0; i < 200; i++ { + keys[fmt.Sprintf("key-%03d", i)] = types.BatchGetResult{} + } + require.NoError(t, c.BatchGet(keys)) + + for i := 0; i < 200; i++ { + k := fmt.Sprintf("key-%03d", i) + want := fmt.Sprintf("val-%03d", i) + require.True(t, keys[k].Found, "key=%q", k) + require.Equal(t, want, string(keys[k].Value), "key=%q", k) + require.NoError(t, keys[k].Error, "key=%q", k) + } +} + +// --------------------------------------------------------------------------- +// Concurrency +// --------------------------------------------------------------------------- + +func TestCacheConcurrentGetSet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go 
func() { + defer wg.Done() + c.Set(key, val) + }() + go func() { + defer wg.Done() + c.Get(key, true) + }() + } + wg.Wait() +} + +func TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 50) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + c.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + c.BatchGet(keys) + }() + + wg.Wait() +} + +func TestCacheConcurrentDeleteAndGet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 100_000) + + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("k-%d", i)), []byte("v")) + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("k-%d", i)) + go func() { + defer wg.Done() + c.Delete(key) + }() + go func() { + defer wg.Done() + c.Get(key, true) + }() + } + wg.Wait() +} + +// --------------------------------------------------------------------------- +// Eviction through the cache layer +// --------------------------------------------------------------------------- + +func TestCacheEvictsPerShard(t *testing.T) { + // 1 shard, maxSize=20. Inserting more than 20 bytes triggers eviction. 
+ c := newTestCache(t, map[string][]byte{}, 1, 20) + impl := c.(*cache) + + // key(1) + value(8) = 9 bytes each + c.Set([]byte("a"), []byte("11111111")) + c.Set([]byte("b"), []byte("22222222")) + // 18 bytes, fits + + c.Set([]byte("c"), []byte("33333333")) + // 27 bytes → must evict to get under 20 + + bytes, _ := impl.shards[0].getSizeInfo() + require.LessOrEqual(t, bytes, 20) +} + +// --------------------------------------------------------------------------- +// Edge: BatchSet with keys all routed to the same shard +// --------------------------------------------------------------------------- + +func TestCacheBatchSetSameShard(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 1, 4096) + + // With 1 shard, every key goes to shard 0. + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("x"), Value: []byte("1")}, + {Key: []byte("y"), Value: []byte("2")}, + {Key: []byte("z"), Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"x", "1"}, {"y", "2"}, {"z", "3"}} { + val, found, err := c.Get([]byte(tc.key), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, tc.want, string(val)) + } +} + +// --------------------------------------------------------------------------- +// Edge: BatchGet after BatchSet with deletes +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + c.Set([]byte("c"), []byte("3")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("updated")}, + {Key: []byte("b"), IsDelete: true}, + }) + require.NoError(t, err) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}, "c": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].Found) + require.Equal(t, "updated", string(keys["a"].Value)) + require.False(t, 
keys["b"].Found) + require.True(t, keys["c"].Found) + require.Equal(t, "3", string(keys["c"].Value)) +} + +// --------------------------------------------------------------------------- +// Power-of-two shard counts +// --------------------------------------------------------------------------- + +func TestNewCachePowerOfTwoShardCounts(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []int{1, 2, 4, 8, 16, 32, 64} { + c, err := NewCache(context.Background(), noopRead, n, n*100, pool, pool, "", 0) + require.NoError(t, err, "shardCount=%d", n) + require.NotNil(t, c, "shardCount=%d", n) + } +} From dc8d0c9f45747c0fbdcc3acf9843bf34dc6d1082 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 15:42:53 -0500 Subject: [PATCH 046/119] moar unit tests --- sei-db/db_engine/pebbledb/db.go | 40 +- sei-db/db_engine/pebbledb/db_test.go | 465 ++++++------------ .../pebbledb/pebblecache/noop_cache.go | 47 ++ .../pebbledb/pebblecache/noop_cache_test.go | 150 ++++++ .../db_engine/pebbledb/pebblecache/shard.go | 5 +- sei-db/db_engine/pebbledb/pebbledb_config.go | 10 +- 6 files changed, 388 insertions(+), 329 deletions(-) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/noop_cache.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 840ac9e29c..476cdb6572 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -109,24 +109,28 @@ func Open( NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), config.MetricsScrapeInterval) } - var cacheName string - if config.EnableMetrics { - cacheName = filepath.Base(config.DataDir) - } - - // A high level cache per key (as opposed to the low level pebble block cache). 
- cache, err := pebblecache.NewCache( - ctx, - readFunction, - 8, - config.CacheSize, - readPool, - miscPool, - cacheName, - config.MetricsScrapeInterval) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to create flatcache: %w", err) + var cache pebblecache.Cache + if config.CacheSize == 0 { + cache = pebblecache.NewNoOpCache(readFunction) + } else { + var cacheName string + if config.EnableMetrics { + cacheName = filepath.Base(config.DataDir) + } + + cache, err = pebblecache.NewCache( + ctx, + readFunction, + config.CacheShardCount, + config.CacheSize, + readPool, + miscPool, + cacheName, + config.MetricsScrapeInterval) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create flatcache: %w", err) + } } return &pebbleDB{ diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 8ccc2ddea2..7f8a066c45 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -9,159 +9,195 @@ import ( "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -func TestDBGetSetDelete(t *testing.T) { - cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) +// forEachCacheMode runs fn once with a warm cache and once with caching disabled, +// so cache-sensitive tests exercise both the cache and the raw storage layer. 
+func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig)) { + for _, mode := range []struct { + name string + cacheSize int + }{ + {"cached", 16 * unit.MB}, + {"uncached", 0}, + } { + t.Run(mode.name, func(t *testing.T) { + cfg := DefaultTestConfig(t) + cfg.CacheSize = mode.cacheSize + fn(t, cfg) + }) } +} + +func openDB(t *testing.T, cfg *PebbleDBConfig) types.KeyValueDB { + t.Helper() + db, err := Open(t.Context(), cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) + return db +} - key := []byte("k1") - val := []byte("v1") +// --------------------------------------------------------------------------- +// Cache-sensitive tests — run in both cached and uncached modes +// --------------------------------------------------------------------------- - _, err = db.Get(key) - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound, got %v", err) - } +func TestDBGetSetDelete(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) - if err := db.Set(key, val, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } + key := []byte("k1") + val := []byte("v1") - got, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - if !bytes.Equal(got, val) { - t.Fatalf("value mismatch: got %q want %q", got, val) - } + _, err := db.Get(key) + require.ErrorIs(t, err, errorutils.ErrNotFound) - if err := db.Delete(key, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Delete: %v", err) - } + require.NoError(t, db.Set(key, val, types.WriteOptions{Sync: false})) - _, err = db.Get(key) - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound after delete, got %v", err) - } + got, err := db.Get(key) + require.NoError(t, err) + require.Equal(t, val, got) + + require.NoError(t, db.Delete(key, types.WriteOptions{Sync: false})) + + _, err = db.Get(key) 
+ require.ErrorIs(t, err, errorutils.ErrNotFound) + }) } func TestBatchAtomicWrite(t *testing.T) { - cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) - b := db.NewBatch() - t.Cleanup(func() { require.NoError(t, b.Close()) }) + b := db.NewBatch() + t.Cleanup(func() { require.NoError(t, b.Close()) }) - if err := b.Set([]byte("a"), []byte("1")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Set([]byte("b"), []byte("2")); err != nil { - t.Fatalf("batch set: %v", err) - } - - if err := b.Commit(types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("batch commit: %v", err) - } + require.NoError(t, b.Set([]byte("a"), []byte("1"))) + require.NoError(t, b.Set([]byte("b"), []byte("2"))) + require.NoError(t, b.Commit(types.WriteOptions{Sync: false})) - for _, tc := range []struct { - k string - v string - }{ - {"a", "1"}, - {"b", "2"}, - } { - got, err := db.Get([]byte(tc.k)) - if err != nil { - t.Fatalf("Get(%q): %v", tc.k, err) - } - if string(got) != tc.v { - t.Fatalf("Get(%q)=%q want %q", tc.k, got, tc.v) + for _, tc := range []struct{ k, v string }{{"a", "1"}, {"b", "2"}} { + got, err := db.Get([]byte(tc.k)) + require.NoError(t, err, "key=%q", tc.k) + require.Equal(t, tc.v, string(got), "key=%q", tc.k) } - } + }) } +func TestErrNotFoundConsistency(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) + + _, err := db.Get([]byte("missing-key")) + require.Error(t, err) + require.ErrorIs(t, err, errorutils.ErrNotFound) + require.True(t, errorutils.IsNotFound(err)) + }) +} + +func TestGetReturnsCopy(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) + + require.NoError(t, 
db.Set([]byte("k"), []byte("v"), types.WriteOptions{Sync: false})) + + got, err := db.Get([]byte("k")) + require.NoError(t, err) + got[0] = 'X' + + got2, err := db.Get([]byte("k")) + require.NoError(t, err) + require.Equal(t, "v", string(got2), "stored value should remain unchanged") + }) +} + +func TestBatchLenResetDelete(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) + + require.NoError(t, db.Set([]byte("to-delete"), []byte("val"), types.WriteOptions{Sync: false})) + + b := db.NewBatch() + t.Cleanup(func() { require.NoError(t, b.Close()) }) + + initialLen := b.Len() + + require.NoError(t, b.Set([]byte("a"), []byte("1"))) + require.NoError(t, b.Delete([]byte("to-delete"))) + require.Greater(t, b.Len(), initialLen) + + b.Reset() + require.Equal(t, initialLen, b.Len()) + + require.NoError(t, b.Set([]byte("b"), []byte("2"))) + require.NoError(t, b.Commit(types.WriteOptions{Sync: false})) + + got, err := db.Get([]byte("b")) + require.NoError(t, err) + require.Equal(t, "2", string(got)) + }) +} + +func TestFlush(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { + db := openDB(t, &cfg) + + require.NoError(t, db.Set([]byte("flush-test"), []byte("val"), types.WriteOptions{Sync: false})) + require.NoError(t, db.Flush()) + + got, err := db.Get([]byte("flush-test")) + require.NoError(t, err) + require.Equal(t, "val", string(got)) + }) +} + +// --------------------------------------------------------------------------- +// Cache-irrelevant tests — iterators and lifecycle, run once +// --------------------------------------------------------------------------- + func TestIteratorBounds(t *testing.T) { cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + db := openDB(t, &cfg) - // Keys: a, b, c for _, k 
:= range []string{"a", "b", "c"} { - if err := db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } + require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) } itr, err := db.NewIter(&types.IterOptions{LowerBound: []byte("b"), UpperBound: []byte("d")}) - if err != nil { - t.Fatalf("NewIter: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, itr.Close()) }) var keys []string for ok := itr.First(); ok && itr.Valid(); ok = itr.Next() { keys = append(keys, string(itr.Key())) } - if err := itr.Error(); err != nil { - t.Fatalf("iter error: %v", err) - } - // LowerBound inclusive => includes b; UpperBound exclusive => includes c (d not present anyway) - if len(keys) != 2 || keys[0] != "b" || keys[1] != "c" { - t.Fatalf("unexpected keys: %v", keys) - } + require.NoError(t, itr.Error()) + require.Equal(t, []string{"b", "c"}, keys) } func TestIteratorPrev(t *testing.T) { cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + db := openDB(t, &cfg) - // Keys: a, b, c for _, k := range []string{"a", "b", "c"} { - if err := db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } + require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) } itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, itr.Close()) }) - if !itr.Last() || !itr.Valid() { - t.Fatalf("expected Last() to position iterator") - } - if string(itr.Key()) != "c" { - t.Fatalf("expected key=c at Last(), got %q", itr.Key()) - } + require.True(t, itr.Last()) + require.True(t, itr.Valid()) + require.Equal(t, "c", string(itr.Key())) - if 
!itr.Prev() || !itr.Valid() { - t.Fatalf("expected Prev() to succeed") - } - if string(itr.Key()) != "b" { - t.Fatalf("expected key=b after Prev(), got %q", itr.Key()) - } + require.True(t, itr.Prev()) + require.True(t, itr.Valid()) + require.Equal(t, "b", string(itr.Key())) } func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { - // Use a custom comparer with Split that treats everything up to (and including) '/' - // as the "prefix" for NextPrefix() / prefix-based skipping. cmp := *pebble.DefaultComparer cmp.Name = "sei-db/test-split-on-slash" cmp.Split = func(k []byte) int { @@ -172,12 +208,6 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } return len(k) } - // NextPrefix relies on Comparer.ImmediateSuccessor to compute a key that is - // guaranteed to be greater than all keys sharing the current prefix. - // pebble.DefaultComparer.ImmediateSuccessor appends 0x00, which is not - // sufficient for our "prefix ends at '/'" convention (e.g. "a/\x00" < "a/2"). - // We provide an ImmediateSuccessor that increments the last byte (from the end) - // to produce a prefix upper bound (e.g. "a/" -> "a0"). 
cmp.ImmediateSuccessor = func(dst, a []byte) []byte { for i := len(a) - 1; i >= 0; i-- { if a[i] != 0xff { @@ -191,228 +221,53 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { cfg := DefaultTestConfig(t) db, err := Open(t.Context(), &cfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) for _, k := range []string{"a/1", "a/2", "a/3", "b/1"} { - if err := db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } + require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) } itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, itr.Close()) }) - if !itr.SeekGE([]byte("a/")) || !itr.Valid() { - t.Fatalf("expected SeekGE(a/) to be valid") - } - if !bytes.HasPrefix(itr.Key(), []byte("a/")) { - t.Fatalf("expected key with prefix a/, got %q", itr.Key()) - } - - if !itr.NextPrefix() || !itr.Valid() { - t.Fatalf("expected NextPrefix() to move to next prefix") - } - if string(itr.Key()) != "b/1" { - t.Fatalf("expected key=b/1 after NextPrefix(), got %q", itr.Key()) - } -} - -func TestErrNotFoundConsistency(t *testing.T) { - cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // Test that Get on missing key returns ErrNotFound - _, err = db.Get([]byte("missing-key")) - if err == nil { - t.Fatalf("expected error for missing key") - } - - // Test that error is ErrNotFound - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound, got %v", err) - } + require.True(t, itr.SeekGE([]byte("a/"))) + require.True(t, itr.Valid()) + require.True(t, 
bytes.HasPrefix(itr.Key(), []byte("a/"))) - // Test that IsNotFound helper works - if !errorutils.IsNotFound(err) { - t.Fatalf("IsNotFound should return true for ErrNotFound") - } -} - -func TestGetReturnsCopy(t *testing.T) { - cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - key := []byte("k") - val := []byte("v") - if err := db.Set(key, val, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } - - got, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - // Modify returned slice; should not affect stored value if Get returns a copy. - got[0] = 'X' - - got2, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - if string(got2) != "v" { - t.Fatalf("expected stored value to remain unchanged, got %q", got2) - } -} - -func TestBatchLenResetDelete(t *testing.T) { - cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // First, set a key so we can delete it - if err := db.Set([]byte("to-delete"), []byte("val"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } - - b := db.NewBatch() - t.Cleanup(func() { require.NoError(t, b.Close()) }) - - // Record initial batch len (Pebble batch always has a header, so may not be 0) - initialLen := b.Len() - - // Add some operations - if err := b.Set([]byte("a"), []byte("1")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Delete([]byte("to-delete")); err != nil { - t.Fatalf("batch delete: %v", err) - } - - // Len should increase after operations (Pebble Len() returns bytes, not count) - if b.Len() <= initialLen { - t.Fatalf("expected Len() to 
increase after operations, got %d (initial %d)", b.Len(), initialLen) - } - - // Reset should clear the batch back to initial state - b.Reset() - if b.Len() != initialLen { - t.Fatalf("expected Len()=%d after Reset, got %d", initialLen, b.Len()) - } - - // Add and commit - if err := b.Set([]byte("b"), []byte("2")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Commit(types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("batch commit: %v", err) - } - - // Verify "b" was written - got, err := db.Get([]byte("b")) - if err != nil { - t.Fatalf("Get: %v", err) - } - if string(got) != "2" { - t.Fatalf("expected '2', got %q", got) - } + require.True(t, itr.NextPrefix()) + require.True(t, itr.Valid()) + require.Equal(t, "b/1", string(itr.Key())) } func TestIteratorSeekLTAndValue(t *testing.T) { cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + db := openDB(t, &cfg) - // Insert keys: a, b, c with values for _, kv := range []struct{ k, v string }{ {"a", "val-a"}, {"b", "val-b"}, {"c", "val-c"}, } { - if err := db.Set([]byte(kv.k), []byte(kv.v), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", kv.k, err) - } + require.NoError(t, db.Set([]byte(kv.k), []byte(kv.v), types.WriteOptions{Sync: false})) } itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, itr.Close()) }) - // SeekLT("c") should position at "b" - if !itr.SeekLT([]byte("c")) || !itr.Valid() { - t.Fatalf("expected SeekLT(c) to be valid") - } - if string(itr.Key()) != "b" { - t.Fatalf("expected key=b after SeekLT(c), got %q", itr.Key()) - } - if string(itr.Value()) != "val-b" { - t.Fatalf("expected value=val-b, got %q", itr.Value()) - } -} - -func TestFlush(t *testing.T) { - cfg := 
DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // Set some data - if err := db.Set([]byte("flush-test"), []byte("val"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } - - // Flush should succeed - if err := db.Flush(); err != nil { - t.Fatalf("Flush: %v", err) - } - - // Data should still be readable - got, err := db.Get([]byte("flush-test")) - if err != nil { - t.Fatalf("Get after flush: %v", err) - } - if string(got) != "val" { - t.Fatalf("expected 'val', got %q", got) - } + require.True(t, itr.SeekLT([]byte("c"))) + require.True(t, itr.Valid()) + require.Equal(t, "b", string(itr.Key())) + require.Equal(t, "val-b", string(itr.Value())) } func TestCloseIsIdempotent(t *testing.T) { cfg := DefaultTestConfig(t) db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) - if err != nil { - t.Fatalf("Open: %v", err) - } - - // First close should succeed - if err := db.Close(); err != nil { - t.Fatalf("first Close: %v", err) - } + require.NoError(t, err) - // Second close should be idempotent (no panic, returns nil) - if err := db.Close(); err != nil { - t.Fatalf("second Close should return nil, got: %v", err) - } + require.NoError(t, db.Close()) + require.NoError(t, db.Close(), "second Close should be idempotent") } diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go new file mode 100644 index 0000000000..0013c5bd82 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go @@ -0,0 +1,47 @@ +package pebblecache + +import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + +var _ Cache = (*noOpCache)(nil) + +// noOpCache is a Cache that performs no caching. 
Every Get falls through +// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. +// Useful for testing the storage layer without cache interference, or for +// workloads where caching is not beneficial. +type noOpCache struct { + readFunc func(key []byte) ([]byte, bool, error) +} + +// NewNoOpCache creates a Cache that always reads from readFunc and never caches. +func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { + return &noOpCache{readFunc: readFunc} +} + +func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { + return c.readFunc(key) +} + +func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { + for k := range keys { + val, found, err := c.readFunc([]byte(k)) + if err != nil { + keys[k] = types.BatchGetResult{Error: err} + } else { + keys[k] = types.BatchGetResult{Value: val, Found: found} + } + } + return nil +} + +func (c *noOpCache) Set([]byte, []byte) { + // intentional no-op +} + +func (c *noOpCache) Delete([]byte) { + // intentional no-op +} + +func (c *noOpCache) BatchSet([]CacheUpdate) error { + // intentional no-op + return nil +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go new file mode 100644 index 0000000000..c1f0587ebf --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go @@ -0,0 +1,150 @@ +package pebblecache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +func newNoOpTestCache(store map[string][]byte) Cache { + return NewNoOpCache(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + }) +} + +func TestNoOpGetFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + 
require.Equal(t, "v", string(val)) +} + +func TestNoOpGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + val, found, err := c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestNoOpGetError(t *testing.T) { + dbErr := errors.New("broken") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + _, _, err := c.Get([]byte("k"), true) + require.ErrorIs(t, err, dbErr) +} + +func TestNoOpGetIgnoresUpdateLru(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val1, _, _ := c.Get([]byte("k"), true) + val2, _, _ := c.Get([]byte("k"), false) + require.Equal(t, string(val1), string(val2)) +} + +func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { + store := map[string][]byte{"k": []byte("v1")} + c := newNoOpTestCache(store) + + val, _, _ := c.Get([]byte("k"), true) + require.Equal(t, "v1", string(val)) + + store["k"] = []byte("v2") + + val, _, _ = c.Get([]byte("k"), true) + require.Equal(t, "v2", string(val), "should re-read from func, not cache") +} + +func TestNoOpSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + c.Set([]byte("k"), []byte("v")) + + _, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found, "Set should not cache anything") +} + +func TestNoOpDeleteIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found, "Delete should not affect reads") + require.Equal(t, "v", string(val)) +} + +func TestNoOpBatchSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + }) + require.NoError(t, err) + + _, found, _ := c.Get([]byte("a"), true) + require.False(t, found) + _, 
found, _ = c.Get([]byte("b"), true) + require.False(t, found) +} + +func TestNoOpBatchSetEmptyAndNil(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestNoOpBatchGetAllFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].Found) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].Found) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestNoOpBatchGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{"x": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["x"].Found) +} + +func TestNoOpBatchGetError(t *testing.T) { + dbErr := errors.New("fail") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, c.BatchGet(keys)) + require.Error(t, keys["k"].Error) +} + +func TestNoOpBatchGetEmpty(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index 937faf447f..5702272b3b 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -1,6 +1,7 @@ package pebblecache import ( + "bytes" "context" "fmt" "sync" @@ -105,7 +106,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { switch entry.status { case statusAvailable: - value := entry.value + value := bytes.Clone(entry.value) if updateLru { s.gcQueue.Touch(key) } @@ -227,7 +228,7 @@ func (s *shard) BatchGet(keys 
map[string]types.BatchGetResult) error { switch entry.status { case statusAvailable: - keys[key] = types.BatchGetResult{Value: entry.value, Found: true} + keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value), Found: true} hits++ case statusDeleted: keys[key] = types.BatchGetResult{Found: false} diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index 005d3c3c71..7936793eb0 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -39,11 +39,13 @@ func (c *PebbleDBConfig) Validate() error { if c.DataDir == "" { return fmt.Errorf("data dir is required") } - if c.CacheShardCount <= 0 || (c.CacheShardCount&(c.CacheShardCount-1)) != 0 { - return fmt.Errorf("cache shard count must be a power of two and greater than 0") + if c.CacheSize < 0 { + return fmt.Errorf("cache size must not be negative") } - if c.CacheSize <= 0 { - return fmt.Errorf("cache size must be greater than 0") + if c.CacheSize > 0 { + if (c.CacheShardCount&(c.CacheShardCount-1)) != 0 { + return fmt.Errorf("cache shard count must be a power of two or 0") + } } if c.BlockCacheSize <= 0 { return fmt.Errorf("block cache size must be greater than 0") From e9cc9ca5bac4d7b085b1e004646293f6865338ed Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 10 Mar 2026 15:46:15 -0500 Subject: [PATCH 047/119] cleanup --- .../db_engine/pebbledb/pebblecache/shard.go | 124 ++++++++++-------- 1 file changed, 71 insertions(+), 53 deletions(-) diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go index 5702272b3b..37a59a73ad 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard.go @@ -104,68 +104,86 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { entry := s.getEntry(key) switch entry.status { - case statusAvailable: - value := bytes.Clone(entry.value) - if updateLru { - 
s.gcQueue.Touch(key) - } - s.lock.Unlock() - s.metrics.reportCacheHits(1) - return value, true, nil + return s.getAvailable(entry, key, updateLru) case statusDeleted: - if updateLru { - s.gcQueue.Touch(key) - } - s.lock.Unlock() - s.metrics.reportCacheHits(1) - return nil, false, nil + return s.getDeleted(key, updateLru) case statusScheduled: - // Another goroutine initiated a read, wait for that read to finish. - valueChan := entry.valueChan - s.lock.Unlock() - s.metrics.reportCacheMisses(1) - startTime := time.Now() - result, err := threading.InterruptiblePull(s.ctx, valueChan) - s.metrics.reportCacheMissLatency(time.Since(startTime)) - if err != nil { - return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) - } - valueChan <- result // reload the channel in case there are other listeners - if result.err != nil { - return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) - } - return result.value, result.found, nil + return s.getScheduled(entry) case statusUnknown: - // We are the first goroutine to read this value. 
- entry.status = statusScheduled - valueChan := make(chan readResult, 1) - entry.valueChan = valueChan - s.lock.Unlock() - s.metrics.reportCacheMisses(1) - startTime := time.Now() - err := s.readPool.Submit(s.ctx, func() { - value, found, readErr := s.readFunc(key) - entry.injectValue(key, readResult{value: value, found: found, err: readErr}) - }) - if err != nil { - return nil, false, fmt.Errorf("failed to schedule read: %w", err) - } - result, err := threading.InterruptiblePull(s.ctx, valueChan) - s.metrics.reportCacheMissLatency(time.Since(startTime)) - if err != nil { - return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) - } - valueChan <- result // reload the channel in case there are other listeners - if result.err != nil { - return nil, false, result.err - } - return result.value, result.found, nil + return s.getUnknown(entry, key) default: + s.lock.Unlock() panic(fmt.Sprintf("unexpected status: %#v", entry.status)) } } +// Handles Get for a key whose value is already cached. Lock must be held; releases it. +func (s *shard) getAvailable(entry *shardEntry, key []byte, updateLru bool) ([]byte, bool, error) { + value := bytes.Clone(entry.value) + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return value, true, nil +} + +// Handles Get for a key known to be deleted. Lock must be held; releases it. +func (s *shard) getDeleted(key []byte, updateLru bool) ([]byte, bool, error) { + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return nil, false, nil +} + +// Handles Get for a key with an in-flight read from another goroutine. Lock must be held; releases it. 
+func (s *shard) getScheduled(entry *shardEntry) ([]byte, bool, error) { + valueChan := entry.valueChan + s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() + result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) + } + return result.value, result.found, nil +} + +// Handles Get for a key not yet read. Schedules the read and waits. Lock must be held; releases it. +func (s *shard) getUnknown(entry *shardEntry, key []byte) ([]byte, bool, error) { + entry.status = statusScheduled + valueChan := make(chan readResult, 1) + entry.valueChan = valueChan + s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() + err := s.readPool.Submit(s.ctx, func() { + value, found, readErr := s.readFunc(key) + entry.injectValue(key, readResult{value: value, found: found, err: readErr}) + }) + if err != nil { + return nil, false, fmt.Errorf("failed to schedule read: %w", err) + } + result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, result.err + } + return result.value, result.found, nil +} + // This method is called by the read scheduler when a value becomes available. 
func (se *shardEntry) injectValue(key []byte, result readResult) { se.shard.lock.Lock() From cea0ebb200a921c490b47b0d74947049caf0ae26 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 11 Mar 2026 10:08:11 -0500 Subject: [PATCH 048/119] unit test fixes --- sei-db/state_db/sc/flatkv/snapshot_test.go | 32 ++++++++++------ sei-db/state_db/sc/flatkv/store_write_test.go | 37 +++++++------------ 2 files changed, 35 insertions(+), 34 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index ccd7d0f4b6..c6b9574dbb 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -826,8 +826,10 @@ func TestCreateWorkingDirReclones(t *testing.T) { // ============================================================================= func TestPruneSnapshotsKeepsRecent(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 1}) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(t.TempDir(), flatkvRootDir) + cfg.SnapshotKeepRecent = 1 + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -837,9 +839,8 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) } - flatkvDir := filepath.Join(dir, flatkvRootDir) var snapshots []int64 - _ = traverseSnapshots(flatkvDir, true, func(v int64) (bool, error) { + _ = traverseSnapshots(cfg.DataDir, true, func(v int64) (bool, error) { snapshots = append(snapshots, v) return false, nil }) @@ -851,8 +852,9 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { } func TestPruneSnapshotsKeepAll(t *testing.T) { - dir := t.TempDir() - s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 100}) + cfg := DefaultTestConfig(t) + cfg.SnapshotKeepRecent = 100 + s, err := 
NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -863,9 +865,8 @@ func TestPruneSnapshotsKeepAll(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) } - flatkvDir := filepath.Join(dir, flatkvRootDir) var count int - _ = traverseSnapshots(flatkvDir, true, func(_ int64) (bool, error) { + _ = traverseSnapshots(cfg.DataDir, true, func(_ int64) (bool, error) { count++ return false, nil }) @@ -992,7 +993,10 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. - s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 0}) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotKeepRecent = 0 + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -1141,7 +1145,10 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - s, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 10}) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotKeepRecent = 10 + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -1156,7 +1163,10 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 1) - s2, err := NewCommitStore(t.Context(), nil, &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotKeepRecent: 10}) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = filepath.Join(dir, flatkvRootDir) + cfg2.SnapshotKeepRecent = 10 + s2, err := NewCommitStore(t.Context(), 
nil, cfg2) require.NoError(t, err) _, err = s2.LoadVersion(ver, false) require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 6124a2212f..f09337b091 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -1,7 +1,6 @@ package flatkv import ( - "path/filepath" "testing" "time" @@ -535,11 +534,9 @@ func TestStoreFsyncConfig(t *testing.T) { }) t.Run("FsyncDisabled", func(t *testing.T) { - dir := t.TempDir() - store, err := NewCommitStore(t.Context(), nil, &Config{ - DataDir: filepath.Join(dir, flatkvRootDir), - Fsync: false, - }) + cfg := DefaultTestConfig(t) + cfg.Fsync = false + store, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = store.LoadVersion(0, false) require.NoError(t, err) @@ -569,13 +566,10 @@ func TestStoreFsyncConfig(t *testing.T) { // ============================================================================= func TestAutoSnapshotTriggeredByInterval(t *testing.T) { - dir := t.TempDir() - cfg := Config{ - DataDir: filepath.Join(dir, flatkvRootDir), - SnapshotInterval: 5, - SnapshotKeepRecent: 2, - } - s, err := NewCommitStore(t.Context(), nil, &cfg) + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 2 + s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -595,12 +589,9 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { } func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { - dir := t.TempDir() - cfg := &Config{ - DataDir: filepath.Join(dir, flatkvRootDir), - SnapshotInterval: 10, - SnapshotKeepRecent: 2, - } + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 10 + cfg.SnapshotKeepRecent = 2 s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -627,8 +618,8 @@ func 
TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { } func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { - dir := t.TempDir() - cfg := &Config{DataDir: filepath.Join(dir, flatkvRootDir), SnapshotInterval: 0} + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 0 s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -845,8 +836,8 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { // ============================================================================= func TestStoreFsyncEnabled(t *testing.T) { - dir := t.TempDir() - cfg := &Config{DataDir: filepath.Join(dir, flatkvRootDir), Fsync: true} + cfg := DefaultTestConfig(t) + cfg.Fsync = true s, err := NewCommitStore(t.Context(), nil, cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) From e58bec2c81ec47a123755ddbc560f905e8ba1665 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 11 Mar 2026 10:42:55 -0500 Subject: [PATCH 049/119] fix hash bug --- sei-db/state_db/sc/flatkv/store_write.go | 114 +++++++++++++++++++---- 1 file changed, 95 insertions(+), 19 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 9daf0162c5..c06823726b 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -73,11 +73,13 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { key: keyBytes, isDelete: true, } + storageOld[keyStr] = types.BatchGetResult{Found: true, Value: nil} } else { s.storageWrites[keyStr] = &pendingKVWrite{ key: keyBytes, value: pair.Value, } + storageOld[keyStr] = types.BatchGetResult{Found: true, Value: pair.Value} } // LtHash pair: internal key directly @@ -156,11 +158,13 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { key: keyBytes, isDelete: true, } + codeOld[keyStr] = types.BatchGetResult{Found: true, Value: nil} } else { s.codeWrites[keyStr] = &pendingKVWrite{ key: 
keyBytes, value: pair.Value, } + codeOld[keyStr] = types.BatchGetResult{Found: true, Value: pair.Value} } // LtHash pair: internal key directly @@ -180,11 +184,13 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { key: keyBytes, isDelete: true, } + legacyOld[keyStr] = types.BatchGetResult{Found: true, Value: nil} } else { s.legacyWrites[keyStr] = &pendingKVWrite{ key: keyBytes, value: pair.Value, } + legacyOld[keyStr] = types.BatchGetResult{Found: true, Value: pair.Value} } legacyPairs = append(legacyPairs, lthash.KVPairWithLastValue{ @@ -483,8 +489,11 @@ func (s *CommitStore) commitBatches(version int64) error { return nil } -// batchReadOldValues scans all changeset pairs and issues parallel BatchGet -// calls across the four data DBs. Returns one result map per DB. +// batchReadOldValues scans all changeset pairs and returns one result map per +// DB containing the "old value" for each key. Keys that already have uncommitted +// pending writes (from a prior ApplyChangeSets call in the same block) are +// resolved from those pending writes directly and excluded from the DB batch +// read, avoiding unnecessary I/O and cache pollution. func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( storageOld map[string]types.BatchGetResult, accountOld map[string]types.BatchGetResult, @@ -497,6 +506,22 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( codeOld = make(map[string]types.BatchGetResult) legacyOld = make(map[string]types.BatchGetResult) + // Separate maps for keys that need a DB read (no pending write). 
+ storageBatch := make(map[string]types.BatchGetResult) + accountBatch := make(map[string]types.BatchGetResult) + codeBatch := make(map[string]types.BatchGetResult) + legacyBatch := make(map[string]types.BatchGetResult) + + pendingKVResult := func(pw *pendingKVWrite) types.BatchGetResult { + if pw.isDelete { + return types.BatchGetResult{Found: true, Value: nil} + } + return types.BatchGetResult{Found: true, Value: pw.value} + } + + // Partition changeset keys: resolve from pending writes when available + // (prior ApplyChangeSets call in the same block), otherwise queue for + // a DB batch read. for _, namedCS := range cs { if namedCS.Changeset.Pairs == nil { continue @@ -505,28 +530,65 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( kind, keyBytes := evm.ParseEVMKey(pair.Key) switch kind { case evm.EVMKeyStorage: - storageOld[string(keyBytes)] = types.BatchGetResult{} + k := string(keyBytes) + if _, done := storageOld[k]; done { + continue + } + if pw, ok := s.storageWrites[k]; ok { + storageOld[k] = pendingKVResult(pw) + } else { + storageBatch[k] = types.BatchGetResult{} + } + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: addr, ok := AddressFromBytes(keyBytes) if !ok { continue } - accountOld[string(AccountKey(addr))] = types.BatchGetResult{} + k := string(AccountKey(addr)) + if _, done := accountOld[k]; done { + continue + } + if paw, ok := s.accountWrites[k]; ok { + accountOld[k] = types.BatchGetResult{Found: true, Value: EncodeAccountValue(paw.value)} + } else { + accountBatch[k] = types.BatchGetResult{} + } + case evm.EVMKeyCode: - codeOld[string(keyBytes)] = types.BatchGetResult{} + k := string(keyBytes) + if _, done := codeOld[k]; done { + continue + } + if pw, ok := s.codeWrites[k]; ok { + codeOld[k] = pendingKVResult(pw) + } else { + codeBatch[k] = types.BatchGetResult{} + } + case evm.EVMKeyLegacy: - legacyOld[string(keyBytes)] = types.BatchGetResult{} + k := string(keyBytes) + if _, done := legacyOld[k]; done { + continue + } + 
if pw, ok := s.legacyWrites[k]; ok { + legacyOld[k] = pendingKVResult(pw) + } else { + legacyBatch[k] = types.BatchGetResult{} + } } } } - var storageErr error + // Issue parallel BatchGet calls only for keys that need a DB read. var wg sync.WaitGroup - if len(storageOld) > 0 { + var storageErr, accountErr, codeErr, legacyErr error + + if len(storageBatch) > 0 { wg.Add(1) err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() - storageErr = s.storageDB.BatchGet(storageOld) + storageErr = s.storageDB.BatchGet(storageBatch) }) if err != nil { err = fmt.Errorf("failed to submit batch get: %w", err) @@ -534,24 +596,23 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( } } - var accountErr error - if len(accountOld) > 0 { + if len(accountBatch) > 0 { wg.Add(1) err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() - accountErr = s.accountDB.BatchGet(accountOld) + accountErr = s.accountDB.BatchGet(accountBatch) }) if err != nil { err = fmt.Errorf("failed to submit batch get: %w", err) return } } - var codeErr error - if len(codeOld) > 0 { + + if len(codeBatch) > 0 { wg.Add(1) err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() - codeErr = s.codeDB.BatchGet(codeOld) + codeErr = s.codeDB.BatchGet(codeBatch) }) if err != nil { err = fmt.Errorf("failed to submit batch get: %w", err) @@ -559,12 +620,11 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( } } - var legacyErr error - if len(legacyOld) > 0 { + if len(legacyBatch) > 0 { wg.Add(1) err = s.miscPool.Submit(s.ctx, func() { defer wg.Done() - legacyErr = s.legacyDB.BatchGet(legacyOld) + legacyErr = s.legacyDB.BatchGet(legacyBatch) }) if err != nil { err = fmt.Errorf("failed to submit batch get: %w", err) @@ -573,7 +633,23 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( } wg.Wait() - err = errors.Join(storageErr, accountErr, codeErr, legacyErr) + if err = errors.Join(storageErr, accountErr, codeErr, legacyErr); err != nil { + return + } + + // 
Merge DB results into the result maps. + for k, v := range storageBatch { + storageOld[k] = v + } + for k, v := range accountBatch { + accountOld[k] = v + } + for k, v := range codeBatch { + codeOld[k] = v + } + for k, v := range legacyBatch { + legacyOld[k] = v + } return } From c3f34b17cbe18708ba397430f8acc0a6f934cf88 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 11 Mar 2026 11:02:25 -0500 Subject: [PATCH 050/119] fixed path bug --- sei-db/state_db/sc/flatkv/config.go | 7 +++++++ sei-db/state_db/sc/flatkv/store.go | 9 ++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 9cc5d46194..dbe8eb40bd 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -107,6 +107,13 @@ func DefaultConfig() *Config { return cfg } +// Copy returns a deep copy of the Config. +func (c *Config) Copy() *Config { + // The nested PebbleDB configs are value types, so a shallow struct copy is sufficient. + cp := *c + return &cp +} + // InitializeDataDirectories sets the DataDir for each nested PebbleDB config // that does not already have one, using DataDir as the base path. The DBs live // under the working directory: /working/. diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index db604d13ac..a3911f2677 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -246,7 +246,8 @@ func (s *CommitStore) LoadVersion(targetVersion int64, readOnly bool) (_ Store, // loadVersionReadOnly creates an isolated, read-only CommitStore at the // requested version. 
func (s *CommitStore) loadVersionReadOnly(targetVersion int64) (_ Store, retErr error) { - ro, err := NewCommitStore(s.ctx, s.log, s.config) + roCfg := s.config.Copy() + ro, err := NewCommitStore(s.ctx, s.log, roCfg) if err != nil { return nil, fmt.Errorf("failed to create readonly store: %w", err) } @@ -257,6 +258,12 @@ func (s *CommitStore) loadVersionReadOnly(targetVersion int64) (_ Store, retErr } ro.readOnlyWorkDir = workDir + ro.config.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) + ro.config.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) + ro.config.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) + ro.config.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) + ro.config.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) + defer func() { if retErr != nil { if closeErr := ro.Close(); closeErr != nil { From 111459f5ea2ea32bb851ba64fcc95a83e478814f Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 11 Mar 2026 12:41:36 -0500 Subject: [PATCH 051/119] Helper files for flatKV cache --- .../db_engine/pebbledb/pebblecache/cache.go | 43 +++ .../pebbledb/pebblecache/lru_queue.go | 83 +++++ .../pebbledb/pebblecache/lru_queue_test.go | 310 ++++++++++++++++++ .../pebbledb/pebblecache/noop_cache.go | 47 +++ .../pebbledb/pebblecache/noop_cache_test.go | 150 +++++++++ .../pebbledb/pebblecache/shard_manager.go | 47 +++ 6 files changed, 680 insertions(+) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/lru_queue.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/noop_cache.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go create mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_manager.go diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache.go b/sei-db/db_engine/pebbledb/pebblecache/cache.go new file mode 100644 
index 0000000000..a9ba30060d --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/cache.go @@ -0,0 +1,43 @@ +package pebblecache + +import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + +// CacheUpdate describes a single key-value mutation to apply to the cache. +type CacheUpdate struct { + // The key to update. + Key []byte + // The value to set. If nil, the key will be deleted. + Value []byte + // If true, the key will be deleted. + // If false, the key will be set to the given value. + IsDelete bool +} + +// Cache describes a cache capable of being used by a FlatKV store. +type Cache interface { + + // Get returns the value for the given key, or (nil, false) if not found. + Get( + // The entry to fetch. + key []byte, + // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. + // Useful for when an operation is performed multiple times in close succession on the same key, + // since it requires non-zero overhead to do so with little benefit. + updateLru bool, + ) ([]byte, bool, error) + + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]types.BatchGetResult) error + + // Set sets the value for the given key. + Set(key []byte, value []byte) + + // Delete deletes the value for the given key. + Delete(key []byte) + + // BatchSet applies the given updates to the cache. + BatchSet(updates []CacheUpdate) error +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go new file mode 100644 index 0000000000..bea6b2e1d9 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go @@ -0,0 +1,83 @@ +package pebblecache + +import "container/list" + +// Implements a queue-like abstraction with LRU semantics. Not thread safe. 
+type lruQueue struct { + order *list.List + entries map[string]*list.Element + totalSize int +} + +type lruQueueEntry struct { + key string + size int +} + +// Create a new LRU queue. +func NewLRUQueue() *lruQueue { + return &lruQueue{ + order: list.New(), + entries: make(map[string]*list.Element), + } +} + +// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. +func (lru *lruQueue) Push( + // the key in the cache that was recently interacted with + key []byte, + // the size of the key + value + size int, +) { + if elem, ok := lru.entries[string(key)]; ok { + entry := elem.Value.(*lruQueueEntry) + lru.totalSize += size - entry.size + entry.size = size + lru.order.MoveToBack(elem) + return + } + + keyStr := string(key) + elem := lru.order.PushBack(&lruQueueEntry{ + key: keyStr, + size: size, + }) + lru.entries[keyStr] = elem + lru.totalSize += size +} + +// Signal that an entry has been interacted with, moving it to the back of the queue +// (i.e. making it so it doesn't get popped soon). +func (lru *lruQueue) Touch(key []byte) { + elem, ok := lru.entries[string(key)] + if !ok { + return + } + lru.order.MoveToBack(elem) +} + +// Returns the total size of all entries in the LRU queue. +func (lru *lruQueue) GetTotalSize() int { + return lru.totalSize +} + +// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. +func (lru *lruQueue) GetCount() int { + return len(lru.entries) +} + +// Pops a single element out of the queue. The element removed is the entry least recently passed to Push() or Touch(). +// Returns the key in string form to avoid copying the key an additional time. +// Panics if the queue is empty. 
+func (lru *lruQueue) PopLeastRecentlyUsed() string { + elem := lru.order.Front() + if elem == nil { + panic("cannot pop from empty LRU queue") + } + + lru.order.Remove(elem) + entry := elem.Value.(*lruQueueEntry) + delete(lru.entries, entry.key) + lru.totalSize -= entry.size + return entry.key +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go new file mode 100644 index 0000000000..70da01315a --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -0,0 +1,310 @@ +package pebblecache + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { + lru := NewLRUQueue() + + key := []byte("a") + lru.Push(key, 1) + key[0] = 'z' + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestNewLRUQueueStartsEmpty(t *testing.T) { + lru := NewLRUQueue() + + require.Equal(t, 0, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) +} + +func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { + lru := NewLRUQueue() + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("x"), 1) + lru.PopLeastRecentlyUsed() + + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPushSingleElement(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("only"), 42) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 42, lru.GetTotalSize()) + require.Equal(t, "only", lru.PopLeastRecentlyUsed()) +} + +func TestPushDuplicateDecreasesSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("k"), 100) + lru.Push([]byte("k"), 30) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) +} + +func TestPushDuplicateMovesToBack(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + 
lru.Push([]byte("c"), 1) + + // Re-push "a" — should move it behind "b" and "c" + lru.Push([]byte("a"), 1) + + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestPushZeroSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("z"), 0) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, "z", lru.PopLeastRecentlyUsed()) + require.Equal(t, 0, lru.GetTotalSize()) +} + +func TestPushEmptyKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte(""), 5) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "", lru.PopLeastRecentlyUsed()) +} + +func TestPushRepeatedUpdatesToSameKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("k"), 1) + lru.Push([]byte("k"), 2) + lru.Push([]byte("k"), 3) + lru.Push([]byte("k"), 4) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 4, lru.GetTotalSize()) +} + +func TestTouchNonexistentKeyIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + + lru.Touch([]byte("missing")) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestTouchOnEmptyQueueIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Touch([]byte("ghost")) + + require.Equal(t, 0, lru.GetCount()) +} + +func TestTouchSingleElement(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("solo"), 10) + lru.Touch([]byte("solo")) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) +} + +func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 3) + lru.Push([]byte("b"), 7) + + lru.Touch([]byte("a")) + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 10, lru.GetTotalSize()) +} + +func TestMultipleTouchesChangeOrder(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + 
lru.Push([]byte("c"), 1) + + // Order: a, b, c + lru.Touch([]byte("a")) // Order: b, c, a + lru.Touch([]byte("b")) // Order: c, a, b + + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("b")) // "b" is already at back + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestPopDecrementsCountAndSize(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("c"), 30) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 50, lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) +} + +func TestPopFIFOOrderWithoutTouches(t *testing.T) { + lru := NewLRUQueue() + keys := []string{"first", "second", "third", "fourth"} + for _, k := range keys { + lru.Push([]byte(k), 1) + } + + for _, want := range keys { + require.Equal(t, want, lru.PopLeastRecentlyUsed()) + } +} + +func TestPushAfterDrain(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("x"), 10) + lru.Push([]byte("y"), 20) + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 30, lru.GetTotalSize()) + require.Equal(t, "x", lru.PopLeastRecentlyUsed()) +} + +func TestPushPreviouslyPoppedKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("recycled"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("recycled"), 99) + + require.Equal(t, 1, lru.GetCount()) + require.Equal(t, 99, lru.GetTotalSize()) + require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) +} + +func TestInterleavedPushAndPop(t *testing.T) { + lru := NewLRUQueue() + + 
lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 2) + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + + lru.Push([]byte("c"), 3) + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 5, lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) +} + +func TestTouchThenPushSameKey(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("a")) // order: b, a + lru.Push([]byte("a"), 50) // updates size, stays at back + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, 51, lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestBinaryKeyData(t *testing.T) { + lru := NewLRUQueue() + k1 := []byte{0x00, 0xFF, 0x01} + k2 := []byte{0x00, 0xFF, 0x02} + + lru.Push(k1, 10) + lru.Push(k2, 20) + + require.Equal(t, 2, lru.GetCount()) + require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) + + lru.Touch(k2) + require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) +} + +func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { + lru := NewLRUQueue() + key := []byte("abc") + lru.Push(key, 1) + + key[0] = 'Z' + lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op + + require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) +} + +func TestManyEntries(t *testing.T) { + lru := NewLRUQueue() + n := 1000 + totalSize := 0 + + for i := 0; i < n; i++ { + k := fmt.Sprintf("key-%04d", i) + lru.Push([]byte(k), i+1) + totalSize += i + 1 + } + + require.Equal(t, n, lru.GetCount()) + require.Equal(t, totalSize, lru.GetTotalSize()) + + for i := 0; i < n; i++ { + want := fmt.Sprintf("key-%04d", i) + require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) + } + + require.Equal(t, 0, lru.GetCount()) + require.Equal(t, 0, lru.GetTotalSize()) +} + +func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { + lru := NewLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + 
lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 + + require.Equal(t, 25, lru.GetTotalSize()) + + // Pop "b" (it's the LRU since "a" was re-pushed to back). + lru.PopLeastRecentlyUsed() + require.Equal(t, 5, lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + require.Equal(t, 0, lru.GetTotalSize()) +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go new file mode 100644 index 0000000000..0013c5bd82 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go @@ -0,0 +1,47 @@ +package pebblecache + +import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + +var _ Cache = (*noOpCache)(nil) + +// noOpCache is a Cache that performs no caching. Every Get falls through +// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. +// Useful for testing the storage layer without cache interference, or for +// workloads where caching is not beneficial. +type noOpCache struct { + readFunc func(key []byte) ([]byte, bool, error) +} + +// NewNoOpCache creates a Cache that always reads from readFunc and never caches. 
+func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { + return &noOpCache{readFunc: readFunc} +} + +func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { + return c.readFunc(key) +} + +func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { + for k := range keys { + val, found, err := c.readFunc([]byte(k)) + if err != nil { + keys[k] = types.BatchGetResult{Error: err} + } else { + keys[k] = types.BatchGetResult{Value: val, Found: found} + } + } + return nil +} + +func (c *noOpCache) Set([]byte, []byte) { + // intentional no-op +} + +func (c *noOpCache) Delete([]byte) { + // intentional no-op +} + +func (c *noOpCache) BatchSet([]CacheUpdate) error { + // intentional no-op + return nil +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go new file mode 100644 index 0000000000..c1f0587ebf --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go @@ -0,0 +1,150 @@ +package pebblecache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +func newNoOpTestCache(store map[string][]byte) Cache { + return NewNoOpCache(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + }) +} + +func TestNoOpGetFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestNoOpGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + val, found, err := c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestNoOpGetError(t *testing.T) { + dbErr := errors.New("broken") + c := NewNoOpCache(func(key []byte) ([]byte, bool, 
error) { + return nil, false, dbErr + }) + + _, _, err := c.Get([]byte("k"), true) + require.ErrorIs(t, err, dbErr) +} + +func TestNoOpGetIgnoresUpdateLru(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val1, _, _ := c.Get([]byte("k"), true) + val2, _, _ := c.Get([]byte("k"), false) + require.Equal(t, string(val1), string(val2)) +} + +func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { + store := map[string][]byte{"k": []byte("v1")} + c := newNoOpTestCache(store) + + val, _, _ := c.Get([]byte("k"), true) + require.Equal(t, "v1", string(val)) + + store["k"] = []byte("v2") + + val, _, _ = c.Get([]byte("k"), true) + require.Equal(t, "v2", string(val), "should re-read from func, not cache") +} + +func TestNoOpSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + c.Set([]byte("k"), []byte("v")) + + _, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found, "Set should not cache anything") +} + +func TestNoOpDeleteIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found, "Delete should not affect reads") + require.Equal(t, "v", string(val)) +} + +func TestNoOpBatchSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + }) + require.NoError(t, err) + + _, found, _ := c.Get([]byte("a"), true) + require.False(t, found) + _, found, _ = c.Get([]byte("b"), true) + require.False(t, found) +} + +func TestNoOpBatchSetEmptyAndNil(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestNoOpBatchGetAllFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) + + 
keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].Found) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].Found) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestNoOpBatchGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{"x": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["x"].Found) +} + +func TestNoOpBatchGetError(t *testing.T) { + dbErr := errors.New("fail") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, c.BatchGet(keys)) + require.Error(t, keys["k"].Error) +} + +func TestNoOpBatchGetEmpty(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go new file mode 100644 index 0000000000..4f3bbdb41d --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go @@ -0,0 +1,47 @@ +package pebblecache + +import ( + "errors" + "hash/maphash" + "sync" +) + +var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") + +// A utility for assigning keys to shard indices. +type shardManager struct { + // A random seed that makmes it hard for an attacker to predict the shard index and to skew the distribution. + seed maphash.Seed + // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) + mask uint64 + // reusable Hash objects to avoid allocs + pool sync.Pool +} + +// Creates a new Sharder. Number of shards must be a power of two and greater than 0. 
+func NewShardManager(numShards uint64) (*shardManager, error) { + if numShards <= 0 || (numShards&(numShards-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + + return &shardManager{ + seed: maphash.MakeSeed(), // secret, randomized + mask: numShards - 1, + pool: sync.Pool{ + New: func() any { return new(maphash.Hash) }, + }, + }, nil +} + +// Shard returns a shard index in [0, numShards). +// addr should be the raw address bytes (e.g., 20-byte ETH address). +func (s *shardManager) Shard(addr []byte) uint64 { + h := s.pool.Get().(*maphash.Hash) + h.SetSeed(s.seed) + h.Reset() + _, _ = h.Write(addr) + x := h.Sum64() + s.pool.Put(h) + + return x & s.mask +} From d40395f80aa0a946e8376f460a46962a5e5df7c6 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 11 Mar 2026 12:43:55 -0500 Subject: [PATCH 052/119] add missing struct --- sei-db/db_engine/types/types.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 0f82ac85a2..bad1389ac7 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -20,6 +20,16 @@ type IterOptions struct { UpperBound []byte } +// BatchGetResult describes the result of a single key lookup within a BatchGet call. +type BatchGetResult struct { + // The value for the given key. + Value []byte + // If true, the key was found. + Found bool + // The error, if any, that occurred during the read. + Error error +} + // OpenOptions configures opening a DB. // // NOTE: This is intentionally minimal today. 
Most performance-critical knobs From ed7e4b692104f2e38bf6e1ab209a6cfb32aecfe4 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 12 Mar 2026 08:42:29 -0500 Subject: [PATCH 053/119] made suggested changes --- .../db_engine/pebbledb/pebblecache/cache.go | 26 +- .../pebbledb/pebblecache/lru_queue.go | 16 +- .../pebbledb/pebblecache/lru_queue_test.go | 54 ++-- .../pebbledb/pebblecache/noop_cache.go | 17 +- .../pebbledb/pebblecache/noop_cache_test.go | 10 +- .../pebbledb/pebblecache/shard_manager.go | 7 +- .../pebblecache/shard_manager_test.go | 271 ++++++++++++++++++ sei-db/db_engine/types/types.go | 9 +- 8 files changed, 349 insertions(+), 61 deletions(-) create mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache.go b/sei-db/db_engine/pebbledb/pebblecache/cache.go index a9ba30060d..345e8ba4b6 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/cache.go +++ b/sei-db/db_engine/pebbledb/pebblecache/cache.go @@ -2,18 +2,7 @@ package pebblecache import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -// CacheUpdate describes a single key-value mutation to apply to the cache. -type CacheUpdate struct { - // The key to update. - Key []byte - // The value to set. If nil, the key will be deleted. - Value []byte - // If true, the key will be deleted. - // If false, the key will be set to the given value. - IsDelete bool -} - -// Cache describes a cache kapable of being used by a FlatKV store. +// Cache describes a cache capable of being used by a FlatKV store. type Cache interface { // Get returns the value for the given key, or (nil, false) if not found. @@ -41,3 +30,16 @@ type Cache interface { // BatchSet applies the given updates to the cache. BatchSet(updates []CacheUpdate) error } + +// CacheUpdate describes a single key-value mutation to apply to the cache. +type CacheUpdate struct { + // The key to update. + Key []byte + // The value to set. If nil, the key will be deleted. 
+ Value []byte +} + +// IsDelete returns true if the update is a delete operation. +func (u *CacheUpdate) IsDelete() bool { + return u.Value == nil +} diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go index bea6b2e1d9..91f8b03a29 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go @@ -6,16 +6,16 @@ import "container/list" type lruQueue struct { order *list.List entries map[string]*list.Element - totalSize int + totalSize uint64 } type lruQueueEntry struct { key string - size int + size uint64 } // Create a new LRU queue. -func NewLRUQueue() *lruQueue { +func newLRUQueue() *lruQueue { return &lruQueue{ order: list.New(), entries: make(map[string]*list.Element), @@ -27,7 +27,7 @@ func (lru *lruQueue) Push( // the key in the cache that was recently interacted with key []byte, // the size of the key + value - size int, + size uint64, ) { if elem, ok := lru.entries[string(key)]; ok { entry := elem.Value.(*lruQueueEntry) @@ -46,7 +46,7 @@ func (lru *lruQueue) Push( lru.totalSize += size } -// Signal that an entry has been interated with, moving it to the to the back of the queue +// Signal that an entry has been interated with, moving it to the back of the queue // (i.e. making it so it doesn't get popped soon). func (lru *lruQueue) Touch(key []byte) { elem, ok := lru.entries[string(key)] @@ -57,13 +57,13 @@ func (lru *lruQueue) Touch(key []byte) { } // Returns the total size of all entries in the LRU queue. -func (lru *lruQueue) GetTotalSize() int { +func (lru *lruQueue) GetTotalSize() uint64 { return lru.totalSize } // Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. -func (lru *lruQueue) GetCount() int { - return len(lru.entries) +func (lru *lruQueue) GetCount() uint64 { + return uint64(len(lru.entries)) } // Pops a single element out of the queue. 
The element removed is the entry least recently passed to Update(). diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index 70da01315a..a82605980b 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -8,7 +8,7 @@ import ( ) func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() key := []byte("a") lru.Push(key, 1) @@ -18,19 +18,19 @@ func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { } func TestNewLRUQueueStartsEmpty(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() require.Equal(t, 0, lru.GetCount()) require.Equal(t, 0, lru.GetTotalSize()) } func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) } func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("x"), 1) lru.PopLeastRecentlyUsed() @@ -38,7 +38,7 @@ func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { } func TestPushSingleElement(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("only"), 42) require.Equal(t, 1, lru.GetCount()) @@ -47,7 +47,7 @@ func TestPushSingleElement(t *testing.T) { } func TestPushDuplicateDecreasesSize(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("k"), 100) lru.Push([]byte("k"), 30) @@ -56,7 +56,7 @@ func TestPushDuplicateDecreasesSize(t *testing.T) { } func TestPushDuplicateMovesToBack(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 1) lru.Push([]byte("c"), 1) @@ -70,7 +70,7 @@ func TestPushDuplicateMovesToBack(t *testing.T) { } func TestPushZeroSize(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("z"), 0) require.Equal(t, 1, 
lru.GetCount()) @@ -80,7 +80,7 @@ func TestPushZeroSize(t *testing.T) { } func TestPushEmptyKey(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte(""), 5) require.Equal(t, 1, lru.GetCount()) @@ -88,7 +88,7 @@ func TestPushEmptyKey(t *testing.T) { } func TestPushRepeatedUpdatesToSameKey(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("k"), 1) lru.Push([]byte("k"), 2) lru.Push([]byte("k"), 3) @@ -99,7 +99,7 @@ func TestPushRepeatedUpdatesToSameKey(t *testing.T) { } func TestTouchNonexistentKeyIsNoop(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Touch([]byte("missing")) @@ -109,14 +109,14 @@ func TestTouchNonexistentKeyIsNoop(t *testing.T) { } func TestTouchOnEmptyQueueIsNoop(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Touch([]byte("ghost")) require.Equal(t, 0, lru.GetCount()) } func TestTouchSingleElement(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("solo"), 10) lru.Touch([]byte("solo")) @@ -125,7 +125,7 @@ func TestTouchSingleElement(t *testing.T) { } func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 3) lru.Push([]byte("b"), 7) @@ -136,7 +136,7 @@ func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { } func TestMultipleTouchesChangeOrder(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 1) lru.Push([]byte("c"), 1) @@ -151,7 +151,7 @@ func TestMultipleTouchesChangeOrder(t *testing.T) { } func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 1) @@ -162,7 +162,7 @@ func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { } func TestPopDecrementsCountAndSize(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 10) lru.Push([]byte("b"), 20) lru.Push([]byte("c"), 30) 
@@ -179,7 +179,7 @@ func TestPopDecrementsCountAndSize(t *testing.T) { } func TestPopFIFOOrderWithoutTouches(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() keys := []string{"first", "second", "third", "fourth"} for _, k := range keys { lru.Push([]byte(k), 1) @@ -191,7 +191,7 @@ func TestPopFIFOOrderWithoutTouches(t *testing.T) { } func TestPushAfterDrain(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 5) lru.PopLeastRecentlyUsed() @@ -204,7 +204,7 @@ func TestPushAfterDrain(t *testing.T) { } func TestPushPreviouslyPoppedKey(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("recycled"), 5) lru.PopLeastRecentlyUsed() @@ -216,7 +216,7 @@ func TestPushPreviouslyPoppedKey(t *testing.T) { } func TestInterleavedPushAndPop(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 2) @@ -232,7 +232,7 @@ func TestInterleavedPushAndPop(t *testing.T) { } func TestTouchThenPushSameKey(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 1) lru.Push([]byte("b"), 1) @@ -245,7 +245,7 @@ func TestTouchThenPushSameKey(t *testing.T) { } func TestBinaryKeyData(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() k1 := []byte{0x00, 0xFF, 0x01} k2 := []byte{0x00, 0xFF, 0x02} @@ -260,7 +260,7 @@ func TestBinaryKeyData(t *testing.T) { } func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() key := []byte("abc") lru.Push(key, 1) @@ -271,13 +271,13 @@ func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { } func TestManyEntries(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() n := 1000 totalSize := 0 for i := 0; i < n; i++ { k := fmt.Sprintf("key-%04d", i) - lru.Push([]byte(k), i+1) + lru.Push([]byte(k), uint64(i+1)) totalSize += i + 1 } @@ -294,7 +294,7 @@ func TestManyEntries(t *testing.T) { } func 
TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { - lru := NewLRUQueue() + lru := newLRUQueue() lru.Push([]byte("a"), 10) lru.Push([]byte("b"), 20) lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go index 0013c5bd82..a8a05e4030 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go @@ -1,6 +1,10 @@ package pebblecache -import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) var _ Cache = (*noOpCache)(nil) @@ -22,14 +26,21 @@ func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { } func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { + var firstErr error for k := range keys { - val, found, err := c.readFunc([]byte(k)) + val, _, err := c.readFunc([]byte(k)) if err != nil { keys[k] = types.BatchGetResult{Error: err} + if firstErr == nil { + firstErr = err + } } else { - keys[k] = types.BatchGetResult{Value: val, Found: found} + keys[k] = types.BatchGetResult{Value: val} } } + if firstErr != nil { + return fmt.Errorf("unable to batch get: %w", firstErr) + } return nil } diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go index c1f0587ebf..4778b65ec9 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go @@ -117,9 +117,9 @@ func TestNoOpBatchGetAllFound(t *testing.T) { keys := map[string]types.BatchGetResult{"a": {}, "b": {}} require.NoError(t, c.BatchGet(keys)) - require.True(t, keys["a"].Found) + require.True(t, keys["a"].IsFound()) require.Equal(t, "1", string(keys["a"].Value)) - require.True(t, keys["b"].Found) + require.True(t, keys["b"].IsFound()) require.Equal(t, "2", 
string(keys["b"].Value)) } @@ -128,7 +128,7 @@ func TestNoOpBatchGetNotFound(t *testing.T) { keys := map[string]types.BatchGetResult{"x": {}} require.NoError(t, c.BatchGet(keys)) - require.False(t, keys["x"].Found) + require.False(t, keys["x"].IsFound()) } func TestNoOpBatchGetError(t *testing.T) { @@ -138,7 +138,9 @@ func TestNoOpBatchGetError(t *testing.T) { }) keys := map[string]types.BatchGetResult{"k": {}} - require.NoError(t, c.BatchGet(keys)) + err := c.BatchGet(keys) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) require.Error(t, keys["k"].Error) } diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go index 4f3bbdb41d..fb8f459bc3 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go @@ -10,7 +10,7 @@ var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and // A utility for assigning keys to shard indices. type shardManager struct { - // A random seed that makmes it hard for an attacker to predict the shard index and to skew the distribution. + // A random seed that makes it hard for an attacker to predict the shard index and to skew the distribution. seed maphash.Seed // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) mask uint64 @@ -19,8 +19,8 @@ type shardManager struct { } // Creates a new Sharder. Number of shards must be a power of two and greater than 0. 
-func NewShardManager(numShards uint64) (*shardManager, error) { - if numShards <= 0 || (numShards&(numShards-1)) != 0 { +func newShardManager(numShards uint64) (*shardManager, error) { + if numShards == 0 || (numShards&(numShards-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo } @@ -38,7 +38,6 @@ func NewShardManager(numShards uint64) (*shardManager, error) { func (s *shardManager) Shard(addr []byte) uint64 { h := s.pool.Get().(*maphash.Hash) h.SetSeed(s.seed) - h.Reset() _, _ = h.Write(addr) x := h.Sum64() s.pool.Put(h) diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go new file mode 100644 index 0000000000..bb96656fc5 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go @@ -0,0 +1,271 @@ +package pebblecache + +import ( + "fmt" + "math" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +// --- NewShardManager --- + +func TestNewShardManagerValidPowersOfTwo(t *testing.T) { + for exp := 0; exp < 20; exp++ { + n := uint64(1) << exp + sm, err := newShardManager(n) + require.NoError(t, err, "numShards=%d", n) + require.NotNil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerZeroReturnsError(t *testing.T) { + sm, err := newShardManager(0) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func TestNewShardManagerNonPowersOfTwoReturnError(t *testing.T) { + bad := []uint64{3, 5, 6, 7, 9, 10, 12, 15, 17, 100, 255, 1023} + for _, n := range bad { + sm, err := newShardManager(n) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo, "numShards=%d", n) + require.Nil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerMaxUint64ReturnsError(t *testing.T) { + sm, err := newShardManager(math.MaxUint64) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func TestNewShardManagerLargePowerOfTwo(t *testing.T) { + n := uint64(1) << 40 + sm, err := newShardManager(n) + 
require.NoError(t, err) + require.NotNil(t, sm) +} + +// --- Shard: basic behaviour --- + +func TestShardReturnsBoundedIndex(t *testing.T) { + for _, numShards := range []uint64{1, 2, 4, 16, 256, 1024} { + sm, err := newShardManager(numShards) + require.NoError(t, err) + + for i := 0; i < 500; i++ { + key := []byte(fmt.Sprintf("key-%d", i)) + idx := sm.Shard(key) + require.Less(t, idx, numShards, "numShards=%d key=%s", numShards, key) + } + } +} + +func TestShardDeterministic(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("deterministic-test-key") + first := sm.Shard(key) + for i := 0; i < 100; i++ { + require.Equal(t, first, sm.Shard(key)) + } +} + +func TestShardSingleShardAlwaysReturnsZero(t *testing.T) { + sm, err := newShardManager(1) + require.NoError(t, err) + + keys := [][]byte{ + {}, + {0x00}, + {0xFF}, + []byte("anything"), + []byte("another key entirely"), + } + for _, k := range keys { + require.Equal(t, uint64(0), sm.Shard(k), "key=%q", k) + } +} + +func TestShardEmptyKey(t *testing.T) { + sm, err := newShardManager(8) + require.NoError(t, err) + + idx := sm.Shard([]byte{}) + require.Less(t, idx, uint64(8)) + + // Deterministic + require.Equal(t, idx, sm.Shard([]byte{})) +} + +func TestShardNilKey(t *testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + idx := sm.Shard(nil) + require.Less(t, idx, uint64(4)) + require.Equal(t, idx, sm.Shard(nil)) +} + +func TestShardBinaryKeys(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + k1 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} + k2 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} + + idx1 := sm.Shard(k1) + idx2 := sm.Shard(k2) + require.Less(t, idx1, uint64(16)) + require.Less(t, idx2, uint64(16)) +} + +func 
TestShardCallerMutationDoesNotAffectFutureResults(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("mutable") + first := sm.Shard(key) + + key[0] = 'X' + second := sm.Shard([]byte("mutable")) + require.Equal(t, first, second) +} + +// --- Distribution --- + +func TestShardDistribution(t *testing.T) { + const numShards = 16 + const numKeys = 10_000 + sm, err := newShardManager(numShards) + require.NoError(t, err) + + counts := make([]int, numShards) + for i := 0; i < numKeys; i++ { + key := []byte(fmt.Sprintf("addr-%06d", i)) + counts[sm.Shard(key)]++ + } + + expected := float64(numKeys) / float64(numShards) + for shard, count := range counts { + ratio := float64(count) / expected + require.Greater(t, ratio, 0.5, "shard %d is severely underrepresented (%d)", shard, count) + require.Less(t, ratio, 1.5, "shard %d is severely overrepresented (%d)", shard, count) + } +} + +// --- Distinct managers --- + +func TestDifferentManagersHaveDifferentSeeds(t *testing.T) { + sm1, err := newShardManager(256) + require.NoError(t, err) + sm2, err := newShardManager(256) + require.NoError(t, err) + + // With distinct random seeds, at least some keys should hash differently. 
+ diffCount := 0 + for i := 0; i < 200; i++ { + key := []byte(fmt.Sprintf("seed-test-%d", i)) + if sm1.Shard(key) != sm2.Shard(key) { + diffCount++ + } + } + require.Greater(t, diffCount, 0, "two managers with independent seeds should differ on at least one key") +} + +// --- Concurrency --- + +func TestShardConcurrentAccess(t *testing.T) { + sm, err := newShardManager(64) + require.NoError(t, err) + + const goroutines = 32 + const iters = 1000 + + key := []byte("concurrent-key") + expected := sm.Shard(key) + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + go func() { + defer wg.Done() + for i := 0; i < iters; i++ { + got := sm.Shard(key) + if got != expected { + t.Errorf("concurrent Shard returned %d, want %d", got, expected) + return + } + } + }() + } + wg.Wait() +} + +func TestShardConcurrentDifferentKeys(t *testing.T) { + sm, err := newShardManager(32) + require.NoError(t, err) + + const goroutines = 16 + const keysPerGoroutine = 500 + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + g := g + go func() { + defer wg.Done() + for i := 0; i < keysPerGoroutine; i++ { + key := []byte(fmt.Sprintf("g%d-k%d", g, i)) + idx := sm.Shard(key) + if idx >= 32 { + t.Errorf("Shard(%q) = %d, want < 32", key, idx) + return + } + } + }() + } + wg.Wait() +} + +// --- Mask correctness --- + +func TestShardMaskMatchesNumShards(t *testing.T) { + for exp := 0; exp < 16; exp++ { + numShards := uint64(1) << exp + sm, err := newShardManager(numShards) + require.NoError(t, err) + require.Equal(t, numShards-1, sm.mask, "numShards=%d", numShards) + } +} + +// --- 20-byte ETH-style addresses --- + +func TestShardWith20ByteAddresses(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + addr := make([]byte, 20) + for i := 0; i < 20; i++ { + addr[i] = byte(i + 1) + } + + idx := sm.Shard(addr) + require.Less(t, idx, uint64(16)) + require.Equal(t, idx, sm.Shard(addr)) +} + +func TestShardSingleByteKey(t 
*testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + for b := 0; b < 256; b++ { + idx := sm.Shard([]byte{byte(b)}) + require.Less(t, idx, uint64(4), "byte=%d", b) + } +} diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index bad1389ac7..446ed39a65 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -22,14 +22,17 @@ type IterOptions struct { // BatchGetResult describes the result of a single key lookup within a BatchGet call. type BatchGetResult struct { - // The value for the given key. + // The value for the given key. If nil, the key was not found (but no error occurred). Value []byte - // If true, the key was found. - Found bool // The error, if any, that occurred during the read. Error error } +// IsFound returns true if the key was found (i.e. Value is not nil). +func (b BatchGetResult) IsFound() bool { + return b.Value != nil +} + // OpenOptions configures opening a DB. // // NOTE: This is intentionally minimal today. 
Most performance-critical knobs From 5c46647d886160a837d51856debff3d2765fc050 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 12 Mar 2026 09:09:11 -0500 Subject: [PATCH 054/119] fix tests --- .../pebbledb/pebblecache/lru_queue_test.go | 76 +++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go index a82605980b..58b624fc77 100644 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go @@ -20,8 +20,8 @@ func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { func TestNewLRUQueueStartsEmpty(t *testing.T) { lru := newLRUQueue() - require.Equal(t, 0, lru.GetCount()) - require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) } func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { @@ -41,8 +41,8 @@ func TestPushSingleElement(t *testing.T) { lru := newLRUQueue() lru.Push([]byte("only"), 42) - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 42, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(42), lru.GetTotalSize()) require.Equal(t, "only", lru.PopLeastRecentlyUsed()) } @@ -51,8 +51,8 @@ func TestPushDuplicateDecreasesSize(t *testing.T) { lru.Push([]byte("k"), 100) lru.Push([]byte("k"), 30) - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 30, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) } func TestPushDuplicateMovesToBack(t *testing.T) { @@ -73,17 +73,17 @@ func TestPushZeroSize(t *testing.T) { lru := newLRUQueue() lru.Push([]byte("z"), 0) - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) require.Equal(t, "z", 
lru.PopLeastRecentlyUsed()) - require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, uint64(0), lru.GetTotalSize()) } func TestPushEmptyKey(t *testing.T) { lru := newLRUQueue() lru.Push([]byte(""), 5) - require.Equal(t, 1, lru.GetCount()) + require.Equal(t, uint64(1), lru.GetCount()) require.Equal(t, "", lru.PopLeastRecentlyUsed()) } @@ -94,8 +94,8 @@ func TestPushRepeatedUpdatesToSameKey(t *testing.T) { lru.Push([]byte("k"), 3) lru.Push([]byte("k"), 4) - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 4, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(4), lru.GetTotalSize()) } func TestTouchNonexistentKeyIsNoop(t *testing.T) { @@ -104,7 +104,7 @@ func TestTouchNonexistentKeyIsNoop(t *testing.T) { lru.Touch([]byte("missing")) - require.Equal(t, 1, lru.GetCount()) + require.Equal(t, uint64(1), lru.GetCount()) require.Equal(t, "a", lru.PopLeastRecentlyUsed()) } @@ -112,7 +112,7 @@ func TestTouchOnEmptyQueueIsNoop(t *testing.T) { lru := newLRUQueue() lru.Touch([]byte("ghost")) - require.Equal(t, 0, lru.GetCount()) + require.Equal(t, uint64(0), lru.GetCount()) } func TestTouchSingleElement(t *testing.T) { @@ -120,7 +120,7 @@ func TestTouchSingleElement(t *testing.T) { lru.Push([]byte("solo"), 10) lru.Touch([]byte("solo")) - require.Equal(t, 1, lru.GetCount()) + require.Equal(t, uint64(1), lru.GetCount()) require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) } @@ -131,8 +131,8 @@ func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { lru.Touch([]byte("a")) - require.Equal(t, 2, lru.GetCount()) - require.Equal(t, 10, lru.GetTotalSize()) + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(10), lru.GetTotalSize()) } func TestMultipleTouchesChangeOrder(t *testing.T) { @@ -169,13 +169,13 @@ func TestPopDecrementsCountAndSize(t *testing.T) { lru.PopLeastRecentlyUsed() - require.Equal(t, 2, lru.GetCount()) - require.Equal(t, 50, lru.GetTotalSize()) + require.Equal(t, uint64(2), lru.GetCount()) + 
require.Equal(t, uint64(50), lru.GetTotalSize()) lru.PopLeastRecentlyUsed() - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 30, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) } func TestPopFIFOOrderWithoutTouches(t *testing.T) { @@ -198,8 +198,8 @@ func TestPushAfterDrain(t *testing.T) { lru.Push([]byte("x"), 10) lru.Push([]byte("y"), 20) - require.Equal(t, 2, lru.GetCount()) - require.Equal(t, 30, lru.GetTotalSize()) + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) require.Equal(t, "x", lru.PopLeastRecentlyUsed()) } @@ -210,8 +210,8 @@ func TestPushPreviouslyPoppedKey(t *testing.T) { lru.Push([]byte("recycled"), 99) - require.Equal(t, 1, lru.GetCount()) - require.Equal(t, 99, lru.GetTotalSize()) + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(99), lru.GetTotalSize()) require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) } @@ -225,8 +225,8 @@ func TestInterleavedPushAndPop(t *testing.T) { lru.Push([]byte("c"), 3) - require.Equal(t, 2, lru.GetCount()) - require.Equal(t, 5, lru.GetTotalSize()) + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(5), lru.GetTotalSize()) require.Equal(t, "b", lru.PopLeastRecentlyUsed()) require.Equal(t, "c", lru.PopLeastRecentlyUsed()) } @@ -239,8 +239,8 @@ func TestTouchThenPushSameKey(t *testing.T) { lru.Touch([]byte("a")) // order: b, a lru.Push([]byte("a"), 50) // updates size, stays at back - require.Equal(t, 2, lru.GetCount()) - require.Equal(t, 51, lru.GetTotalSize()) + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(51), lru.GetTotalSize()) require.Equal(t, "b", lru.PopLeastRecentlyUsed()) } @@ -252,7 +252,7 @@ func TestBinaryKeyData(t *testing.T) { lru.Push(k1, 10) lru.Push(k2, 20) - require.Equal(t, 2, lru.GetCount()) + require.Equal(t, uint64(2), lru.GetCount()) require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) lru.Touch(k2) 
@@ -273,15 +273,15 @@ func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { func TestManyEntries(t *testing.T) { lru := newLRUQueue() n := 1000 - totalSize := 0 + var totalSize uint64 for i := 0; i < n; i++ { k := fmt.Sprintf("key-%04d", i) lru.Push([]byte(k), uint64(i+1)) - totalSize += i + 1 + totalSize += uint64(i + 1) } - require.Equal(t, n, lru.GetCount()) + require.Equal(t, uint64(n), lru.GetCount()) require.Equal(t, totalSize, lru.GetTotalSize()) for i := 0; i < n; i++ { @@ -289,8 +289,8 @@ func TestManyEntries(t *testing.T) { require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) } - require.Equal(t, 0, lru.GetCount()) - require.Equal(t, 0, lru.GetTotalSize()) + require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) } func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { @@ -299,12 +299,12 @@ func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { lru.Push([]byte("b"), 20) lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 - require.Equal(t, 25, lru.GetTotalSize()) + require.Equal(t, uint64(25), lru.GetTotalSize()) // Pop "b" (it's the LRU since "a" was re-pushed to back). 
lru.PopLeastRecentlyUsed()

-	require.Equal(t, 5, lru.GetTotalSize())
+	require.Equal(t, uint64(5), lru.GetTotalSize())

	lru.PopLeastRecentlyUsed()
-	require.Equal(t, 0, lru.GetTotalSize())
+	require.Equal(t, uint64(0), lru.GetTotalSize())
 }

From bb2fe7e7ae667f390f721718849fcbde7ca0475e Mon Sep 17 00:00:00 2001
From: Cody Littley
Date: Fri, 13 Mar 2026 09:53:41 -0500
Subject: [PATCH 055/119] Made suggested change to cache structure

---
 Makefile                                      |   6 +
 sei-db/db_engine/pebbledb/batch.go            |  27 +-
 sei-db/db_engine/pebbledb/db.go               | 127 +--
 sei-db/db_engine/pebbledb/db_test.go          |  10 +-
 .../db_engine/pebbledb/pebblecache/cache.go   |  45 -
 .../pebbledb/pebblecache/cache_impl.go        | 188 ----
 .../pebbledb/pebblecache/cache_impl_test.go   | 689 ---------------
 .../pebbledb/pebblecache/cache_metrics.go     | 136 ---
 .../pebbledb/pebblecache/lru_queue.go         |  83 --
 .../pebbledb/pebblecache/lru_queue_test.go    | 310 -------
 .../pebbledb/pebblecache/noop_cache.go        |  58 --
 .../pebbledb/pebblecache/noop_cache_test.go   | 152 ----
 .../db_engine/pebbledb/pebblecache/shard.go   | 404 ---------
 .../pebbledb/pebblecache/shard_manager.go     |  46 -
 .../pebblecache/shard_manager_test.go         | 271 ------
 .../pebbledb/pebblecache/shard_test.go        | 815 ------------------
 sei-db/db_engine/pebbledb/pebbledb_config.go  |   7 +-
 sei-db/state_db/sc/flatkv/snapshot.go         |   3 +-
 sei-db/state_db/sc/flatkv/snapshot_test.go    |   7 +-
 sei-db/state_db/sc/flatkv/store.go            |   2 +-
 sei-db/state_db/sc/flatkv/store_test.go       |   2 +-
 21 files changed, 90 insertions(+), 3298 deletions(-)
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache.go
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache_impl.go
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/lru_queue.go
 delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go
 delete mode 100644
sei-db/db_engine/pebbledb/pebblecache/noop_cache.go delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard.go delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_manager.go delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go delete mode 100644 sei-db/db_engine/pebbledb/pebblecache/shard_test.go diff --git a/Makefile b/Makefile index 83b26cd84b..d7cfcf835c 100644 --- a/Makefile +++ b/Makefile @@ -157,6 +157,12 @@ lint: go mod tidy go mod verify +# Run lint on the sei-db package. Much faster than running lint on the entire project. +dblint: + go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0 run ./sei-db/... + go fmt ./sei-db/... + go vet ./sei-db/... + build: go build $(BUILD_FLAGS) -o ./build/seid ./cmd/seid diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index a8ecef0801..1ad8d0f4e1 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/cockroachdb/pebble/v2" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/pebblecache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -12,36 +11,20 @@ import ( // Important: Callers must call Close() after Commit() to release batch resources, // even if Commit() succeeds. Failure to Close() will leak memory. type pebbleBatch struct { - b *pebble.Batch - cache pebblecache.Cache - - // Writes are tracked so the cache can be updated after a successful commit. 
- pendingCacheUpdates []pebblecache.CacheUpdate + b *pebble.Batch } var _ types.Batch = (*pebbleBatch)(nil) -func newPebbleBatch(db *pebble.DB, cache pebblecache.Cache) *pebbleBatch { - return &pebbleBatch{b: db.NewBatch(), cache: cache} -} - func (p *pebbleDB) NewBatch() types.Batch { - return newPebbleBatch(p.db, p.cache) + return &pebbleBatch{b: p.db.NewBatch()} } func (pb *pebbleBatch) Set(key, value []byte) error { - pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, pebblecache.CacheUpdate{ - Key: key, - Value: value, - }) return pb.b.Set(key, value, nil) } func (pb *pebbleBatch) Delete(key []byte) error { - pb.pendingCacheUpdates = append(pb.pendingCacheUpdates, pebblecache.CacheUpdate{ - Key: key, - Value: nil, - }) return pb.b.Delete(key, nil) } @@ -50,11 +33,6 @@ func (pb *pebbleBatch) Commit(opts types.WriteOptions) error { if err != nil { return fmt.Errorf("failed to commit batch: %w", err) } - err = pb.cache.BatchSet(pb.pendingCacheUpdates) - if err != nil { - return fmt.Errorf("failed to set cache: %w", err) - } - pb.pendingCacheUpdates = nil return nil } @@ -64,7 +42,6 @@ func (pb *pebbleBatch) Len() int { func (pb *pebbleBatch) Reset() { pb.b.Reset() - pb.pendingCacheUpdates = nil } func (pb *pebbleBatch) Close() error { diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 476cdb6572..5f032cd388 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -13,7 +13,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/pebblecache" + dbcache "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/cache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -21,28 +21,21 @@ import ( type pebbleDB struct { db *pebble.DB metricsCancel context.CancelFunc - cache pebblecache.Cache } var _ types.KeyValueDB = (*pebbleDB)(nil) -// 
Open opens (or creates) a Pebble-backed DB at path, returning the DB interface. +// Open opens (or creates) a Pebble-backed DB at path, returning a KeyValueDB func Open( ctx context.Context, config *PebbleDBConfig, - // Used to determine the ordering of keys in the database. comparer *pebble.Comparer, - // A work pool for reading from the DB. - readPool threading.Pool, - // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. - miscPool threading.Pool, ) (_ types.KeyValueDB, err error) { if err := config.Validate(); err != nil { return nil, fmt.Errorf("failed to validate config: %w", err) } - // Internal pebbleDB block cache, used to cache uncompressed SSTable data blocks in memory. pebbleCache := pebble.NewCache(int64(config.BlockCacheSize)) defer pebbleCache.Unref() @@ -91,71 +84,91 @@ func Open( return nil, err } - readFunction := func(key []byte) ([]byte, bool, error) { - val, closer, err := db.Get(key) - if err != nil { - if errors.Is(err, pebble.ErrNotFound) { - return nil, false, nil - } - return nil, false, fmt.Errorf("failed to read from pebble: %w", err) - } - cloned := bytes.Clone(val) - _ = closer.Close() - return cloned, true, nil - } - ctx, cancel := context.WithCancel(ctx) if config.EnableMetrics { NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), config.MetricsScrapeInterval) } - var cache pebblecache.Cache - if config.CacheSize == 0 { - cache = pebblecache.NewNoOpCache(readFunction) - } else { - var cacheName string - if config.EnableMetrics { - cacheName = filepath.Base(config.DataDir) - } - - cache, err = pebblecache.NewCache( - ctx, - readFunction, - config.CacheShardCount, - config.CacheSize, - readPool, - miscPool, - cacheName, - config.MetricsScrapeInterval) - if err != nil { - cancel() - return nil, fmt.Errorf("failed to create flatcache: %w", err) - } - } - return &pebbleDB{ db: db, metricsCancel: cancel, - cache: cache, }, nil } -func (p *pebbleDB) Get(key []byte) ([]byte, error) { - val, 
found, err := p.cache.Get(key, true)
+// OpenWithCache opens a Pebble-backed DB and wraps it with a read-through cache.
+// Cache behaviour is controlled by config: when CacheSize is 0 a no-op cache
+// is used, otherwise a sharded LRU cache is created.
+func OpenWithCache(
+	ctx context.Context,
+	config *PebbleDBConfig,
+	comparer *pebble.Comparer,
+	readPool threading.Pool,
+	miscPool threading.Pool,
+) (types.KeyValueDB, error) {
+	db, err := Open(ctx, config, comparer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get value from cache: %w", err)
+		return nil, fmt.Errorf("failed to open database: %w", err)
+	}
+
+	readFunc := func(key []byte) ([]byte, bool, error) {
+		val, getErr := db.Get(key)
+		if getErr != nil {
+			if errorutils.IsNotFound(getErr) {
+				return nil, false, nil
+			}
+			return nil, false, getErr
+		}
+		return val, true, nil
 	}
-	if !found {
-		return nil, errorutils.ErrNotFound
+
+	var cacheName string
+	if config.EnableMetrics {
+		cacheName = filepath.Base(config.DataDir)
+	}
+
+	cache, err := dbcache.BuildCache(
+		ctx,
+		readFunc,
+		config.CacheShardCount,
+		config.CacheSize,
+		readPool,
+		miscPool,
+		cacheName,
+		config.MetricsScrapeInterval,
+	)
+	if err != nil {
+		_ = db.Close()
+		return nil, fmt.Errorf("failed to create cache: %w", err)
 	}
-	return val, nil
+	return dbcache.NewCachedKeyValueDB(db, cache), nil
 }

-func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) error {
-	err := p.cache.BatchGet(keys)
+func (p *pebbleDB) Get(key []byte) ([]byte, error) {
+	val, closer, err := p.db.Get(key)
 	if err != nil {
-		return fmt.Errorf("failed to get values from cache: %w", err)
+		if errors.Is(err, pebble.ErrNotFound) {
+			return nil, errorutils.ErrNotFound
+		}
+		return nil, fmt.Errorf("failed to get value from database: %w", err)
+	}
+	cloned := bytes.Clone(val)
+	_ = closer.Close()
+	return cloned, nil
+}
+
+func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) error {
+	for k := range keys {
+		val, err := p.Get([]byte(k))
+		if
err != nil { + if errorutils.IsNotFound(err) { + keys[k] = types.BatchGetResult{} + } else { + keys[k] = types.BatchGetResult{Error: err} + } + } else { + keys[k] = types.BatchGetResult{Value: val} + } } return nil } @@ -165,7 +178,6 @@ func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { if err != nil { return fmt.Errorf("failed to set value in database: %w", err) } - p.cache.Set(key, value) return nil } @@ -174,7 +186,6 @@ func (p *pebbleDB) Delete(key []byte, opts types.WriteOptions) error { if err != nil { return fmt.Errorf("failed to delete value in database: %w", err) } - p.cache.Delete(key) return nil } diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 7f8a066c45..c583224d87 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -18,7 +18,7 @@ import ( func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig)) { for _, mode := range []struct { name string - cacheSize int + cacheSize uint64 }{ {"cached", 16 * unit.MB}, {"uncached", 0}, @@ -33,7 +33,8 @@ func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig)) { func openDB(t *testing.T, cfg *PebbleDBConfig) types.KeyValueDB { t.Helper() - db, err := Open(t.Context(), cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) + db, err := OpenWithCache(t.Context(), cfg, pebble.DefaultComparer, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) return db @@ -220,7 +221,7 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) + db, err := OpenWithCache(t.Context(), &cfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -265,7 +266,8 @@ func 
TestIteratorSeekLTAndValue(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { cfg := DefaultTestConfig(t) - db, err := Open(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) + db, err := OpenWithCache(t.Context(), &cfg, pebble.DefaultComparer, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) require.NoError(t, db.Close()) diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache.go b/sei-db/db_engine/pebbledb/pebblecache/cache.go deleted file mode 100644 index 345e8ba4b6..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/cache.go +++ /dev/null @@ -1,45 +0,0 @@ -package pebblecache - -import "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" - -// Cache describes a cache capable of being used by a FlatKV store. -type Cache interface { - - // Get returns the value for the given key, or (nil, false) if not found. - Get( - // The entry to fetch. - key []byte, - // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. - // Useful for when an operation is performed multiple times in close succession on the same key, - // since it requires non-zero overhead to do so with little benefit. - updateLru bool, - ) ([]byte, bool, error) - - // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the - // map with the results. - // - // It is not thread safe to read or mutate the map while this method is running. - BatchGet(keys map[string]types.BatchGetResult) error - - // Set sets the value for the given key. - Set(key []byte, value []byte) - - // Delete deletes the value for the given key. - Delete(key []byte) - - // BatchSet applies the given updates to the cache. - BatchSet(updates []CacheUpdate) error -} - -// CacheUpdate describes a single key-value mutation to apply to the cache. -type CacheUpdate struct { - // The key to update. - Key []byte - // The value to set. If nil, the key will be deleted. 
- Value []byte -} - -// IsDelete returns true if the update is a delete operation. -func (u *CacheUpdate) IsDelete() bool { - return u.Value == nil -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go deleted file mode 100644 index 79b08fea6f..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_impl.go +++ /dev/null @@ -1,188 +0,0 @@ -package pebblecache - -import ( - "context" - "fmt" - "sync" - "time" - - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -var _ Cache = (*cache)(nil) - -// A standard implementation of a flatcache. -type cache struct { - ctx context.Context - - // A utility for assigning keys to shard indices. - shardManager *shardManager - - // The shards in the cache. - shards []*shard - - // A pool for asynchronous reads. - readPool threading.Pool - - // A pool for miscellaneous operations that are neither computationally intensive nor IO bound. - miscPool threading.Pool -} - -// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the -// background size scrape runs every metricsScrapeInterval. -func NewCache( - ctx context.Context, - // A function that reads a value from the database. - readFunc func(key []byte) ([]byte, bool, error), - // The number of shards in the cache. Must be a power of two and greater than 0. - shardCount int, - // The maximum size of the cache, in bytes. - maxSize int, - // A work pool for reading from the DB. - readPool threading.Pool, - // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. - miscPool threading.Pool, - // Name used as the "cache" attribute on metrics. Empty string disables metrics. - cacheName string, - // How often to scrape cache size for metrics. Ignored if cacheName is empty. 
- metricsScrapeInterval time.Duration, -) (Cache, error) { - if shardCount <= 0 || (shardCount&(shardCount-1)) != 0 { - return nil, ErrNumShardsNotPowerOfTwo - } - if maxSize <= 0 { - return nil, fmt.Errorf("maxSize must be greater than 0") - } - - shardManager, err := newShardManager(uint64(shardCount)) - if err != nil { - return nil, fmt.Errorf("failed to create shard manager: %w", err) - } - sizePerShard := maxSize / shardCount - if sizePerShard <= 0 { - return nil, fmt.Errorf("maxSize must be greater than shardCount") - } - - shards := make([]*shard, shardCount) - for i := 0; i < shardCount; i++ { - shards[i], err = NewShard(ctx, readPool, readFunc, uint64(sizePerShard)) - if err != nil { - return nil, fmt.Errorf("failed to create shard: %w", err) - } - } - - c := &cache{ - ctx: ctx, - shardManager: shardManager, - shards: shards, - readPool: readPool, - miscPool: miscPool, - } - - if cacheName != "" { - metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) - for _, s := range c.shards { - s.metrics = metrics - } - } - - return c, nil -} - -func (c *cache) getCacheSizeInfo() (bytes uint64, entries uint64) { - for _, s := range c.shards { - b, e := s.getSizeInfo() - bytes += b - entries += e - } - return bytes, entries -} - -func (c *cache) BatchSet(updates []CacheUpdate) error { - // Sort entries by shard index so each shard is locked only once. 
- shardMap := make(map[uint64][]CacheUpdate) - for i := range updates { - idx := c.shardManager.Shard(updates[i].Key) - shardMap[idx] = append(shardMap[idx], updates[i]) - } - - var wg sync.WaitGroup - for shardIndex, shardEntries := range shardMap { - wg.Add(1) - err := c.miscPool.Submit(c.ctx, func() { - c.shards[shardIndex].BatchSet(shardEntries) - wg.Done() - }) - if err != nil { - return fmt.Errorf("failed to submit batch set: %w", err) - } - } - wg.Wait() - - return nil -} - -func (c *cache) BatchGet(keys map[string]types.BatchGetResult) error { - work := make(map[uint64]map[string]types.BatchGetResult) - for key := range keys { - idx := c.shardManager.Shard([]byte(key)) - if work[idx] == nil { - work[idx] = make(map[string]types.BatchGetResult) - } - work[idx][key] = types.BatchGetResult{} - } - - var wg sync.WaitGroup - for shardIndex, subMap := range work { - wg.Add(1) - - err := c.miscPool.Submit(c.ctx, func() { - defer wg.Done() - err := c.shards[shardIndex].BatchGet(subMap) - if err != nil { - for key := range subMap { - subMap[key] = types.BatchGetResult{Error: err} - } - } - }) - if err != nil { - return fmt.Errorf("failed to submit batch get: %w", err) - } - } - wg.Wait() - - for _, subMap := range work { - for key, result := range subMap { - keys[key] = result - } - } - - return nil -} - -func (c *cache) Delete(key []byte) { - shardIndex := c.shardManager.Shard(key) - shard := c.shards[shardIndex] - shard.Delete(key) -} - -func (c *cache) Get(key []byte, updateLru bool) ([]byte, bool, error) { - shardIndex := c.shardManager.Shard(key) - shard := c.shards[shardIndex] - - value, ok, err := shard.Get(key, updateLru) - if err != nil { - return nil, false, fmt.Errorf("failed to get value from shard: %w", err) - } - if !ok { - return nil, false, nil - } - return value, ok, nil -} - -func (c *cache) Set(key []byte, value []byte) { - shardIndex := c.shardManager.Shard(key) - shard := c.shards[shardIndex] - shard.Set(key, value) -} diff --git 
a/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go b/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go deleted file mode 100644 index e377866d05..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_impl_test.go +++ /dev/null @@ -1,689 +0,0 @@ -package pebblecache - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// --------------------------------------------------------------------------- -// helpers -// --------------------------------------------------------------------------- - -func noopRead(key []byte) ([]byte, bool, error) { return nil, false, nil } - -func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize int) Cache { - t.Helper() - readFunc := func(key []byte) ([]byte, bool, error) { - v, ok := store[string(key)] - if !ok { - return nil, false, nil - } - return v, true, nil - } - pool := threading.NewAdHocPool() - c, err := NewCache(context.Background(), readFunc, shardCount, maxSize, pool, pool, "", 0) - require.NoError(t, err) - return c -} - -// --------------------------------------------------------------------------- -// NewCache — validation -// --------------------------------------------------------------------------- - -func TestNewCacheValid(t *testing.T) { - pool := threading.NewAdHocPool() - c, err := NewCache(context.Background(), noopRead, 4, 1024, pool, pool, "", 0) - require.NoError(t, err) - require.NotNil(t, c) -} - -func TestNewCacheSingleShard(t *testing.T) { - pool := threading.NewAdHocPool() - c, err := NewCache(context.Background(), noopRead, 1, 1024, pool, pool, "", 0) - require.NoError(t, err) - require.NotNil(t, c) -} - -func TestNewCacheShardCountZero(t *testing.T) { - pool := threading.NewAdHocPool() - _, err := NewCache(context.Background(), noopRead, 0, 1024, pool, pool, 
"", 0) - require.Error(t, err) -} - -func TestNewCacheShardCountNegative(t *testing.T) { - pool := threading.NewAdHocPool() - _, err := NewCache(context.Background(), noopRead, -1, 1024, pool, pool, "", 0) - require.Error(t, err) -} - -func TestNewCacheShardCountNotPowerOfTwo(t *testing.T) { - pool := threading.NewAdHocPool() - for _, n := range []int{3, 5, 6, 7, 9, 10} { - _, err := NewCache(context.Background(), noopRead, n, 1024, pool, pool, "", 0) - require.Error(t, err, "shardCount=%d", n) - } -} - -func TestNewCacheMaxSizeZero(t *testing.T) { - pool := threading.NewAdHocPool() - _, err := NewCache(context.Background(), noopRead, 4, 0, pool, pool, "", 0) - require.Error(t, err) -} - -func TestNewCacheMaxSizeNegative(t *testing.T) { - pool := threading.NewAdHocPool() - _, err := NewCache(context.Background(), noopRead, 4, -100, pool, pool, "", 0) - require.Error(t, err) -} - -func TestNewCacheMaxSizeLessThanShardCount(t *testing.T) { - pool := threading.NewAdHocPool() - // shardCount=4, maxSize=3 → sizePerShard=0 - _, err := NewCache(context.Background(), noopRead, 4, 3, pool, pool, "", 0) - require.Error(t, err) -} - -func TestNewCacheWithMetrics(t *testing.T) { - pool := threading.NewAdHocPool() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - c, err := NewCache(ctx, noopRead, 2, 1024, pool, pool, "test-cache", time.Hour) - require.NoError(t, err) - require.NotNil(t, c) -} - -// --------------------------------------------------------------------------- -// Get -// --------------------------------------------------------------------------- - -func TestCacheGetFromDB(t *testing.T) { - store := map[string][]byte{"foo": []byte("bar")} - c := newTestCache(t, store, 4, 4096) - - val, found, err := c.Get([]byte("foo"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "bar", string(val)) -} - -func TestCacheGetNotFound(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - val, found, err := 
c.Get([]byte("missing"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestCacheGetAfterSet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("k"), []byte("v")) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) -} - -func TestCacheGetAfterDelete(t *testing.T) { - store := map[string][]byte{"k": []byte("v")} - c := newTestCache(t, store, 4, 4096) - - c.Delete([]byte("k")) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestCacheGetDBError(t *testing.T) { - dbErr := errors.New("db fail") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - pool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) - - _, _, err := c.Get([]byte("k"), true) - require.Error(t, err) - require.ErrorIs(t, err, dbErr) -} - -func TestCacheGetSameKeyConsistentShard(t *testing.T) { - var readCalls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { - readCalls.Add(1) - return []byte("val"), true, nil - } - pool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 4, 4096, pool, pool, "", 0) - - // First call populates cache in a specific shard. - val1, _, _ := c.Get([]byte("key"), true) - // Second call should hit cache in the same shard. 
- val2, _, _ := c.Get([]byte("key"), true) - - require.Equal(t, string(val1), string(val2)) - require.Equal(t, int64(1), readCalls.Load(), "second Get should hit cache") -} - -// --------------------------------------------------------------------------- -// Set -// --------------------------------------------------------------------------- - -func TestCacheSetNewKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("a"), []byte("1")) - - val, found, err := c.Get([]byte("a"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "1", string(val)) -} - -func TestCacheSetOverwrite(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("a"), []byte("old")) - c.Set([]byte("a"), []byte("new")) - - val, found, err := c.Get([]byte("a"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "new", string(val)) -} - -func TestCacheSetNilValue(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("k"), nil) - - val, found, err := c.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Nil(t, val) -} - -// --------------------------------------------------------------------------- -// Delete -// --------------------------------------------------------------------------- - -func TestCacheDeleteExistingKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("k"), []byte("v")) - c.Delete([]byte("k")) - - _, found, err := c.Get([]byte("k"), false) - require.NoError(t, err) - require.False(t, found) -} - -func TestCacheDeleteNonexistent(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Delete([]byte("ghost")) - - _, found, err := c.Get([]byte("ghost"), false) - require.NoError(t, err) - require.False(t, found) -} - -func TestCacheDeleteThenSet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("k"), []byte("v1")) - 
c.Delete([]byte("k")) - c.Set([]byte("k"), []byte("v2")) - - val, found, err := c.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v2", string(val)) -} - -// --------------------------------------------------------------------------- -// BatchSet -// --------------------------------------------------------------------------- - -func TestCacheBatchSetMultipleKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("b"), Value: []byte("2")}, - {Key: []byte("c"), Value: []byte("3")}, - }) - require.NoError(t, err) - - for _, tc := range []struct{ key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { - val, found, err := c.Get([]byte(tc.key), false) - require.NoError(t, err, "key=%q", tc.key) - require.True(t, found, "key=%q", tc.key) - require.Equal(t, tc.want, string(val), "key=%q", tc.key) - } -} - -func TestCacheBatchSetMixedSetAndDelete(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("keep"), []byte("v")) - c.Set([]byte("remove"), []byte("v")) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("keep"), Value: []byte("updated")}, - {Key: []byte("remove"), Value: nil}, - {Key: []byte("new"), Value: []byte("fresh")}, - }) - require.NoError(t, err) - - val, found, _ := c.Get([]byte("keep"), false) - require.True(t, found) - require.Equal(t, "updated", string(val)) - - _, found, _ = c.Get([]byte("remove"), false) - require.False(t, found) - - val, found, _ = c.Get([]byte("new"), false) - require.True(t, found) - require.Equal(t, "fresh", string(val)) -} - -func TestCacheBatchSetEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - require.NoError(t, c.BatchSet(nil)) - require.NoError(t, c.BatchSet([]CacheUpdate{})) -} - -func TestCacheBatchSetPoolFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } 
- readPool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("k"), Value: []byte("v")}, - }) - require.Error(t, err) -} - -// --------------------------------------------------------------------------- -// BatchGet -// --------------------------------------------------------------------------- - -func TestCacheBatchGetAllCached(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("a"), []byte("1")) - c.Set([]byte("b"), []byte("2")) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["a"].IsFound()) - require.Equal(t, "1", string(keys["a"].Value)) - require.True(t, keys["b"].IsFound()) - require.Equal(t, "2", string(keys["b"].Value)) -} - -func TestCacheBatchGetAllFromDB(t *testing.T) { - store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} - c := newTestCache(t, store, 4, 4096) - - keys := map[string]types.BatchGetResult{"x": {}, "y": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["x"].IsFound()) - require.Equal(t, "10", string(keys["x"].Value)) - require.True(t, keys["y"].IsFound()) - require.Equal(t, "20", string(keys["y"].Value)) -} - -func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { - store := map[string][]byte{"db-key": []byte("from-db")} - c := newTestCache(t, store, 4, 4096) - - c.Set([]byte("cached"), []byte("from-cache")) - - keys := map[string]types.BatchGetResult{"cached": {}, "db-key": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["cached"].IsFound()) - require.Equal(t, "from-cache", string(keys["cached"].Value)) - require.True(t, keys["db-key"].IsFound()) - require.Equal(t, "from-db", string(keys["db-key"].Value)) -} - -func TestCacheBatchGetNotFoundKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - keys := 
map[string]types.BatchGetResult{"nope": {}} - require.NoError(t, c.BatchGet(keys)) - require.False(t, keys["nope"].IsFound()) -} - -func TestCacheBatchGetDeletedKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("k"), []byte("v")) - c.Delete([]byte("k")) - - keys := map[string]types.BatchGetResult{"k": {}} - require.NoError(t, c.BatchGet(keys)) - require.False(t, keys["k"].IsFound()) -} - -func TestCacheBatchGetDBError(t *testing.T) { - dbErr := errors.New("broken") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - pool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) - - keys := map[string]types.BatchGetResult{"fail": {}} - require.NoError(t, c.BatchGet(keys), "BatchGet itself should not fail") - require.Error(t, keys["fail"].Error) -} - -func TestCacheBatchGetEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - keys := map[string]types.BatchGetResult{} - require.NoError(t, c.BatchGet(keys)) -} - -func TestCacheBatchGetPoolFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - readPool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) - - keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(keys) - require.Error(t, err) -} - -func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { - // miscPool succeeds (goroutine runs), but readPool fails inside shard.BatchGet, - // causing the per-key error branch to be hit. 
- readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - miscPool := threading.NewAdHocPool() - c, _ := NewCache(context.Background(), readFunc, 1, 4096, &failPool{}, miscPool, "", 0) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) - - for k, r := range keys { - require.Error(t, r.Error, "key=%q should have per-key error", k) - } -} - -// --------------------------------------------------------------------------- -// Cross-shard distribution -// --------------------------------------------------------------------------- - -func TestCacheDistributesAcrossShards(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - impl := c.(*cache) - - // Insert enough distinct keys that at least 2 shards get entries. - for i := 0; i < 100; i++ { - c.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("v")) - } - - nonEmpty := 0 - for _, s := range impl.shards { - _, entries := s.getSizeInfo() - if entries > 0 { - nonEmpty++ - } - } - require.GreaterOrEqual(t, nonEmpty, 2, "keys should distribute across multiple shards") -} - -func TestCacheGetRoutesToSameShard(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - impl := c.(*cache) - - c.Set([]byte("key"), []byte("val")) - - idx := impl.shardManager.Shard([]byte("key")) - _, entries := impl.shards[idx].getSizeInfo() - require.Equal(t, 1, entries, "key should be in the shard determined by shardManager") -} - -// --------------------------------------------------------------------------- -// getCacheSizeInfo -// --------------------------------------------------------------------------- - -func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - impl := c.(*cache) - - bytes, entries := impl.getCacheSizeInfo() - require.Equal(t, int64(0), bytes) - require.Equal(t, int64(0), entries) -} - -func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { - c := newTestCache(t, 
map[string][]byte{}, 4, 4096) - impl := c.(*cache) - - for i := 0; i < 20; i++ { - c.Set([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) - } - - bytes, entries := impl.getCacheSizeInfo() - require.Equal(t, int64(20), entries) - require.Greater(t, bytes, int64(0)) -} - -// --------------------------------------------------------------------------- -// Many keys — BatchGet/BatchSet spanning all shards -// --------------------------------------------------------------------------- - -func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 100_000) - - updates := make([]CacheUpdate, 200) - for i := range updates { - updates[i] = CacheUpdate{ - Key: []byte(fmt.Sprintf("key-%03d", i)), - Value: []byte(fmt.Sprintf("val-%03d", i)), - } - } - require.NoError(t, c.BatchSet(updates)) - - keys := make(map[string]types.BatchGetResult, 200) - for i := 0; i < 200; i++ { - keys[fmt.Sprintf("key-%03d", i)] = types.BatchGetResult{} - } - require.NoError(t, c.BatchGet(keys)) - - for i := 0; i < 200; i++ { - k := fmt.Sprintf("key-%03d", i) - want := fmt.Sprintf("val-%03d", i) - require.True(t, keys[k].IsFound(), "key=%q", k) - require.Equal(t, want, string(keys[k].Value), "key=%q", k) - require.NoError(t, keys[k].Error, "key=%q", k) - } -} - -// --------------------------------------------------------------------------- -// Concurrency -// --------------------------------------------------------------------------- - -func TestCacheConcurrentGetSet(t *testing.T) { - store := map[string][]byte{} - for i := 0; i < 50; i++ { - store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) - } - c := newTestCache(t, store, 4, 100_000) - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(2) - key := []byte(fmt.Sprintf("key-%d", i)) - val := []byte(fmt.Sprintf("val-%d", i)) - - go func() { - defer wg.Done() - c.Set(key, val) - }() - go func() { - defer wg.Done() - c.Get(key, true) - }() - } - wg.Wait() -} - -func 
TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { - store := map[string][]byte{} - for i := 0; i < 50; i++ { - store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) - } - c := newTestCache(t, store, 4, 100_000) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - updates := make([]CacheUpdate, 50) - for i := range updates { - updates[i] = CacheUpdate{ - Key: []byte(fmt.Sprintf("set-%d", i)), - Value: []byte(fmt.Sprintf("sv-%d", i)), - } - } - c.BatchSet(updates) - }() - - wg.Add(1) - go func() { - defer wg.Done() - keys := make(map[string]types.BatchGetResult) - for i := 0; i < 50; i++ { - keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} - } - c.BatchGet(keys) - }() - - wg.Wait() -} - -func TestCacheConcurrentDeleteAndGet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 100_000) - - for i := 0; i < 100; i++ { - c.Set([]byte(fmt.Sprintf("k-%d", i)), []byte("v")) - } - - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(2) - key := []byte(fmt.Sprintf("k-%d", i)) - go func() { - defer wg.Done() - c.Delete(key) - }() - go func() { - defer wg.Done() - c.Get(key, true) - }() - } - wg.Wait() -} - -// --------------------------------------------------------------------------- -// Eviction through the cache layer -// --------------------------------------------------------------------------- - -func TestCacheEvictsPerShard(t *testing.T) { - // 1 shard, maxSize=20. Inserting more than 20 bytes triggers eviction. 
- c := newTestCache(t, map[string][]byte{}, 1, 20) - impl := c.(*cache) - - // key(1) + value(8) = 9 bytes each - c.Set([]byte("a"), []byte("11111111")) - c.Set([]byte("b"), []byte("22222222")) - // 18 bytes, fits - - c.Set([]byte("c"), []byte("33333333")) - // 27 bytes → must evict to get under 20 - - bytes, _ := impl.shards[0].getSizeInfo() - require.LessOrEqual(t, bytes, 20) -} - -// --------------------------------------------------------------------------- -// Edge: BatchSet with keys all routed to the same shard -// --------------------------------------------------------------------------- - -func TestCacheBatchSetSameShard(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 1, 4096) - - // With 1 shard, every key goes to shard 0. - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("x"), Value: []byte("1")}, - {Key: []byte("y"), Value: []byte("2")}, - {Key: []byte("z"), Value: []byte("3")}, - }) - require.NoError(t, err) - - for _, tc := range []struct{ key, want string }{{"x", "1"}, {"y", "2"}, {"z", "3"}} { - val, found, err := c.Get([]byte(tc.key), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, tc.want, string(val)) - } -} - -// --------------------------------------------------------------------------- -// Edge: BatchGet after BatchSet with deletes -// --------------------------------------------------------------------------- - -func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) - - c.Set([]byte("a"), []byte("1")) - c.Set([]byte("b"), []byte("2")) - c.Set([]byte("c"), []byte("3")) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("a"), Value: []byte("updated")}, - {Key: []byte("b"), Value: nil}, - }) - require.NoError(t, err) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}, "c": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["a"].IsFound()) - require.Equal(t, "updated", string(keys["a"].Value)) - require.False(t, 
keys["b"].IsFound()) - require.True(t, keys["c"].IsFound()) - require.Equal(t, "3", string(keys["c"].Value)) -} - -// --------------------------------------------------------------------------- -// Power-of-two shard counts -// --------------------------------------------------------------------------- - -func TestNewCachePowerOfTwoShardCounts(t *testing.T) { - pool := threading.NewAdHocPool() - for _, n := range []int{1, 2, 4, 8, 16, 32, 64} { - c, err := NewCache(context.Background(), noopRead, n, n*100, pool, pool, "", 0) - require.NoError(t, err, "shardCount=%d", n) - require.NotNil(t, c, "shardCount=%d", n) - } -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go b/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go deleted file mode 100644 index d6afa0487e..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/cache_metrics.go +++ /dev/null @@ -1,136 +0,0 @@ -package pebblecache - -import ( - "context" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - - smetrics "github.com/sei-protocol/sei-chain/sei-db/common/metrics" -) - -const cacheMeterName = "seidb_pebblecache" - -// CacheMetrics records OTel metrics for a pebblecache instance. -// All report methods are nil-safe: if the receiver is nil, they are no-ops, -// allowing the cache to call them unconditionally regardless of whether metrics -// are enabled. -// -// The cacheName is used as the "cache" attribute on all recorded metrics, -// enabling multiple cache instances to be distinguished in dashboards. -type CacheMetrics struct { - // Pre-computed attribute option reused on every recording to avoid - // per-call allocations on the hot path. - attrs metric.MeasurementOption - - sizeBytes metric.Int64Gauge - sizeEntries metric.Int64Gauge - hits metric.Int64Counter - misses metric.Int64Counter - missLatency metric.Float64Histogram -} - -// newCacheMetrics creates a CacheMetrics that records cache statistics via OTel. 
-// A background goroutine scrapes cache size every scrapeInterval until ctx is -// cancelled. The cacheName is attached as the "cache" attribute to all recorded -// metrics, enabling multiple cache instances to be distinguished in dashboards. -// -// Multiple instances are safe: OTel instrument registration is idempotent, so each -// call receives references to the same underlying instruments. The "cache" attribute -// distinguishes series (e.g. pebblecache_hits{cache="state"}). -func newCacheMetrics( - ctx context.Context, - cacheName string, - scrapeInterval time.Duration, - getSize func() (bytes uint64, entries uint64), -) *CacheMetrics { - meter := otel.Meter(cacheMeterName) - - sizeBytes, _ := meter.Int64Gauge( - "pebblecache_size_bytes", - metric.WithDescription("Current cache size in bytes"), - metric.WithUnit("By"), - ) - sizeEntries, _ := meter.Int64Gauge( - "pebblecache_size_entries", - metric.WithDescription("Current number of entries in the cache"), - metric.WithUnit("{count}"), - ) - hits, _ := meter.Int64Counter( - "pebblecache_hits", - metric.WithDescription("Total number of cache hits"), - metric.WithUnit("{count}"), - ) - misses, _ := meter.Int64Counter( - "pebblecache_misses", - metric.WithDescription("Total number of cache misses"), - metric.WithUnit("{count}"), - ) - missLatency, _ := meter.Float64Histogram( - "pebblecache_miss_latency", - metric.WithDescription("Time taken to resolve a cache miss from the backing store"), - metric.WithUnit("s"), - metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), - ) - - cm := &CacheMetrics{ - attrs: metric.WithAttributes(attribute.String("cache", cacheName)), - sizeBytes: sizeBytes, - sizeEntries: sizeEntries, - hits: hits, - misses: misses, - missLatency: missLatency, - } - - go cm.collectLoop(ctx, scrapeInterval, getSize) - - return cm -} - -func (cm *CacheMetrics) reportCacheHits(count int64) { - if cm == nil { - return - } - cm.hits.Add(context.Background(), count, cm.attrs) -} - -func (cm 
*CacheMetrics) reportCacheMisses(count int64) { - if cm == nil { - return - } - cm.misses.Add(context.Background(), count, cm.attrs) -} - -func (cm *CacheMetrics) reportCacheMissLatency(latency time.Duration) { - if cm == nil { - return - } - cm.missLatency.Record(context.Background(), latency.Seconds(), cm.attrs) -} - -// collectLoop periodically scrapes cache size from the provided function -// and records it as gauge values. It exits when ctx is cancelled. -func (cm *CacheMetrics) collectLoop( - ctx context.Context, - interval time.Duration, - getSize func() (bytes uint64, entries uint64), -) { - - if cm == nil { - return - } - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - bytes, entries := getSize() - cm.sizeBytes.Record(ctx, int64(bytes), cm.attrs) //nolint:gosec // G115: safe, cache size fits int64 - cm.sizeEntries.Record(ctx, int64(entries), cm.attrs) //nolint:gosec // G115: safe, entry count fits int64 - } - } -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go deleted file mode 100644 index 91f8b03a29..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue.go +++ /dev/null @@ -1,83 +0,0 @@ -package pebblecache - -import "container/list" - -// Implements a queue-like abstraction with LRU semantics. Not thread safe. -type lruQueue struct { - order *list.List - entries map[string]*list.Element - totalSize uint64 -} - -type lruQueueEntry struct { - key string - size uint64 -} - -// Create a new LRU queue. -func newLRUQueue() *lruQueue { - return &lruQueue{ - order: list.New(), - entries: make(map[string]*list.Element), - } -} - -// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. 
-func (lru *lruQueue) Push( - // the key in the cache that was recently interacted with - key []byte, - // the size of the key + value - size uint64, -) { - if elem, ok := lru.entries[string(key)]; ok { - entry := elem.Value.(*lruQueueEntry) - lru.totalSize += size - entry.size - entry.size = size - lru.order.MoveToBack(elem) - return - } - - keyStr := string(key) - elem := lru.order.PushBack(&lruQueueEntry{ - key: keyStr, - size: size, - }) - lru.entries[keyStr] = elem - lru.totalSize += size -} - -// Signal that an entry has been interated with, moving it to the back of the queue -// (i.e. making it so it doesn't get popped soon). -func (lru *lruQueue) Touch(key []byte) { - elem, ok := lru.entries[string(key)] - if !ok { - return - } - lru.order.MoveToBack(elem) -} - -// Returns the total size of all entries in the LRU queue. -func (lru *lruQueue) GetTotalSize() uint64 { - return lru.totalSize -} - -// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. -func (lru *lruQueue) GetCount() uint64 { - return uint64(len(lru.entries)) -} - -// Pops a single element out of the queue. The element removed is the entry least recently passed to Update(). -// Returns the key in string form to avoid copying the key an additional time. -// Panics if the queue is empty. 
-func (lru *lruQueue) PopLeastRecentlyUsed() string { - elem := lru.order.Front() - if elem == nil { - panic("cannot pop from empty LRU queue") - } - - lru.order.Remove(elem) - entry := elem.Value.(*lruQueueEntry) - delete(lru.entries, entry.key) - lru.totalSize -= entry.size - return entry.key -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go b/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go deleted file mode 100644 index 58b624fc77..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/lru_queue_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package pebblecache - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { - lru := newLRUQueue() - - key := []byte("a") - lru.Push(key, 1) - key[0] = 'z' - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestNewLRUQueueStartsEmpty(t *testing.T) { - lru := newLRUQueue() - - require.Equal(t, uint64(0), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { - lru := newLRUQueue() - require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) -} - -func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("x"), 1) - lru.PopLeastRecentlyUsed() - - require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) -} - -func TestPushSingleElement(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("only"), 42) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(42), lru.GetTotalSize()) - require.Equal(t, "only", lru.PopLeastRecentlyUsed()) -} - -func TestPushDuplicateDecreasesSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("k"), 100) - lru.Push([]byte("k"), 30) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) -} - -func TestPushDuplicateMovesToBack(t *testing.T) { - lru := newLRUQueue() - 
lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - lru.Push([]byte("c"), 1) - - // Re-push "a" — should move it behind "b" and "c" - lru.Push([]byte("a"), 1) - - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestPushZeroSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("z"), 0) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) - require.Equal(t, "z", lru.PopLeastRecentlyUsed()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPushEmptyKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte(""), 5) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "", lru.PopLeastRecentlyUsed()) -} - -func TestPushRepeatedUpdatesToSameKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("k"), 1) - lru.Push([]byte("k"), 2) - lru.Push([]byte("k"), 3) - lru.Push([]byte("k"), 4) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(4), lru.GetTotalSize()) -} - -func TestTouchNonexistentKeyIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - - lru.Touch([]byte("missing")) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestTouchOnEmptyQueueIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Touch([]byte("ghost")) - - require.Equal(t, uint64(0), lru.GetCount()) -} - -func TestTouchSingleElement(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("solo"), 10) - lru.Touch([]byte("solo")) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) -} - -func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 3) - lru.Push([]byte("b"), 7) - - lru.Touch([]byte("a")) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(10), lru.GetTotalSize()) -} - -func 
TestMultipleTouchesChangeOrder(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - lru.Push([]byte("c"), 1) - - // Order: a, b, c - lru.Touch([]byte("a")) // Order: b, c, a - lru.Touch([]byte("b")) // Order: c, a, b - - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - - lru.Touch([]byte("b")) // "b" is already at back - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestPopDecrementsCountAndSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 10) - lru.Push([]byte("b"), 20) - lru.Push([]byte("c"), 30) - - lru.PopLeastRecentlyUsed() - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(50), lru.GetTotalSize()) - - lru.PopLeastRecentlyUsed() - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) -} - -func TestPopFIFOOrderWithoutTouches(t *testing.T) { - lru := newLRUQueue() - keys := []string{"first", "second", "third", "fourth"} - for _, k := range keys { - lru.Push([]byte(k), 1) - } - - for _, want := range keys { - require.Equal(t, want, lru.PopLeastRecentlyUsed()) - } -} - -func TestPushAfterDrain(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 5) - lru.PopLeastRecentlyUsed() - - lru.Push([]byte("x"), 10) - lru.Push([]byte("y"), 20) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) - require.Equal(t, "x", lru.PopLeastRecentlyUsed()) -} - -func TestPushPreviouslyPoppedKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("recycled"), 5) - lru.PopLeastRecentlyUsed() - - lru.Push([]byte("recycled"), 99) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, 
uint64(99), lru.GetTotalSize()) - require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) -} - -func TestInterleavedPushAndPop(t *testing.T) { - lru := newLRUQueue() - - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 2) - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - - lru.Push([]byte("c"), 3) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(5), lru.GetTotalSize()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) -} - -func TestTouchThenPushSameKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - - lru.Touch([]byte("a")) // order: b, a - lru.Push([]byte("a"), 50) // updates size, stays at back - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(51), lru.GetTotalSize()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestBinaryKeyData(t *testing.T) { - lru := newLRUQueue() - k1 := []byte{0x00, 0xFF, 0x01} - k2 := []byte{0x00, 0xFF, 0x02} - - lru.Push(k1, 10) - lru.Push(k2, 20) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) - - lru.Touch(k2) - require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) -} - -func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { - lru := newLRUQueue() - key := []byte("abc") - lru.Push(key, 1) - - key[0] = 'Z' - lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op - - require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) -} - -func TestManyEntries(t *testing.T) { - lru := newLRUQueue() - n := 1000 - var totalSize uint64 - - for i := 0; i < n; i++ { - k := fmt.Sprintf("key-%04d", i) - lru.Push([]byte(k), uint64(i+1)) - totalSize += uint64(i + 1) - } - - require.Equal(t, uint64(n), lru.GetCount()) - require.Equal(t, totalSize, lru.GetTotalSize()) - - for i := 0; i < n; i++ { - want := fmt.Sprintf("key-%04d", i) - require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) - } - - 
require.Equal(t, uint64(0), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 10) - lru.Push([]byte("b"), 20) - lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 - - require.Equal(t, uint64(25), lru.GetTotalSize()) - - // Pop "b" (it's the LRU since "a" was re-pushed to back). - lru.PopLeastRecentlyUsed() - require.Equal(t, uint64(5), lru.GetTotalSize()) - - lru.PopLeastRecentlyUsed() - require.Equal(t, uint64(0), lru.GetTotalSize()) -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go deleted file mode 100644 index a8a05e4030..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/noop_cache.go +++ /dev/null @@ -1,58 +0,0 @@ -package pebblecache - -import ( - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -var _ Cache = (*noOpCache)(nil) - -// noOpCache is a Cache that performs no caching. Every Get falls through -// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. -// Useful for testing the storage layer without cache interference, or for -// workloads where caching is not beneficial. -type noOpCache struct { - readFunc func(key []byte) ([]byte, bool, error) -} - -// NewNoOpCache creates a Cache that always reads from readFunc and never caches. 
-func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { - return &noOpCache{readFunc: readFunc} -} - -func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { - return c.readFunc(key) -} - -func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { - var firstErr error - for k := range keys { - val, _, err := c.readFunc([]byte(k)) - if err != nil { - keys[k] = types.BatchGetResult{Error: err} - if firstErr == nil { - firstErr = err - } - } else { - keys[k] = types.BatchGetResult{Value: val} - } - } - if firstErr != nil { - return fmt.Errorf("unable to batch get: %w", firstErr) - } - return nil -} - -func (c *noOpCache) Set([]byte, []byte) { - // intentional no-op -} - -func (c *noOpCache) Delete([]byte) { - // intentional no-op -} - -func (c *noOpCache) BatchSet([]CacheUpdate) error { - // intentional no-op - return nil -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go b/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go deleted file mode 100644 index 4778b65ec9..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/noop_cache_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package pebblecache - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -func newNoOpTestCache(store map[string][]byte) Cache { - return NewNoOpCache(func(key []byte) ([]byte, bool, error) { - v, ok := store[string(key)] - if !ok { - return nil, false, nil - } - return v, true, nil - }) -} - -func TestNoOpGetFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) -} - -func TestNoOpGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - val, found, err := c.Get([]byte("missing"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, 
val) -} - -func TestNoOpGetError(t *testing.T) { - dbErr := errors.New("broken") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { - return nil, false, dbErr - }) - - _, _, err := c.Get([]byte("k"), true) - require.ErrorIs(t, err, dbErr) -} - -func TestNoOpGetIgnoresUpdateLru(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - val1, _, _ := c.Get([]byte("k"), true) - val2, _, _ := c.Get([]byte("k"), false) - require.Equal(t, string(val1), string(val2)) -} - -func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { - store := map[string][]byte{"k": []byte("v1")} - c := newNoOpTestCache(store) - - val, _, _ := c.Get([]byte("k"), true) - require.Equal(t, "v1", string(val)) - - store["k"] = []byte("v2") - - val, _, _ = c.Get([]byte("k"), true) - require.Equal(t, "v2", string(val), "should re-read from func, not cache") -} - -func TestNoOpSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - c.Set([]byte("k"), []byte("v")) - - _, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.False(t, found, "Set should not cache anything") -} - -func TestNoOpDeleteIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - c.Delete([]byte("k")) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found, "Delete should not affect reads") - require.Equal(t, "v", string(val)) -} - -func TestNoOpBatchSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("b"), Value: []byte("2")}, - }) - require.NoError(t, err) - - _, found, _ := c.Get([]byte("a"), true) - require.False(t, found) - _, found, _ = c.Get([]byte("b"), true) - require.False(t, found) -} - -func TestNoOpBatchSetEmptyAndNil(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - require.NoError(t, c.BatchSet(nil)) - require.NoError(t, c.BatchSet([]CacheUpdate{})) 
-} - -func TestNoOpBatchGetAllFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["a"].IsFound()) - require.Equal(t, "1", string(keys["a"].Value)) - require.True(t, keys["b"].IsFound()) - require.Equal(t, "2", string(keys["b"].Value)) -} - -func TestNoOpBatchGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - keys := map[string]types.BatchGetResult{"x": {}} - require.NoError(t, c.BatchGet(keys)) - require.False(t, keys["x"].IsFound()) -} - -func TestNoOpBatchGetError(t *testing.T) { - dbErr := errors.New("fail") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { - return nil, false, dbErr - }) - - keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(keys) - require.Error(t, err) - require.ErrorIs(t, err, dbErr) - require.Error(t, keys["k"].Error) -} - -func TestNoOpBatchGetEmpty(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - keys := map[string]types.BatchGetResult{} - require.NoError(t, c.BatchGet(keys)) -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard.go b/sei-db/db_engine/pebbledb/pebblecache/shard.go deleted file mode 100644 index e52df96826..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/shard.go +++ /dev/null @@ -1,404 +0,0 @@ -package pebblecache - -import ( - "bytes" - "context" - "fmt" - "sync" - "time" - - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// A single shard of a Cache. -type shard struct { - ctx context.Context - - // A lock to protect the shard's data. - lock sync.Mutex - - // The data in the shard. - data map[string]*shardEntry - - // Organizes data for garbage collection. - gcQueue *lruQueue - - // A pool for asynchronous reads. 
- readPool threading.Pool - - // A function that reads a value from the database. - readFunc func(key []byte) ([]byte, bool, error) - - // The maximum size of this cache, in bytes. - maxSize uint64 - - // Cache-level metrics. Nil-safe; if nil, no metrics are recorded. - metrics *CacheMetrics -} - -// The result of a read from the underlying database. -type readResult struct { - value []byte - err error -} - -// The status of a value in the cache. -type valueStatus int - -const ( - // The value is not known and we are not currently attempting to find it. - statusUnknown valueStatus = iota - // We've scheduled a read of the value but haven't yet finsihed the read. - statusScheduled - // The data is available. - statusAvailable - // We are aware that the value is deleted (special case of data being available). - statusDeleted -) - -// A single shardEntry in a shard. Records data for a single key. -type shardEntry struct { - // The parent shard that contains this entry. - shard *shard - - // The current status of this entry. - status valueStatus - - // The value, if known. - value []byte - - // If the value is not available when we request it, - // it will be written to this channel when it is available. - valueChan chan readResult -} - -// Creates a new Shard. -func NewShard( - ctx context.Context, - readPool threading.Pool, - readFunc func(key []byte) ([]byte, bool, error), - maxSize uint64, -) (*shard, error) { - - if maxSize <= 0 { - return nil, fmt.Errorf("maxSize must be greater than 0") - } - - return &shard{ - ctx: ctx, - readPool: readPool, - readFunc: readFunc, - lock: sync.Mutex{}, - data: make(map[string]*shardEntry), - gcQueue: newLRUQueue(), - maxSize: maxSize, - }, nil -} - -// Get returns the value for the given key, or (nil, false) if not found. 
-func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { - s.lock.Lock() - - entry := s.getEntry(key) - - switch entry.status { - case statusAvailable: - return s.getAvailable(entry, key, updateLru) - case statusDeleted: - return s.getDeleted(key, updateLru) - case statusScheduled: - return s.getScheduled(entry) - case statusUnknown: - return s.getUnknown(entry, key) - default: - s.lock.Unlock() - panic(fmt.Sprintf("unexpected status: %#v", entry.status)) - } -} - -// Handles Get for a key whose value is already cached. Lock must be held; releases it. -func (s *shard) getAvailable(entry *shardEntry, key []byte, updateLru bool) ([]byte, bool, error) { - value := bytes.Clone(entry.value) - if updateLru { - s.gcQueue.Touch(key) - } - s.lock.Unlock() - s.metrics.reportCacheHits(1) - return value, true, nil -} - -// Handles Get for a key known to be deleted. Lock must be held; releases it. -func (s *shard) getDeleted(key []byte, updateLru bool) ([]byte, bool, error) { - if updateLru { - s.gcQueue.Touch(key) - } - s.lock.Unlock() - s.metrics.reportCacheHits(1) - return nil, false, nil -} - -// Handles Get for a key with an in-flight read from another goroutine. Lock must be held; releases it. -func (s *shard) getScheduled(entry *shardEntry) ([]byte, bool, error) { - valueChan := entry.valueChan - s.lock.Unlock() - s.metrics.reportCacheMisses(1) - startTime := time.Now() - result, err := threading.InterruptiblePull(s.ctx, valueChan) - s.metrics.reportCacheMissLatency(time.Since(startTime)) - if err != nil { - return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) - } - valueChan <- result // reload the channel in case there are other listeners - if result.err != nil { - return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) - } - return result.value, result.value != nil, nil -} - -// Handles Get for a key not yet read. Schedules the read and waits. Lock must be held; releases it. 
-func (s *shard) getUnknown(entry *shardEntry, key []byte) ([]byte, bool, error) { - entry.status = statusScheduled - valueChan := make(chan readResult, 1) - entry.valueChan = valueChan - s.lock.Unlock() - s.metrics.reportCacheMisses(1) - startTime := time.Now() - err := s.readPool.Submit(s.ctx, func() { - value, _, readErr := s.readFunc(key) - entry.injectValue(key, readResult{value: value, err: readErr}) - }) - if err != nil { - return nil, false, fmt.Errorf("failed to schedule read: %w", err) - } - result, err := threading.InterruptiblePull(s.ctx, valueChan) - s.metrics.reportCacheMissLatency(time.Since(startTime)) - if err != nil { - return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) - } - valueChan <- result // reload the channel in case there are other listeners - if result.err != nil { - return nil, false, result.err - } - return result.value, result.value != nil, nil -} - -// This method is called by the read scheduler when a value becomes available. -func (se *shardEntry) injectValue(key []byte, result readResult) { - se.shard.lock.Lock() - - if se.status == statusScheduled { - if result.err != nil { - // Don't cache errors — reset so the next caller retries. - delete(se.shard.data, string(key)) - } else if result.value == nil { - se.status = statusDeleted - se.value = nil - se.shard.gcQueue.Push(key, uint64(len(key))) - se.shard.evictUnlocked() - } else { - se.status = statusAvailable - se.value = result.value - se.shard.gcQueue.Push(key, uint64(len(key)+len(result.value))) //nolint:gosec // G115: len is non-negative - se.shard.evictUnlocked() - } - } - - se.shard.lock.Unlock() - - se.valueChan <- result -} - -// Get a shard entry for a given key. Caller is responsible for holding the shard's lock -// when this method is called. 
-func (s *shard) getEntry(key []byte) *shardEntry { - if entry, ok := s.data[string(key)]; ok { - return entry - } - entry := &shardEntry{ - shard: s, - status: statusUnknown, - } - keyStr := string(key) - s.data[keyStr] = entry - return entry -} - -// Tracks a key whose value is not yet available and must be waited on. -type pendingRead struct { - key string - entry *shardEntry - valueChan chan readResult - needsSchedule bool - // Populated after the read completes, used by bulkInjectValues. - result readResult -} - -// BatchGet reads a batch of keys from the shard. Results are written into the provided map. -func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { - pending := make([]pendingRead, 0, len(keys)) - var hits int64 - - s.lock.Lock() - for key := range keys { - entry := s.getEntry([]byte(key)) - - switch entry.status { - case statusAvailable | statusDeleted: - keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value)} - hits++ - case statusScheduled: - pending = append(pending, pendingRead{ - key: key, - entry: entry, - valueChan: entry.valueChan, - }) - case statusUnknown: - entry.status = statusScheduled - valueChan := make(chan readResult, 1) - entry.valueChan = valueChan - pending = append(pending, pendingRead{ - key: key, - entry: entry, - valueChan: valueChan, - needsSchedule: true, - }) - default: - s.lock.Unlock() - panic(fmt.Sprintf("unexpected status: %#v", entry.status)) - } - } - s.lock.Unlock() - - if hits > 0 { - s.metrics.reportCacheHits(hits) - } - if len(pending) == 0 { - return nil - } - - s.metrics.reportCacheMisses(int64(len(pending))) - startTime := time.Now() - - for i := range pending { - if pending[i].needsSchedule { - p := &pending[i] - err := s.readPool.Submit(s.ctx, func() { - value, _, readErr := s.readFunc([]byte(p.key)) - p.entry.valueChan <- readResult{value: value, err: readErr} - }) - if err != nil { - return fmt.Errorf("failed to schedule read: %w", err) - } - } - } - - for i := range pending { - 
result, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan) - if err != nil { - return fmt.Errorf("failed to pull value from channel: %w", err) - } - pending[i].valueChan <- result - pending[i].result = result - - if result.err != nil { - keys[pending[i].key] = types.BatchGetResult{Error: result.err} - } else { - keys[pending[i].key] = types.BatchGetResult{Value: result.value} - } - } - - s.metrics.reportCacheMissLatency(time.Since(startTime)) - go s.bulkInjectValues(pending) - - return nil -} - -// Applies deferred cache updates for a batch of reads under a single lock acquisition. -func (s *shard) bulkInjectValues(reads []pendingRead) { - s.lock.Lock() - for i := range reads { - entry := reads[i].entry - if entry.status != statusScheduled { - continue - } - result := reads[i].result - if result.err != nil { - // Don't cache errors — reset so the next caller retries. - delete(s.data, reads[i].key) - } else if result.value == nil { - entry.status = statusDeleted - entry.value = nil - s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key))) - } else { - entry.status = statusAvailable - entry.value = result.value - s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key)+len(result.value))) //nolint:gosec // G115 - } - } - s.evictUnlocked() - s.lock.Unlock() -} - -// Evicts least recently used entries until the cache is within its size budget. -// Caller is required to hold the lock. -func (s *shard) evictUnlocked() { - for s.gcQueue.GetTotalSize() > s.maxSize { - next := s.gcQueue.PopLeastRecentlyUsed() - delete(s.data, next) - } -} - -// getSizeInfo returns the current size (bytes) and entry count under the shard lock. -func (s *shard) getSizeInfo() (bytes uint64, entries uint64) { - s.lock.Lock() - defer s.lock.Unlock() - return s.gcQueue.GetTotalSize(), s.gcQueue.GetCount() -} - -// Set sets the value for the given key. 
-func (s *shard) Set(key []byte, value []byte) { - s.lock.Lock() - s.setUnlocked(key, value) - s.lock.Unlock() -} - -// Set a value. Caller is required to hold the lock. -func (s *shard) setUnlocked(key []byte, value []byte) { - entry := s.getEntry(key) - entry.status = statusAvailable - entry.value = value - - s.gcQueue.Push(key, uint64(len(key)+len(value))) //nolint:gosec // G115 - s.evictUnlocked() -} - -// BatchSet sets the values for a batch of keys. -func (s *shard) BatchSet(entries []CacheUpdate) { - s.lock.Lock() - for i := range entries { - if entries[i].IsDelete() { - s.deleteUnlocked(entries[i].Key) - } else { - s.setUnlocked(entries[i].Key, entries[i].Value) - } - } - s.lock.Unlock() -} - -// Delete deletes the value for the given key. -func (s *shard) Delete(key []byte) { - s.lock.Lock() - s.deleteUnlocked(key) - s.lock.Unlock() -} - -// Delete a value. Caller is required to hold the lock. -func (s *shard) deleteUnlocked(key []byte) { - entry := s.getEntry(key) - entry.status = statusDeleted - entry.value = nil - - s.gcQueue.Push(key, uint64(len(key))) - s.evictUnlocked() -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go deleted file mode 100644 index fb8f459bc3..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/shard_manager.go +++ /dev/null @@ -1,46 +0,0 @@ -package pebblecache - -import ( - "errors" - "hash/maphash" - "sync" -) - -var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") - -// A utility for assigning keys to shard indices. -type shardManager struct { - // A random seed that makes it hard for an attacker to predict the shard index and to skew the distribution. - seed maphash.Seed - // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) - mask uint64 - // reusable Hash objects to avoid allocs - pool sync.Pool -} - -// Creates a new Sharder. 
Number of shards must be a power of two and greater than 0. -func newShardManager(numShards uint64) (*shardManager, error) { - if numShards == 0 || (numShards&(numShards-1)) != 0 { - return nil, ErrNumShardsNotPowerOfTwo - } - - return &shardManager{ - seed: maphash.MakeSeed(), // secret, randomized - mask: numShards - 1, - pool: sync.Pool{ - New: func() any { return new(maphash.Hash) }, - }, - }, nil -} - -// Shard returns a shard index in [0, numShards). -// addr should be the raw address bytes (e.g., 20-byte ETH address). -func (s *shardManager) Shard(addr []byte) uint64 { - h := s.pool.Get().(*maphash.Hash) - h.SetSeed(s.seed) - _, _ = h.Write(addr) - x := h.Sum64() - s.pool.Put(h) - - return x & s.mask -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go b/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go deleted file mode 100644 index bb96656fc5..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/shard_manager_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package pebblecache - -import ( - "fmt" - "math" - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -// --- NewShardManager --- - -func TestNewShardManagerValidPowersOfTwo(t *testing.T) { - for exp := 0; exp < 20; exp++ { - n := uint64(1) << exp - sm, err := newShardManager(n) - require.NoError(t, err, "numShards=%d", n) - require.NotNil(t, sm, "numShards=%d", n) - } -} - -func TestNewShardManagerZeroReturnsError(t *testing.T) { - sm, err := newShardManager(0) - require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) - require.Nil(t, sm) -} - -func TestNewShardManagerNonPowersOfTwoReturnError(t *testing.T) { - bad := []uint64{3, 5, 6, 7, 9, 10, 12, 15, 17, 100, 255, 1023} - for _, n := range bad { - sm, err := newShardManager(n) - require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo, "numShards=%d", n) - require.Nil(t, sm, "numShards=%d", n) - } -} - -func TestNewShardManagerMaxUint64ReturnsError(t *testing.T) { - sm, err := newShardManager(math.MaxUint64) - 
require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) - require.Nil(t, sm) -} - -func TestNewShardManagerLargePowerOfTwo(t *testing.T) { - n := uint64(1) << 40 - sm, err := newShardManager(n) - require.NoError(t, err) - require.NotNil(t, sm) -} - -// --- Shard: basic behaviour --- - -func TestShardReturnsBoundedIndex(t *testing.T) { - for _, numShards := range []uint64{1, 2, 4, 16, 256, 1024} { - sm, err := newShardManager(numShards) - require.NoError(t, err) - - for i := 0; i < 500; i++ { - key := []byte(fmt.Sprintf("key-%d", i)) - idx := sm.Shard(key) - require.Less(t, idx, numShards, "numShards=%d key=%s", numShards, key) - } - } -} - -func TestShardDeterministic(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - key := []byte("deterministic-test-key") - first := sm.Shard(key) - for i := 0; i < 100; i++ { - require.Equal(t, first, sm.Shard(key)) - } -} - -func TestShardSingleShardAlwaysReturnsZero(t *testing.T) { - sm, err := newShardManager(1) - require.NoError(t, err) - - keys := [][]byte{ - {}, - {0x00}, - {0xFF}, - []byte("anything"), - []byte("another key entirely"), - } - for _, k := range keys { - require.Equal(t, uint64(0), sm.Shard(k), "key=%q", k) - } -} - -func TestShardEmptyKey(t *testing.T) { - sm, err := newShardManager(8) - require.NoError(t, err) - - idx := sm.Shard([]byte{}) - require.Less(t, idx, uint64(8)) - - // Deterministic - require.Equal(t, idx, sm.Shard([]byte{})) -} - -func TestShardNilKey(t *testing.T) { - sm, err := newShardManager(4) - require.NoError(t, err) - - idx := sm.Shard(nil) - require.Less(t, idx, uint64(4)) - require.Equal(t, idx, sm.Shard(nil)) -} - -func TestShardBinaryKeys(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - k1 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} - k2 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02} - - idx1 := sm.Shard(k1) - idx2 := sm.Shard(k2) - require.Less(t, idx1, uint64(16)) - require.Less(t, idx2, uint64(16)) -} - -func TestShardCallerMutationDoesNotAffectFutureResults(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - key := []byte("mutable") - first := sm.Shard(key) - - key[0] = 'X' - second := sm.Shard([]byte("mutable")) - require.Equal(t, first, second) -} - -// --- Distribution --- - -func TestShardDistribution(t *testing.T) { - const numShards = 16 - const numKeys = 10_000 - sm, err := newShardManager(numShards) - require.NoError(t, err) - - counts := make([]int, numShards) - for i := 0; i < numKeys; i++ { - key := []byte(fmt.Sprintf("addr-%06d", i)) - counts[sm.Shard(key)]++ - } - - expected := float64(numKeys) / float64(numShards) - for shard, count := range counts { - ratio := float64(count) / expected - require.Greater(t, ratio, 0.5, "shard %d is severely underrepresented (%d)", shard, count) - require.Less(t, ratio, 1.5, "shard %d is severely overrepresented (%d)", shard, count) - } -} - -// --- Distinct managers --- - -func TestDifferentManagersHaveDifferentSeeds(t *testing.T) { - sm1, err := newShardManager(256) - require.NoError(t, err) - sm2, err := newShardManager(256) - require.NoError(t, err) - - // With distinct random seeds, at least some keys should hash differently. 
- diffCount := 0 - for i := 0; i < 200; i++ { - key := []byte(fmt.Sprintf("seed-test-%d", i)) - if sm1.Shard(key) != sm2.Shard(key) { - diffCount++ - } - } - require.Greater(t, diffCount, 0, "two managers with independent seeds should differ on at least one key") -} - -// --- Concurrency --- - -func TestShardConcurrentAccess(t *testing.T) { - sm, err := newShardManager(64) - require.NoError(t, err) - - const goroutines = 32 - const iters = 1000 - - key := []byte("concurrent-key") - expected := sm.Shard(key) - - var wg sync.WaitGroup - wg.Add(goroutines) - for g := 0; g < goroutines; g++ { - go func() { - defer wg.Done() - for i := 0; i < iters; i++ { - got := sm.Shard(key) - if got != expected { - t.Errorf("concurrent Shard returned %d, want %d", got, expected) - return - } - } - }() - } - wg.Wait() -} - -func TestShardConcurrentDifferentKeys(t *testing.T) { - sm, err := newShardManager(32) - require.NoError(t, err) - - const goroutines = 16 - const keysPerGoroutine = 500 - - var wg sync.WaitGroup - wg.Add(goroutines) - for g := 0; g < goroutines; g++ { - g := g - go func() { - defer wg.Done() - for i := 0; i < keysPerGoroutine; i++ { - key := []byte(fmt.Sprintf("g%d-k%d", g, i)) - idx := sm.Shard(key) - if idx >= 32 { - t.Errorf("Shard(%q) = %d, want < 32", key, idx) - return - } - } - }() - } - wg.Wait() -} - -// --- Mask correctness --- - -func TestShardMaskMatchesNumShards(t *testing.T) { - for exp := 0; exp < 16; exp++ { - numShards := uint64(1) << exp - sm, err := newShardManager(numShards) - require.NoError(t, err) - require.Equal(t, numShards-1, sm.mask, "numShards=%d", numShards) - } -} - -// --- 20-byte ETH-style addresses --- - -func TestShardWith20ByteAddresses(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - addr := make([]byte, 20) - for i := 0; i < 20; i++ { - addr[i] = byte(i + 1) - } - - idx := sm.Shard(addr) - require.Less(t, idx, uint64(16)) - require.Equal(t, idx, sm.Shard(addr)) -} - -func TestShardSingleByteKey(t 
*testing.T) { - sm, err := newShardManager(4) - require.NoError(t, err) - - for b := 0; b < 256; b++ { - idx := sm.Shard([]byte{byte(b)}) - require.Less(t, idx, uint64(4), "byte=%d", b) - } -} diff --git a/sei-db/db_engine/pebbledb/pebblecache/shard_test.go b/sei-db/db_engine/pebbledb/pebblecache/shard_test.go deleted file mode 100644 index 2950fa124d..0000000000 --- a/sei-db/db_engine/pebbledb/pebblecache/shard_test.go +++ /dev/null @@ -1,815 +0,0 @@ -package pebblecache - -import ( - "context" - "errors" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// --------------------------------------------------------------------------- -// helpers -// --------------------------------------------------------------------------- - -// newTestShard creates a shard backed by a simple in-memory map. -// The returned readFunc map can be populated before calling Get. 
-func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) *shard { - t.Helper() - readFunc := func(key []byte) ([]byte, bool, error) { - v, ok := store[string(key)] - if !ok { - return nil, false, nil - } - return v, true, nil - } - s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, maxSize) - require.NoError(t, err) - return s -} - -// --------------------------------------------------------------------------- -// NewShard -// --------------------------------------------------------------------------- - -func TestNewShardValid(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 1024) - require.NoError(t, err) - require.NotNil(t, s) -} - -func TestNewShardZeroMaxSize(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - _, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 0) - require.Error(t, err) -} - -// --------------------------------------------------------------------------- -// Get — cache miss flows -// --------------------------------------------------------------------------- - -func TestGetCacheMissFoundInDB(t *testing.T) { - store := map[string][]byte{"hello": []byte("world")} - s := newTestShard(t, 4096, store) - - val, found, err := s.Get([]byte("hello"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "world", string(val)) -} - -func TestGetCacheMissNotFoundInDB(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - val, found, err := s.Get([]byte("missing"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestGetCacheMissDBError(t *testing.T) { - dbErr := errors.New("disk on fire") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 
readFunc, 4096) - - _, _, err := s.Get([]byte("boom"), true) - require.Error(t, err) - require.ErrorIs(t, err, dbErr) -} - -func TestGetDBErrorDoesNotCacheResult(t *testing.T) { - var calls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { - n := calls.Add(1) - if n == 1 { - return nil, false, errors.New("transient") - } - return []byte("recovered"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - _, _, err := s.Get([]byte("key"), true) - require.Error(t, err, "first call should fail") - - val, found, err := s.Get([]byte("key"), true) - require.NoError(t, err, "second call should succeed") - require.True(t, found) - require.Equal(t, "recovered", string(val)) - require.Equal(t, int64(2), calls.Load(), "error should not be cached") -} - -// --------------------------------------------------------------------------- -// Get — cache hit flows -// --------------------------------------------------------------------------- - -func TestGetCacheHitAvailable(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{"k": []byte("v")}) - - s.Get([]byte("k"), true) - - val, found, err := s.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) -} - -func TestGetCacheHitDeleted(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Get([]byte("gone"), true) - - val, found, err := s.Get([]byte("gone"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestGetAfterSet(t *testing.T) { - var readCalls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { - readCalls.Add(1) - return nil, false, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - s.Set([]byte("k"), []byte("from-set")) - - val, found, err := s.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "from-set", string(val)) - 
require.Equal(t, int64(0), readCalls.Load(), "readFunc should not be called for Set-populated entry") -} - -func TestGetAfterDelete(t *testing.T) { - store := map[string][]byte{"k": []byte("v")} - s := newTestShard(t, 4096, store) - - s.Delete([]byte("k")) - - val, found, err := s.Get([]byte("k"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -// --------------------------------------------------------------------------- -// Get — concurrent reads on the same key -// --------------------------------------------------------------------------- - -func TestGetConcurrentSameKey(t *testing.T) { - var readCalls atomic.Int64 - gate := make(chan struct{}) - - readFunc := func(key []byte) ([]byte, bool, error) { - readCalls.Add(1) - <-gate - return []byte("value"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - const n = 10 - var wg sync.WaitGroup - errs := make([]error, n) - vals := make([]string, n) - founds := make([]bool, n) - - for i := 0; i < n; i++ { - wg.Add(1) - go func(idx int) { - defer wg.Done() - v, f, e := s.Get([]byte("shared"), true) - vals[idx] = string(v) - founds[idx] = f - errs[idx] = e - }(i) - } - - time.Sleep(50 * time.Millisecond) - close(gate) - wg.Wait() - - for i := 0; i < n; i++ { - require.NoError(t, errs[i], "goroutine %d", i) - require.True(t, founds[i], "goroutine %d", i) - require.Equal(t, "value", vals[i], "goroutine %d", i) - } - - require.Equal(t, int64(1), readCalls.Load(), "readFunc should be called exactly once") -} - -// --------------------------------------------------------------------------- -// Get — context cancellation -// --------------------------------------------------------------------------- - -func TestGetContextCancelled(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - - readFunc := func(key []byte) ([]byte, bool, error) { - time.Sleep(time.Second) - return []byte("late"), true, nil - } - s, _ := 
NewShard(ctx, threading.NewAdHocPool(), readFunc, 4096) - - cancel() - - _, _, err := s.Get([]byte("k"), true) - require.Error(t, err) -} - -// --------------------------------------------------------------------------- -// Get — updateLru flag -// --------------------------------------------------------------------------- - -func TestGetUpdateLruTrue(t *testing.T) { - store := map[string][]byte{ - "a": []byte("1"), - "b": []byte("2"), - } - s := newTestShard(t, 4096, store) - - s.Get([]byte("a"), true) - s.Get([]byte("b"), true) - - // Touch "a" via Get with updateLru=true, making "b" the LRU. - s.Get([]byte("a"), true) - - s.lock.Lock() - lru := s.gcQueue.PopLeastRecentlyUsed() - s.lock.Unlock() - - require.Equal(t, "b", lru) -} - -func TestGetUpdateLruFalse(t *testing.T) { - store := map[string][]byte{ - "a": []byte("1"), - "b": []byte("2"), - } - s := newTestShard(t, 4096, store) - - s.Get([]byte("a"), true) - s.Get([]byte("b"), true) - - // Access "a" without updating LRU — "a" should remain the LRU entry. 
- s.Get([]byte("a"), false) - - s.lock.Lock() - lru := s.gcQueue.PopLeastRecentlyUsed() - s.lock.Unlock() - - require.Equal(t, "a", lru, "updateLru=false should not move entry") -} - -// --------------------------------------------------------------------------- -// Set -// --------------------------------------------------------------------------- - -func TestSetNewKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), []byte("v")) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) -} - -func TestSetOverwritesExistingKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), []byte("old")) - s.Set([]byte("k"), []byte("new")) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "new", string(val)) -} - -func TestSetOverwritesDeletedKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Delete([]byte("k")) - s.Set([]byte("k"), []byte("revived")) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "revived", string(val)) -} - -func TestSetNilValue(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), nil) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Nil(t, val) -} - -func TestSetEmptyKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte(""), []byte("empty-key-val")) - - val, found, err := s.Get([]byte(""), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "empty-key-val", string(val)) -} - -// --------------------------------------------------------------------------- -// Delete -// --------------------------------------------------------------------------- - -func TestDeleteExistingKey(t *testing.T) { - 
s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), []byte("v")) - s.Delete([]byte("k")) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestDeleteNonexistentKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Delete([]byte("ghost")) - - val, found, err := s.Get([]byte("ghost"), false) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func TestDeleteThenSetThenGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), []byte("v1")) - s.Delete([]byte("k")) - s.Set([]byte("k"), []byte("v2")) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v2", string(val)) -} - -// --------------------------------------------------------------------------- -// BatchSet -// --------------------------------------------------------------------------- - -func TestBatchSetSetsMultiple(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.BatchSet([]CacheUpdate{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("b"), Value: []byte("2")}, - {Key: []byte("c"), Value: []byte("3")}, - }) - - for _, tc := range []struct { - key, want string - }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { - val, found, err := s.Get([]byte(tc.key), false) - require.NoError(t, err, "Get(%q)", tc.key) - require.True(t, found, "Get(%q)", tc.key) - require.Equal(t, tc.want, string(val), "Get(%q)", tc.key) - } -} - -func TestBatchSetMixedSetAndDelete(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("keep"), []byte("v")) - s.Set([]byte("remove"), []byte("v")) - - s.BatchSet([]CacheUpdate{ - {Key: []byte("keep"), Value: []byte("updated")}, - {Key: []byte("remove"), Value: nil}, - {Key: []byte("new"), Value: []byte("fresh")}, - }) - - val, found, _ := s.Get([]byte("keep"), false) - require.True(t, found) - 
require.Equal(t, "updated", string(val)) - - _, found, _ = s.Get([]byte("remove"), false) - require.False(t, found, "expected remove to be deleted") - - val, found, _ = s.Get([]byte("new"), false) - require.True(t, found) - require.Equal(t, "fresh", string(val)) -} - -func TestBatchSetEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - s.BatchSet(nil) - s.BatchSet([]CacheUpdate{}) - - bytes, entries := s.getSizeInfo() - require.Equal(t, 0, bytes) - require.Equal(t, 0, entries) -} - -// --------------------------------------------------------------------------- -// BatchGet -// --------------------------------------------------------------------------- - -func TestBatchGetAllCached(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("a"), []byte("1")) - s.Set([]byte("b"), []byte("2")) - - keys := map[string]types.BatchGetResult{ - "a": {}, - "b": {}, - } - require.NoError(t, s.BatchGet(keys)) - - for k, want := range map[string]string{"a": "1", "b": "2"} { - r := keys[k] - require.True(t, r.IsFound(), "key=%q", k) - require.Equal(t, want, string(r.Value), "key=%q", k) - } -} - -func TestBatchGetAllFromDB(t *testing.T) { - store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} - s := newTestShard(t, 4096, store) - - keys := map[string]types.BatchGetResult{ - "x": {}, - "y": {}, - } - require.NoError(t, s.BatchGet(keys)) - - for k, want := range map[string]string{"x": "10", "y": "20"} { - r := keys[k] - require.True(t, r.IsFound(), "key=%q", k) - require.Equal(t, want, string(r.Value), "key=%q", k) - } -} - -func TestBatchGetMixedCachedAndDB(t *testing.T) { - store := map[string][]byte{"db-key": []byte("from-db")} - s := newTestShard(t, 4096, store) - - s.Set([]byte("cached"), []byte("from-cache")) - - keys := map[string]types.BatchGetResult{ - "cached": {}, - "db-key": {}, - } - require.NoError(t, s.BatchGet(keys)) - - require.True(t, keys["cached"].IsFound()) - require.Equal(t, "from-cache", 
string(keys["cached"].Value)) - require.True(t, keys["db-key"].IsFound()) - require.Equal(t, "from-db", string(keys["db-key"].Value)) -} - -func TestBatchGetNotFoundKeys(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - keys := map[string]types.BatchGetResult{ - "nope": {}, - } - require.NoError(t, s.BatchGet(keys)) - require.False(t, keys["nope"].IsFound()) -} - -func TestBatchGetDeletedKeys(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("del"), []byte("v")) - s.Delete([]byte("del")) - - keys := map[string]types.BatchGetResult{ - "del": {}, - } - require.NoError(t, s.BatchGet(keys)) - require.False(t, keys["del"].IsFound()) -} - -func TestBatchGetDBError(t *testing.T) { - dbErr := errors.New("broken") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - keys := map[string]types.BatchGetResult{ - "fail": {}, - } - require.NoError(t, s.BatchGet(keys), "BatchGet itself should not fail") - require.Error(t, keys["fail"].Error, "expected per-key error") -} - -func TestBatchGetEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - keys := map[string]types.BatchGetResult{} - require.NoError(t, s.BatchGet(keys)) -} - -func TestBatchGetCachesResults(t *testing.T) { - var readCalls atomic.Int64 - store := map[string][]byte{"k": []byte("v")} - readFunc := func(key []byte) ([]byte, bool, error) { - readCalls.Add(1) - v, ok := store[string(key)] - return v, ok, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - keys := map[string]types.BatchGetResult{"k": {}} - s.BatchGet(keys) - - // bulkInjectValues runs in a goroutine — give it a moment. 
- time.Sleep(50 * time.Millisecond) - - val, found, err := s.Get([]byte("k"), false) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) - require.Equal(t, int64(1), readCalls.Load(), "result should be cached") -} - -// --------------------------------------------------------------------------- -// Eviction -// --------------------------------------------------------------------------- - -func TestEvictionRespectMaxSize(t *testing.T) { - s := newTestShard(t, 30, map[string][]byte{}) - - // key="a" (1 byte) + value="aaaaaaaaaa" (10 bytes) = 11 bytes per entry - s.Set([]byte("a"), []byte("aaaaaaaaaa")) - s.Set([]byte("b"), []byte("bbbbbbbbbb")) - - _, entries := s.getSizeInfo() - require.Equal(t, 2, entries) - - // Third entry pushes to 33 bytes, exceeding maxSize=30 → evict "a". - s.Set([]byte("c"), []byte("cccccccccc")) - - bytes, entries := s.getSizeInfo() - require.LessOrEqual(t, bytes, 30, "shard size should not exceed maxSize") - require.Equal(t, 2, entries) -} - -func TestEvictionOrderIsLRU(t *testing.T) { - // Each entry: key(1) + value(4) = 5 bytes. maxSize=15 → fits 3. - s := newTestShard(t, 15, map[string][]byte{}) - - s.Set([]byte("a"), []byte("1111")) - s.Set([]byte("b"), []byte("2222")) - s.Set([]byte("c"), []byte("3333")) - - // Touch "a" so "b" becomes the LRU. - s.Get([]byte("a"), true) - - // Insert "d" → total 20 > 15 → must evict. "b" is LRU. 
- s.Set([]byte("d"), []byte("4444")) - - s.lock.Lock() - _, bExists := s.data["b"] - _, aExists := s.data["a"] - s.lock.Unlock() - - require.False(t, bExists, "expected 'b' to be evicted (it was LRU)") - require.True(t, aExists, "expected 'a' to survive (it was recently touched)") -} - -func TestEvictionOnDelete(t *testing.T) { - s := newTestShard(t, 10, map[string][]byte{}) - - s.Set([]byte("a"), []byte("val")) // size 4 - s.Delete([]byte("longkey1")) // size 8 - - bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 10, "size should not exceed maxSize") -} - -func TestEvictionOnGetFromDB(t *testing.T) { - store := map[string][]byte{ - "x": []byte("12345678901234567890"), - } - s := newTestShard(t, 25, store) - - s.Set([]byte("a"), []byte("small")) - - // Reading "x" brings in 1+20=21 bytes, total becomes 6+21=27 > 25 → eviction. - s.Get([]byte("x"), true) - - time.Sleep(50 * time.Millisecond) - - bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 25, "size should not exceed maxSize after DB read") -} - -// --------------------------------------------------------------------------- -// getSizeInfo -// --------------------------------------------------------------------------- - -func TestGetSizeInfoEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - bytes, entries := s.getSizeInfo() - require.Equal(t, 0, bytes) - require.Equal(t, 0, entries) -} - -func TestGetSizeInfoAfterSets(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("ab"), []byte("cd")) // 2+2 = 4 - s.Set([]byte("efg"), []byte("hi")) // 3+2 = 5 - - bytes, entries := s.getSizeInfo() - require.Equal(t, 2, entries) - require.Equal(t, 9, bytes) -} - -// --------------------------------------------------------------------------- -// injectValue — edge cases -// --------------------------------------------------------------------------- - -func TestInjectValueNotFound(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - 
val, found, err := s.Get([]byte("missing"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) - - s.lock.Lock() - entry, ok := s.data["missing"] - s.lock.Unlock() - require.True(t, ok, "entry should exist in map") - require.Equal(t, statusDeleted, entry.status) -} - -// --------------------------------------------------------------------------- -// Concurrent Set and Get -// --------------------------------------------------------------------------- - -func TestConcurrentSetAndGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - const n = 100 - var wg sync.WaitGroup - - for i := 0; i < n; i++ { - wg.Add(2) - key := []byte(fmt.Sprintf("key-%d", i)) - val := []byte(fmt.Sprintf("val-%d", i)) - - go func() { - defer wg.Done() - s.Set(key, val) - }() - go func() { - defer wg.Done() - s.Get(key, true) - }() - } - - wg.Wait() -} - -func TestConcurrentBatchSetAndBatchGet(t *testing.T) { - store := map[string][]byte{} - for i := 0; i < 50; i++ { - store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) - } - s := newTestShard(t, 100_000, store) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - updates := make([]CacheUpdate, 20) - for i := 0; i < 20; i++ { - updates[i] = CacheUpdate{ - Key: []byte(fmt.Sprintf("set-%d", i)), - Value: []byte(fmt.Sprintf("sv-%d", i)), - } - } - s.BatchSet(updates) - }() - - wg.Add(1) - go func() { - defer wg.Done() - keys := make(map[string]types.BatchGetResult) - for i := 0; i < 50; i++ { - keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} - } - s.BatchGet(keys) - }() - - wg.Wait() -} - -// --------------------------------------------------------------------------- -// Pool submission failure -// --------------------------------------------------------------------------- - -type failPool struct{} - -func (fp *failPool) Submit(_ context.Context, _ func()) error { - return errors.New("pool exhausted") -} - -func TestGetPoolSubmitFailure(t *testing.T) { - 
readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } - s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) - - _, _, err := s.Get([]byte("k"), true) - require.Error(t, err) -} - -func TestBatchGetPoolSubmitFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } - s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) - - keys := map[string]types.BatchGetResult{"k": {}} - err := s.BatchGet(keys) - require.Error(t, err) -} - -// --------------------------------------------------------------------------- -// Large values -// --------------------------------------------------------------------------- - -func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { - s := newTestShard(t, 100, map[string][]byte{}) - - s.Set([]byte("a"), []byte("small")) - - bigVal := make([]byte, 95) - for i := range bigVal { - bigVal[i] = 'X' - } - s.Set([]byte("b"), bigVal) - - bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 100, "size should not exceed maxSize after large set") -} - -// --------------------------------------------------------------------------- -// bulkInjectValues — error entries are not cached -// --------------------------------------------------------------------------- - -func TestBatchGetDBErrorNotCached(t *testing.T) { - var calls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { - n := calls.Add(1) - if n == 1 { - return nil, false, errors.New("transient db error") - } - return []byte("ok"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) - - keys := map[string]types.BatchGetResult{"k": {}} - s.BatchGet(keys) - - // Wait for bulkInjectValues goroutine. 
- time.Sleep(50 * time.Millisecond) - - val, found, err := s.Get([]byte("k"), true) - require.NoError(t, err, "retry should succeed") - require.True(t, found) - require.Equal(t, "ok", string(val)) -} - -// --------------------------------------------------------------------------- -// Edge: Set then Delete then BatchGet -// --------------------------------------------------------------------------- - -func TestSetDeleteThenBatchGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) - - s.Set([]byte("k"), []byte("v")) - s.Delete([]byte("k")) - - keys := map[string]types.BatchGetResult{"k": {}} - require.NoError(t, s.BatchGet(keys)) - require.False(t, keys["k"].IsFound()) -} diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index bbdac5b0f5..d431088ff9 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -12,9 +12,9 @@ type PebbleDBConfig struct { // The directory to store the database files. This has no default value and must be provided. DataDir string // The size of key-value cache, in bytes. - CacheSize int + CacheSize uint64 // The number of shards in the key-value cache. Must be a power of two and greater than 0. - CacheShardCount int + CacheShardCount uint64 // The size of pebbleDB's internal block cache, in bytes. BlockCacheSize int // Whether to enable metrics. 
@@ -39,9 +39,6 @@ func (c *PebbleDBConfig) Validate() error { if c.DataDir == "" { return fmt.Errorf("data dir is required") } - if c.CacheSize < 0 { - return fmt.Errorf("cache size must not be negative") - } if c.CacheSize > 0 && (c.CacheShardCount&(c.CacheShardCount-1)) != 0 { return fmt.Errorf("cache shard count must be a power of two or 0") } diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index 10debc5ee5..3c80fb5f24 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -382,8 +382,7 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { metaCfg := s.config.MetadataDBConfig metaCfg.DataDir = filepath.Join(flatkvDir, metadataDir) tmpMeta, err := pebbledb.Open( - s.ctx, &metaCfg, pebble.DefaultComparer, - s.readPool, s.miscPool) + s.ctx, &metaCfg, pebble.DefaultComparer) if err == nil { verData, verErr := tmpMeta.Get([]byte(MetaGlobalVersion)) _ = tmpMeta.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 1d1971f3fe..8274cd2b74 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -9,7 +9,6 @@ import ( "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -280,8 +279,7 @@ func TestMigrationFromFlatLayout(t *testing.T) { // Create an actual PebbleDB so Open works cfg := pebbledb.DefaultTestConfig(t) cfg.DataDir = dbPath - db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer, - threading.NewAdHocPool(), threading.NewAdHocPool()) + db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer) require.NoError(t, err) require.NoError(t, db.Close()) } @@ 
-347,8 +345,7 @@ func TestOpenVersionValidation(t *testing.T) { acctCfg := pebbledb.DefaultConfig() acctCfg.DataDir = accountDBPath acctCfg.EnableMetrics = false - db, err := pebbledb.Open(t.Context(), &acctCfg, pebble.DefaultComparer, - threading.NewAdHocPool(), threading.NewAdHocPool()) + db, err := pebbledb.Open(t.Context(), &acctCfg, pebble.DefaultComparer) require.NoError(t, err) lagMeta := &LocalMeta{CommittedVersion: 1} require.NoError(t, db.Set(DBLocalMetaKey, MarshalLocalMeta(lagMeta), types.WriteOptions{Sync: true})) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 093274cb6b..dbd00d53b9 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -433,7 +433,7 @@ func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig) (seidbtypes.Key if err := os.MkdirAll(cfg.DataDir, 0750); err != nil { return nil, fmt.Errorf("create directory %s: %w", cfg.DataDir, err) } - db, err := pebbledb.Open(s.ctx, cfg, pebble.DefaultComparer, s.readPool, s.miscPool) + db, err := pebbledb.OpenWithCache(s.ctx, cfg, pebble.DefaultComparer, s.readPool, s.miscPool) if err != nil { return nil, fmt.Errorf("open %s: %w", cfg.DataDir, err) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index bea881399e..6ac6cdba24 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -60,7 +60,7 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() cfg := pebbledb.DefaultTestConfig(t) - db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer, + db, err := pebbledb.OpenWithCache(t.Context(), &cfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) return db From f4b832672f4e3346b075b44e01688502567ec931 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 13 Mar 2026 09:56:08 -0500 
Subject: [PATCH 056/119] rename cache -> dbcache to avoid gitignore --- sei-db/db_engine/pebbledb/db.go | 2 +- sei-db/db_engine/pebbledb/dbcache/cache.go | 84 ++ .../db_engine/pebbledb/dbcache/cache_impl.go | 188 ++++ .../pebbledb/dbcache/cache_impl_test.go | 677 +++++++++++++++ .../pebbledb/dbcache/cache_metrics.go | 136 +++ .../pebbledb/dbcache/cached_batch.go | 55 ++ .../pebbledb/dbcache/cached_batch_test.go | 204 +++++ .../pebbledb/dbcache/cached_key_value_db.go | 82 ++ .../db_engine/pebbledb/dbcache/lru_queue.go | 83 ++ .../pebbledb/dbcache/lru_queue_test.go | 310 +++++++ .../db_engine/pebbledb/dbcache/noop_cache.go | 58 ++ .../pebbledb/dbcache/noop_cache_test.go | 152 ++++ sei-db/db_engine/pebbledb/dbcache/shard.go | 404 +++++++++ .../pebbledb/dbcache/shard_manager.go | 46 + .../pebbledb/dbcache/shard_manager_test.go | 271 ++++++ .../db_engine/pebbledb/dbcache/shard_test.go | 815 ++++++++++++++++++ 16 files changed, 3566 insertions(+), 1 deletion(-) create mode 100644 sei-db/db_engine/pebbledb/dbcache/cache.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cache_impl.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cache_metrics.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_batch.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/lru_queue.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/noop_cache.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/shard.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/shard_manager.go create mode 100644 sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go create mode 100644 
sei-db/db_engine/pebbledb/dbcache/shard_test.go diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 5f032cd388..0e45bfcc9f 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -13,7 +13,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - dbcache "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/cache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) diff --git a/sei-db/db_engine/pebbledb/dbcache/cache.go b/sei-db/db_engine/pebbledb/dbcache/cache.go new file mode 100644 index 0000000000..2ec5acfd82 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cache.go @@ -0,0 +1,84 @@ +package dbcache + +import ( + "context" + "fmt" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// Cache describes a cache capable of being used by a FlatKV store. +type Cache interface { + + // Get returns the value for the given key, or (nil, false) if not found. + Get( + // The entry to fetch. + key []byte, + // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. + // Useful for when an operation is performed multiple times in close succession on the same key, + // since it requires non-zero overhead to do so with little benefit. + updateLru bool, + ) ([]byte, bool, error) + + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]types.BatchGetResult) error + + // Set sets the value for the given key. + Set(key []byte, value []byte) + + // Delete deletes the value for the given key. 
+ Delete(key []byte) + + // BatchSet applies the given updates to the cache. + BatchSet(updates []CacheUpdate) error +} + +// CacheUpdate describes a single key-value mutation to apply to the cache. +type CacheUpdate struct { + // The key to update. + Key []byte + // The value to set. If nil, the key will be deleted. + Value []byte +} + +// IsDelete returns true if the update is a delete operation. +func (u *CacheUpdate) IsDelete() bool { + return u.Value == nil +} + +// BuildCache creates a new Cache. +func BuildCache( + ctx context.Context, + readFunc func(key []byte) ([]byte, bool, error), + shardCount uint64, + maxSize uint64, + readPool threading.Pool, + miscPool threading.Pool, + cacheName string, + metricsScrapeInterval time.Duration, +) (Cache, error) { + + if maxSize == 0 { + return NewNoOpCache(readFunc), nil + } + + cache, err := NewStandardCache( + ctx, + readFunc, + shardCount, + maxSize, + readPool, + miscPool, + cacheName, + metricsScrapeInterval, + ) + if err != nil { + return nil, fmt.Errorf("failed to create cache: %w", err) + } + return cache, nil +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_impl.go b/sei-db/db_engine/pebbledb/dbcache/cache_impl.go new file mode 100644 index 0000000000..ae55bea8b7 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cache_impl.go @@ -0,0 +1,188 @@ +package dbcache + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ Cache = (*cache)(nil) + +// A standard implementation of a flatcache. +type cache struct { + ctx context.Context + + // A utility for assigning keys to shard indices. + shardManager *shardManager + + // The shards in the cache. + shards []*shard + + // A pool for asynchronous reads. + readPool threading.Pool + + // A pool for miscellaneous operations that are neither computationally intensive nor IO bound. 
+ miscPool threading.Pool +} + +// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the +// background size scrape runs every metricsScrapeInterval. +func NewStandardCache( + ctx context.Context, + // A function that reads a value from the database. + readFunc func(key []byte) ([]byte, bool, error), + // The number of shards in the cache. Must be a power of two and greater than 0. + shardCount uint64, + // The maximum size of the cache, in bytes. + maxSize uint64, + // A work pool for reading from the DB. + readPool threading.Pool, + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + miscPool threading.Pool, + // Name used as the "cache" attribute on metrics. Empty string disables metrics. + cacheName string, + // How often to scrape cache size for metrics. Ignored if cacheName is empty. + metricsScrapeInterval time.Duration, +) (Cache, error) { + if shardCount == 0 || (shardCount&(shardCount-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + if maxSize == 0 { + return nil, fmt.Errorf("maxSize must be greater than 0") + } + + shardManager, err := newShardManager(shardCount) + if err != nil { + return nil, fmt.Errorf("failed to create shard manager: %w", err) + } + sizePerShard := maxSize / shardCount + if sizePerShard == 0 { + return nil, fmt.Errorf("maxSize must be greater than shardCount") + } + + shards := make([]*shard, shardCount) + for i := uint64(0); i < shardCount; i++ { + shards[i], err = NewShard(ctx, readPool, readFunc, sizePerShard) + if err != nil { + return nil, fmt.Errorf("failed to create shard: %w", err) + } + } + + c := &cache{ + ctx: ctx, + shardManager: shardManager, + shards: shards, + readPool: readPool, + miscPool: miscPool, + } + + if cacheName != "" { + metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) + for _, s := range c.shards { + s.metrics = metrics + } + } + + return c, nil +} + +func (c *cache) getCacheSizeInfo() 
(bytes uint64, entries uint64) { + for _, s := range c.shards { + b, e := s.getSizeInfo() + bytes += b + entries += e + } + return bytes, entries +} + +func (c *cache) BatchSet(updates []CacheUpdate) error { + // Sort entries by shard index so each shard is locked only once. + shardMap := make(map[uint64][]CacheUpdate) + for i := range updates { + idx := c.shardManager.Shard(updates[i].Key) + shardMap[idx] = append(shardMap[idx], updates[i]) + } + + var wg sync.WaitGroup + for shardIndex, shardEntries := range shardMap { + wg.Add(1) + err := c.miscPool.Submit(c.ctx, func() { + c.shards[shardIndex].BatchSet(shardEntries) + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit batch set: %w", err) + } + } + wg.Wait() + + return nil +} + +func (c *cache) BatchGet(keys map[string]types.BatchGetResult) error { + work := make(map[uint64]map[string]types.BatchGetResult) + for key := range keys { + idx := c.shardManager.Shard([]byte(key)) + if work[idx] == nil { + work[idx] = make(map[string]types.BatchGetResult) + } + work[idx][key] = types.BatchGetResult{} + } + + var wg sync.WaitGroup + for shardIndex, subMap := range work { + wg.Add(1) + + err := c.miscPool.Submit(c.ctx, func() { + defer wg.Done() + err := c.shards[shardIndex].BatchGet(subMap) + if err != nil { + for key := range subMap { + subMap[key] = types.BatchGetResult{Error: err} + } + } + }) + if err != nil { + return fmt.Errorf("failed to submit batch get: %w", err) + } + } + wg.Wait() + + for _, subMap := range work { + for key, result := range subMap { + keys[key] = result + } + } + + return nil +} + +func (c *cache) Delete(key []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + shard.Delete(key) +} + +func (c *cache) Get(key []byte, updateLru bool) ([]byte, bool, error) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + + value, ok, err := shard.Get(key, updateLru) + if err != nil { + return nil, false, fmt.Errorf("failed to get 
value from shard: %w", err) + } + if !ok { + return nil, false, nil + } + return value, ok, nil +} + +func (c *cache) Set(key []byte, value []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + shard.Set(key, value) +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go b/sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go new file mode 100644 index 0000000000..601591f008 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go @@ -0,0 +1,677 @@ +package dbcache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func noopRead(key []byte) ([]byte, bool, error) { return nil, false, nil } + +func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uint64) Cache { + t.Helper() + readFunc := func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + } + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), readFunc, shardCount, maxSize, pool, pool, "", 0) + require.NoError(t, err) + return c +} + +// --------------------------------------------------------------------------- +// NewStandardCache — validation +// --------------------------------------------------------------------------- + +func TestNewStandardCacheValid(t *testing.T) { + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), noopRead, 4, 1024, pool, pool, "", 0) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNewStandardCacheSingleShard(t *testing.T) { + pool := threading.NewAdHocPool() + c, err 
:= NewStandardCache(context.Background(), noopRead, 1, 1024, pool, pool, "", 0) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNewStandardCacheShardCountZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewStandardCache(context.Background(), noopRead, 0, 1024, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []uint64{3, 5, 6, 7, 9, 10} { + _, err := NewStandardCache(context.Background(), noopRead, n, 1024, pool, pool, "", 0) + require.Error(t, err, "shardCount=%d", n) + } +} + +func TestNewStandardCacheMaxSizeZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewStandardCache(context.Background(), noopRead, 4, 0, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { + pool := threading.NewAdHocPool() + // shardCount=4, maxSize=3 → sizePerShard=0 + _, err := NewStandardCache(context.Background(), noopRead, 4, 3, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheWithMetrics(t *testing.T) { + pool := threading.NewAdHocPool() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c, err := NewStandardCache(ctx, noopRead, 2, 1024, pool, pool, "test-cache", time.Hour) + require.NoError(t, err) + require.NotNil(t, c) +} + +// --------------------------------------------------------------------------- +// Get +// --------------------------------------------------------------------------- + +func TestCacheGetFromDB(t *testing.T) { + store := map[string][]byte{"foo": []byte("bar")} + c := newTestCache(t, store, 4, 4096) + + val, found, err := c.Get([]byte("foo"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "bar", string(val)) +} + +func TestCacheGetNotFound(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + val, found, err := 
c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetAfterSet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestCacheGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + c := newTestCache(t, store, 4, 4096) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetDBError(t *testing.T) { + dbErr := errors.New("db fail") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + + _, _, err := c.Get([]byte("k"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestCacheGetSameKeyConsistentShard(t *testing.T) { + var readCalls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return []byte("val"), true, nil + } + pool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 4, 4096, pool, pool, "", 0) + + // First call populates cache in a specific shard. + val1, _, _ := c.Get([]byte("key"), true) + // Second call should hit cache in the same shard. 
+ val2, _, _ := c.Get([]byte("key"), true) + + require.Equal(t, string(val1), string(val2)) + require.Equal(t, int64(1), readCalls.Load(), "second Get should hit cache") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestCacheSetNewKey(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + + val, found, err := c.Get([]byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "1", string(val)) +} + +func TestCacheSetOverwrite(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("old")) + c.Set([]byte("a"), []byte("new")) + + val, found, err := c.Get([]byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestCacheSetNilValue(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), nil) + + val, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestCacheDeleteExistingKey(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + _, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteNonexistent(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Delete([]byte("ghost")) + + _, found, err := c.Get([]byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteThenSet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v1")) + 
c.Delete([]byte("k")) + c.Set([]byte("k"), []byte("v2")) + + val, found, err := c.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestCacheBatchSetMultipleKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + {Key: []byte("c"), Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := c.Get([]byte(tc.key), false) + require.NoError(t, err, "key=%q", tc.key) + require.True(t, found, "key=%q", tc.key) + require.Equal(t, tc.want, string(val), "key=%q", tc.key) + } +} + +func TestCacheBatchSetMixedSetAndDelete(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("keep"), []byte("v")) + c.Set([]byte("remove"), []byte("v")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), Value: nil}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + require.NoError(t, err) + + val, found, _ := c.Get([]byte("keep"), false) + require.True(t, found) + require.Equal(t, "updated", string(val)) + + _, found, _ = c.Get([]byte("remove"), false) + require.False(t, found) + + val, found, _ = c.Get([]byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestCacheBatchSetEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestCacheBatchSetPoolFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } 
+ readPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("k"), Value: []byte("v")}, + }) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAllCached(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].IsFound()) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestCacheBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + c := newTestCache(t, store, 4, 4096) + + keys := map[string]types.BatchGetResult{"x": {}, "y": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["x"].IsFound()) + require.Equal(t, "10", string(keys["x"].Value)) + require.True(t, keys["y"].IsFound()) + require.Equal(t, "20", string(keys["y"].Value)) +} + +func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + c := newTestCache(t, store, 4, 4096) + + c.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{"cached": {}, "db-key": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["cached"].IsFound()) + require.Equal(t, "from-cache", string(keys["cached"].Value)) + require.True(t, keys["db-key"].IsFound()) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestCacheBatchGetNotFoundKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + keys := 
map[string]types.BatchGetResult{"nope": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["nope"].IsFound()) +} + +func TestCacheBatchGetDeletedKey(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["k"].IsFound()) +} + +func TestCacheBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + + keys := map[string]types.BatchGetResult{"fail": {}} + require.NoError(t, c.BatchGet(keys), "BatchGet itself should not fail") + require.Error(t, keys["fail"].Error) +} + +func TestCacheBatchGetEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} + +func TestCacheBatchGetPoolFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + readPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + + keys := map[string]types.BatchGetResult{"k": {}} + err := c.BatchGet(keys) + require.Error(t, err) +} + +func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { + // miscPool succeeds (goroutine runs), but readPool fails inside shard.BatchGet, + // causing the per-key error branch to be hit. 
+ readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + miscPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, &failPool{}, miscPool, "", 0) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + for k, r := range keys { + require.Error(t, r.Error, "key=%q should have per-key error", k) + } +} + +// --------------------------------------------------------------------------- +// Cross-shard distribution +// --------------------------------------------------------------------------- + +func TestCacheDistributesAcrossShards(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + // Insert enough distinct keys that at least 2 shards get entries. + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("v")) + } + + nonEmpty := 0 + for _, s := range impl.shards { + _, entries := s.getSizeInfo() + if entries > 0 { + nonEmpty++ + } + } + require.GreaterOrEqual(t, nonEmpty, 2, "keys should distribute across multiple shards") +} + +func TestCacheGetRoutesToSameShard(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + c.Set([]byte("key"), []byte("val")) + + idx := impl.shardManager.Shard([]byte("key")) + _, entries := impl.shards[idx].getSizeInfo() + require.Equal(t, 1, entries, "key should be in the shard determined by shardManager") +} + +// --------------------------------------------------------------------------- +// getCacheSizeInfo +// --------------------------------------------------------------------------- + +func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, int64(0), bytes) + require.Equal(t, int64(0), entries) +} + +func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { + c := newTestCache(t, 
map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + for i := 0; i < 20; i++ { + c.Set([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, int64(20), entries) + require.Greater(t, bytes, int64(0)) +} + +// --------------------------------------------------------------------------- +// Many keys — BatchGet/BatchSet spanning all shards +// --------------------------------------------------------------------------- + +func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 100_000) + + updates := make([]CacheUpdate, 200) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("key-%03d", i)), + Value: []byte(fmt.Sprintf("val-%03d", i)), + } + } + require.NoError(t, c.BatchSet(updates)) + + keys := make(map[string]types.BatchGetResult, 200) + for i := 0; i < 200; i++ { + keys[fmt.Sprintf("key-%03d", i)] = types.BatchGetResult{} + } + require.NoError(t, c.BatchGet(keys)) + + for i := 0; i < 200; i++ { + k := fmt.Sprintf("key-%03d", i) + want := fmt.Sprintf("val-%03d", i) + require.True(t, keys[k].IsFound(), "key=%q", k) + require.Equal(t, want, string(keys[k].Value), "key=%q", k) + require.NoError(t, keys[k].Error, "key=%q", k) + } +} + +// --------------------------------------------------------------------------- +// Concurrency +// --------------------------------------------------------------------------- + +func TestCacheConcurrentGetSet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go func() { + defer wg.Done() + c.Set(key, val) + }() + go func() { + defer wg.Done() + c.Get(key, true) + }() + } + wg.Wait() +} + +func 
TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 50) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + c.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + c.BatchGet(keys) + }() + + wg.Wait() +} + +func TestCacheConcurrentDeleteAndGet(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 100_000) + + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("k-%d", i)), []byte("v")) + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("k-%d", i)) + go func() { + defer wg.Done() + c.Delete(key) + }() + go func() { + defer wg.Done() + c.Get(key, true) + }() + } + wg.Wait() +} + +// --------------------------------------------------------------------------- +// Eviction through the cache layer +// --------------------------------------------------------------------------- + +func TestCacheEvictsPerShard(t *testing.T) { + // 1 shard, maxSize=20. Inserting more than 20 bytes triggers eviction. 
+ c := newTestCache(t, map[string][]byte{}, 1, 20) + impl := c.(*cache) + + // key(1) + value(8) = 9 bytes each + c.Set([]byte("a"), []byte("11111111")) + c.Set([]byte("b"), []byte("22222222")) + // 18 bytes, fits + + c.Set([]byte("c"), []byte("33333333")) + // 27 bytes → must evict to get under 20 + + bytes, _ := impl.shards[0].getSizeInfo() + require.LessOrEqual(t, bytes, 20) +} + +// --------------------------------------------------------------------------- +// Edge: BatchSet with keys all routed to the same shard +// --------------------------------------------------------------------------- + +func TestCacheBatchSetSameShard(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 1, 4096) + + // With 1 shard, every key goes to shard 0. + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("x"), Value: []byte("1")}, + {Key: []byte("y"), Value: []byte("2")}, + {Key: []byte("z"), Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"x", "1"}, {"y", "2"}, {"z", "3"}} { + val, found, err := c.Get([]byte(tc.key), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, tc.want, string(val)) + } +} + +// --------------------------------------------------------------------------- +// Edge: BatchGet after BatchSet with deletes +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { + c := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + c.Set([]byte("c"), []byte("3")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("updated")}, + {Key: []byte("b"), Value: nil}, + }) + require.NoError(t, err) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}, "c": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "updated", string(keys["a"].Value)) + require.False(t, 
keys["b"].IsFound()) + require.True(t, keys["c"].IsFound()) + require.Equal(t, "3", string(keys["c"].Value)) +} + +// --------------------------------------------------------------------------- +// Power-of-two shard counts +// --------------------------------------------------------------------------- + +func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { + c, err := NewStandardCache(context.Background(), noopRead, n, n*100, pool, pool, "", 0) + require.NoError(t, err, "shardCount=%d", n) + require.NotNil(t, c, "shardCount=%d", n) + } +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_metrics.go b/sei-db/db_engine/pebbledb/dbcache/cache_metrics.go new file mode 100644 index 0000000000..a1d810da7b --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cache_metrics.go @@ -0,0 +1,136 @@ +package dbcache + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + smetrics "github.com/sei-protocol/sei-chain/sei-db/common/metrics" +) + +const cacheMeterName = "seidb_pebblecache" + +// CacheMetrics records OTel metrics for a pebblecache instance. +// All report methods are nil-safe: if the receiver is nil, they are no-ops, +// allowing the cache to call them unconditionally regardless of whether metrics +// are enabled. +// +// The cacheName is used as the "cache" attribute on all recorded metrics, +// enabling multiple cache instances to be distinguished in dashboards. +type CacheMetrics struct { + // Pre-computed attribute option reused on every recording to avoid + // per-call allocations on the hot path. + attrs metric.MeasurementOption + + sizeBytes metric.Int64Gauge + sizeEntries metric.Int64Gauge + hits metric.Int64Counter + misses metric.Int64Counter + missLatency metric.Float64Histogram +} + +// newCacheMetrics creates a CacheMetrics that records cache statistics via OTel. 
+// A background goroutine scrapes cache size every scrapeInterval until ctx is +// cancelled. The cacheName is attached as the "cache" attribute to all recorded +// metrics, enabling multiple cache instances to be distinguished in dashboards. +// +// Multiple instances are safe: OTel instrument registration is idempotent, so each +// call receives references to the same underlying instruments. The "cache" attribute +// distinguishes series (e.g. pebblecache_hits{cache="state"}). +func newCacheMetrics( + ctx context.Context, + cacheName string, + scrapeInterval time.Duration, + getSize func() (bytes uint64, entries uint64), +) *CacheMetrics { + meter := otel.Meter(cacheMeterName) + + sizeBytes, _ := meter.Int64Gauge( + "pebblecache_size_bytes", + metric.WithDescription("Current cache size in bytes"), + metric.WithUnit("By"), + ) + sizeEntries, _ := meter.Int64Gauge( + "pebblecache_size_entries", + metric.WithDescription("Current number of entries in the cache"), + metric.WithUnit("{count}"), + ) + hits, _ := meter.Int64Counter( + "pebblecache_hits", + metric.WithDescription("Total number of cache hits"), + metric.WithUnit("{count}"), + ) + misses, _ := meter.Int64Counter( + "pebblecache_misses", + metric.WithDescription("Total number of cache misses"), + metric.WithUnit("{count}"), + ) + missLatency, _ := meter.Float64Histogram( + "pebblecache_miss_latency", + metric.WithDescription("Time taken to resolve a cache miss from the backing store"), + metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), + ) + + cm := &CacheMetrics{ + attrs: metric.WithAttributes(attribute.String("cache", cacheName)), + sizeBytes: sizeBytes, + sizeEntries: sizeEntries, + hits: hits, + misses: misses, + missLatency: missLatency, + } + + go cm.collectLoop(ctx, scrapeInterval, getSize) + + return cm +} + +func (cm *CacheMetrics) reportCacheHits(count int64) { + if cm == nil { + return + } + cm.hits.Add(context.Background(), count, cm.attrs) +} + +func (cm 
*CacheMetrics) reportCacheMisses(count int64) { + if cm == nil { + return + } + cm.misses.Add(context.Background(), count, cm.attrs) +} + +func (cm *CacheMetrics) reportCacheMissLatency(latency time.Duration) { + if cm == nil { + return + } + cm.missLatency.Record(context.Background(), latency.Seconds(), cm.attrs) +} + +// collectLoop periodically scrapes cache size from the provided function +// and records it as gauge values. It exits when ctx is cancelled. +func (cm *CacheMetrics) collectLoop( + ctx context.Context, + interval time.Duration, + getSize func() (bytes uint64, entries uint64), +) { + + if cm == nil { + return + } + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + bytes, entries := getSize() + cm.sizeBytes.Record(ctx, int64(bytes), cm.attrs) //nolint:gosec // G115: safe, cache size fits int64 + cm.sizeEntries.Record(ctx, int64(entries), cm.attrs) //nolint:gosec // G115: safe, entry count fits int64 + } + } +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_batch.go b/sei-db/db_engine/pebbledb/dbcache/cached_batch.go new file mode 100644 index 0000000000..e4995fe33b --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cached_batch.go @@ -0,0 +1,55 @@ +package dbcache + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// cachedBatch wraps a types.Batch and applies pending mutations to the cache +// after a successful commit. 
+type cachedBatch struct { + inner types.Batch + cache Cache + pending []CacheUpdate +} + +var _ types.Batch = (*cachedBatch)(nil) + +func newCachedBatch(inner types.Batch, cache Cache) *cachedBatch { + return &cachedBatch{inner: inner, cache: cache} +} + +func (cb *cachedBatch) Set(key, value []byte) error { + cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: value}) + return cb.inner.Set(key, value) +} + +func (cb *cachedBatch) Delete(key []byte) error { + cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: nil}) + return cb.inner.Delete(key) +} + +func (cb *cachedBatch) Commit(opts types.WriteOptions) error { + if err := cb.inner.Commit(opts); err != nil { + return err + } + if err := cb.cache.BatchSet(cb.pending); err != nil { + return fmt.Errorf("failed to update cache after commit: %w", err) + } + cb.pending = nil + return nil +} + +func (cb *cachedBatch) Len() int { + return cb.inner.Len() +} + +func (cb *cachedBatch) Reset() { + cb.inner.Reset() + cb.pending = nil +} + +func (cb *cachedBatch) Close() error { + return cb.inner.Close() +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go b/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go new file mode 100644 index 0000000000..5aeb533238 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go @@ -0,0 +1,204 @@ +package dbcache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// mock batch +// --------------------------------------------------------------------------- + +type mockBatch struct { + sets []CacheUpdate + deletes [][]byte + committed bool + closed bool + resetCount int + commitErr error +} + +func (m *mockBatch) Set(key, value []byte) error { + m.sets = append(m.sets, CacheUpdate{Key: key, Value: value}) + return nil +} + +func (m *mockBatch) Delete(key []byte) 
error { + m.deletes = append(m.deletes, key) + return nil +} + +func (m *mockBatch) Commit(opts types.WriteOptions) error { + if m.commitErr != nil { + return m.commitErr + } + m.committed = true + return nil +} + +func (m *mockBatch) Len() int { + return len(m.sets) + len(m.deletes) +} + +func (m *mockBatch) Reset() { + m.sets = nil + m.deletes = nil + m.committed = false + m.resetCount++ +} + +func (m *mockBatch) Close() error { + m.closed = true + return nil +} + +// --------------------------------------------------------------------------- +// mock cache +// --------------------------------------------------------------------------- + +type mockCache struct { + data map[string][]byte + batchSetErr error +} + +func newMockCache() *mockCache { + return &mockCache{data: make(map[string][]byte)} +} + +func (mc *mockCache) Get(key []byte, _ bool) ([]byte, bool, error) { + v, ok := mc.data[string(key)] + return v, ok, nil +} + +func (mc *mockCache) BatchGet(keys map[string]types.BatchGetResult) error { + for k := range keys { + v, ok := mc.data[k] + if ok { + keys[k] = types.BatchGetResult{Value: v} + } + } + return nil +} + +func (mc *mockCache) Set(key, value []byte) { + mc.data[string(key)] = value +} + +func (mc *mockCache) Delete(key []byte) { + delete(mc.data, string(key)) +} + +func (mc *mockCache) BatchSet(updates []CacheUpdate) error { + if mc.batchSetErr != nil { + return mc.batchSetErr + } + for _, u := range updates { + if u.IsDelete() { + delete(mc.data, string(u.Key)) + } else { + mc.data[string(u.Key)] = u.Value + } + } + return nil +} + +// --------------------------------------------------------------------------- +// tests +// --------------------------------------------------------------------------- + +func TestCachedBatchCommitUpdatesCacheOnSuccess(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Set([]byte("b"), 
[]byte("2"))) + require.NoError(t, cb.Commit(types.WriteOptions{})) + + require.True(t, inner.committed) + v, ok := cache.data["a"] + require.True(t, ok) + require.Equal(t, []byte("1"), v) + v, ok = cache.data["b"] + require.True(t, ok) + require.Equal(t, []byte("2"), v) +} + +func TestCachedBatchCommitDoesNotUpdateCacheOnInnerFailure(t *testing.T) { + inner := &mockBatch{commitErr: errors.New("disk full")} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + err := cb.Commit(types.WriteOptions{}) + + require.Error(t, err) + require.Contains(t, err.Error(), "disk full") + _, ok := cache.data["a"] + require.False(t, ok, "cache should not be updated when inner commit fails") +} + +func TestCachedBatchCommitReturnsCacheError(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cache.batchSetErr = errors.New("cache broken") + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + err := cb.Commit(types.WriteOptions{}) + + require.Error(t, err) + require.Contains(t, err.Error(), "cache broken") + require.True(t, inner.committed, "inner batch should have committed") +} + +func TestCachedBatchDeleteMarksKeyForRemoval(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cache.Set([]byte("x"), []byte("old")) + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Delete([]byte("x"))) + require.NoError(t, cb.Commit(types.WriteOptions{})) + + _, ok := cache.data["x"] + require.False(t, ok, "key should be deleted from cache") +} + +func TestCachedBatchResetClearsPending(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Set([]byte("b"), []byte("2"))) + cb.Reset() + + require.NoError(t, cb.Commit(types.WriteOptions{})) + + require.Empty(t, cache.data, "cache should have no entries after reset + commit") 
+} + +func TestCachedBatchLenDelegatesToInner(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.Equal(t, 0, cb.Len()) + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Delete([]byte("b"))) + require.Equal(t, 2, cb.Len()) +} + +func TestCachedBatchCloseDelegatesToInner(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Close()) + require.True(t, inner.closed) +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go b/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go new file mode 100644 index 0000000000..3a60cf39f3 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go @@ -0,0 +1,82 @@ +package dbcache + +import ( + "fmt" + + errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ types.KeyValueDB = (*cachedKeyValueDB)(nil) +var _ types.Checkpointable = (*cachedKeyValueDB)(nil) + +type cachedKeyValueDB struct { + db types.KeyValueDB + cache Cache +} + +// Combine a cache and a key-value database to create a new key-value database with caching. 
+func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { + return &cachedKeyValueDB{db: db, cache: cache} +} + +func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { + val, found, err := c.cache.Get(key, true) + if err != nil { + return nil, fmt.Errorf("failed to get value from cache: %w", err) + } + if !found { + return nil, errorutils.ErrNotFound + } + return val, nil +} + +func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error { + err := c.cache.BatchGet(keys) + if err != nil { + return fmt.Errorf("failed to get values from cache: %w", err) + } + return nil +} + +func (c *cachedKeyValueDB) Set(key []byte, value []byte, opts types.WriteOptions) error { + err := c.db.Set(key, value, opts) + if err != nil { + return fmt.Errorf("failed to set value in database: %w", err) + } + c.cache.Set(key, value) + return nil +} + +func (c *cachedKeyValueDB) Delete(key []byte, opts types.WriteOptions) error { + err := c.db.Delete(key, opts) + if err != nil { + return fmt.Errorf("failed to delete value in database: %w", err) + } + c.cache.Delete(key) + return nil +} + +func (c *cachedKeyValueDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, error) { + return c.db.NewIter(opts) +} + +func (c *cachedKeyValueDB) NewBatch() types.Batch { + return newCachedBatch(c.db.NewBatch(), c.cache) +} + +func (c *cachedKeyValueDB) Flush() error { + return c.db.Flush() +} + +func (c *cachedKeyValueDB) Close() error { + return c.db.Close() +} + +func (c *cachedKeyValueDB) Checkpoint(destDir string) error { + cp, ok := c.db.(types.Checkpointable) + if !ok { + return fmt.Errorf("underlying database does not support Checkpoint") + } + return cp.Checkpoint(destDir) +} diff --git a/sei-db/db_engine/pebbledb/dbcache/lru_queue.go b/sei-db/db_engine/pebbledb/dbcache/lru_queue.go new file mode 100644 index 0000000000..6870679c9d --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/lru_queue.go @@ -0,0 +1,83 @@ +package dbcache + +import 
"container/list"
+
+// Implements a queue-like abstraction with LRU semantics. Not thread safe.
+type lruQueue struct {
+	order *list.List
+	entries map[string]*list.Element
+	totalSize uint64
+}
+
+type lruQueueEntry struct {
+	key string
+	size uint64
+}
+
+// Create a new LRU queue.
+func newLRUQueue() *lruQueue {
+	return &lruQueue{
+		order: list.New(),
+		entries: make(map[string]*list.Element),
+	}
+}
+
+// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight.
+func (lru *lruQueue) Push(
+	// the key in the cache that was recently interacted with
+	key []byte,
+	// the size of the key + value
+	size uint64,
+) {
+	if elem, ok := lru.entries[string(key)]; ok {
+		entry := elem.Value.(*lruQueueEntry)
+		lru.totalSize += size - entry.size
+		entry.size = size
+		lru.order.MoveToBack(elem)
+		return
+	}
+
+	keyStr := string(key)
+	elem := lru.order.PushBack(&lruQueueEntry{
+		key: keyStr,
+		size: size,
+	})
+	lru.entries[keyStr] = elem
+	lru.totalSize += size
+}
+
+// Signal that an entry has been interacted with, moving it to the back of the queue
+// (i.e. making it so it doesn't get popped soon).
+func (lru *lruQueue) Touch(key []byte) {
+	elem, ok := lru.entries[string(key)]
+	if !ok {
+		return
+	}
+	lru.order.MoveToBack(elem)
+}
+
+// Returns the total size of all entries in the LRU queue.
+func (lru *lruQueue) GetTotalSize() uint64 {
+	return lru.totalSize
+}
+
+// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size.
+func (lru *lruQueue) GetCount() uint64 {
+	return uint64(len(lru.entries))
+}
+
+// Pops a single element out of the queue. The element removed is the entry least recently passed to Push() or Touch().
+// Returns the key in string form to avoid copying the key an additional time.
+// Panics if the queue is empty. 
+func (lru *lruQueue) PopLeastRecentlyUsed() string { + elem := lru.order.Front() + if elem == nil { + panic("cannot pop from empty LRU queue") + } + + lru.order.Remove(elem) + entry := elem.Value.(*lruQueueEntry) + delete(lru.entries, entry.key) + lru.totalSize -= entry.size + return entry.key +} diff --git a/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go b/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go new file mode 100644 index 0000000000..0073e6d1f0 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go @@ -0,0 +1,310 @@ +package dbcache + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { + lru := newLRUQueue() + + key := []byte("a") + lru.Push(key, 1) + key[0] = 'z' + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestNewLRUQueueStartsEmpty(t *testing.T) { + lru := newLRUQueue() + + require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { + lru := newLRUQueue() + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("x"), 1) + lru.PopLeastRecentlyUsed() + + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPushSingleElement(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("only"), 42) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(42), lru.GetTotalSize()) + require.Equal(t, "only", lru.PopLeastRecentlyUsed()) +} + +func TestPushDuplicateDecreasesSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("k"), 100) + lru.Push([]byte("k"), 30) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) +} + +func TestPushDuplicateMovesToBack(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + 
lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) + + // Re-push "a" — should move it behind "b" and "c" + lru.Push([]byte("a"), 1) + + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestPushZeroSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("z"), 0) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) + require.Equal(t, "z", lru.PopLeastRecentlyUsed()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPushEmptyKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte(""), 5) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "", lru.PopLeastRecentlyUsed()) +} + +func TestPushRepeatedUpdatesToSameKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("k"), 1) + lru.Push([]byte("k"), 2) + lru.Push([]byte("k"), 3) + lru.Push([]byte("k"), 4) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(4), lru.GetTotalSize()) +} + +func TestTouchNonexistentKeyIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + + lru.Touch([]byte("missing")) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestTouchOnEmptyQueueIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Touch([]byte("ghost")) + + require.Equal(t, uint64(0), lru.GetCount()) +} + +func TestTouchSingleElement(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("solo"), 10) + lru.Touch([]byte("solo")) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) +} + +func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 3) + lru.Push([]byte("b"), 7) + + lru.Touch([]byte("a")) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(10), lru.GetTotalSize()) +} + +func 
TestMultipleTouchesChangeOrder(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) + + // Order: a, b, c + lru.Touch([]byte("a")) // Order: b, c, a + lru.Touch([]byte("b")) // Order: c, a, b + + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("b")) // "b" is already at back + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestPopDecrementsCountAndSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("c"), 30) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(50), lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) +} + +func TestPopFIFOOrderWithoutTouches(t *testing.T) { + lru := newLRUQueue() + keys := []string{"first", "second", "third", "fourth"} + for _, k := range keys { + lru.Push([]byte(k), 1) + } + + for _, want := range keys { + require.Equal(t, want, lru.PopLeastRecentlyUsed()) + } +} + +func TestPushAfterDrain(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("x"), 10) + lru.Push([]byte("y"), 20) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) + require.Equal(t, "x", lru.PopLeastRecentlyUsed()) +} + +func TestPushPreviouslyPoppedKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("recycled"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("recycled"), 99) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, 
uint64(99), lru.GetTotalSize()) + require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) +} + +func TestInterleavedPushAndPop(t *testing.T) { + lru := newLRUQueue() + + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 2) + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + + lru.Push([]byte("c"), 3) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(5), lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) +} + +func TestTouchThenPushSameKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("a")) // order: b, a + lru.Push([]byte("a"), 50) // updates size, stays at back + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(51), lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestBinaryKeyData(t *testing.T) { + lru := newLRUQueue() + k1 := []byte{0x00, 0xFF, 0x01} + k2 := []byte{0x00, 0xFF, 0x02} + + lru.Push(k1, 10) + lru.Push(k2, 20) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) + + lru.Touch(k2) + require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) +} + +func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { + lru := newLRUQueue() + key := []byte("abc") + lru.Push(key, 1) + + key[0] = 'Z' + lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op + + require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) +} + +func TestManyEntries(t *testing.T) { + lru := newLRUQueue() + n := 1000 + var totalSize uint64 + + for i := 0; i < n; i++ { + k := fmt.Sprintf("key-%04d", i) + lru.Push([]byte(k), uint64(i+1)) + totalSize += uint64(i + 1) + } + + require.Equal(t, uint64(n), lru.GetCount()) + require.Equal(t, totalSize, lru.GetTotalSize()) + + for i := 0; i < n; i++ { + want := fmt.Sprintf("key-%04d", i) + require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) + } + + 
require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 + + require.Equal(t, uint64(25), lru.GetTotalSize()) + + // Pop "b" (it's the LRU since "a" was re-pushed to back). + lru.PopLeastRecentlyUsed() + require.Equal(t, uint64(5), lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + require.Equal(t, uint64(0), lru.GetTotalSize()) +} diff --git a/sei-db/db_engine/pebbledb/dbcache/noop_cache.go b/sei-db/db_engine/pebbledb/dbcache/noop_cache.go new file mode 100644 index 0000000000..1e40e02879 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/noop_cache.go @@ -0,0 +1,58 @@ +package dbcache + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ Cache = (*noOpCache)(nil) + +// noOpCache is a Cache that performs no caching. Every Get falls through +// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. +// Useful for testing the storage layer without cache interference, or for +// workloads where caching is not beneficial. +type noOpCache struct { + readFunc func(key []byte) ([]byte, bool, error) +} + +// NewNoOpCache creates a Cache that always reads from readFunc and never caches. 
+func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { + return &noOpCache{readFunc: readFunc} +} + +func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { + return c.readFunc(key) +} + +func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { + var firstErr error + for k := range keys { + val, _, err := c.readFunc([]byte(k)) + if err != nil { + keys[k] = types.BatchGetResult{Error: err} + if firstErr == nil { + firstErr = err + } + } else { + keys[k] = types.BatchGetResult{Value: val} + } + } + if firstErr != nil { + return fmt.Errorf("unable to batch get: %w", firstErr) + } + return nil +} + +func (c *noOpCache) Set([]byte, []byte) { + // intentional no-op +} + +func (c *noOpCache) Delete([]byte) { + // intentional no-op +} + +func (c *noOpCache) BatchSet([]CacheUpdate) error { + // intentional no-op + return nil +} diff --git a/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go b/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go new file mode 100644 index 0000000000..2fd7bb2790 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go @@ -0,0 +1,152 @@ +package dbcache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +func newNoOpTestCache(store map[string][]byte) Cache { + return NewNoOpCache(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + }) +} + +func TestNoOpGetFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestNoOpGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + val, found, err := c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func 
TestNoOpGetError(t *testing.T) { + dbErr := errors.New("broken") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + _, _, err := c.Get([]byte("k"), true) + require.ErrorIs(t, err, dbErr) +} + +func TestNoOpGetIgnoresUpdateLru(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val1, _, _ := c.Get([]byte("k"), true) + val2, _, _ := c.Get([]byte("k"), false) + require.Equal(t, string(val1), string(val2)) +} + +func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { + store := map[string][]byte{"k": []byte("v1")} + c := newNoOpTestCache(store) + + val, _, _ := c.Get([]byte("k"), true) + require.Equal(t, "v1", string(val)) + + store["k"] = []byte("v2") + + val, _, _ = c.Get([]byte("k"), true) + require.Equal(t, "v2", string(val), "should re-read from func, not cache") +} + +func TestNoOpSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + c.Set([]byte("k"), []byte("v")) + + _, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found, "Set should not cache anything") +} + +func TestNoOpDeleteIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found, "Delete should not affect reads") + require.Equal(t, "v", string(val)) +} + +func TestNoOpBatchSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + }) + require.NoError(t, err) + + _, found, _ := c.Get([]byte("a"), true) + require.False(t, found) + _, found, _ = c.Get([]byte("b"), true) + require.False(t, found) +} + +func TestNoOpBatchSetEmptyAndNil(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func 
TestNoOpBatchGetAllFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].IsFound()) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestNoOpBatchGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{"x": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["x"].IsFound()) +} + +func TestNoOpBatchGetError(t *testing.T) { + dbErr := errors.New("fail") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + keys := map[string]types.BatchGetResult{"k": {}} + err := c.BatchGet(keys) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) + require.Error(t, keys["k"].Error) +} + +func TestNoOpBatchGetEmpty(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard.go b/sei-db/db_engine/pebbledb/dbcache/shard.go new file mode 100644 index 0000000000..130e650be3 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/shard.go @@ -0,0 +1,404 @@ +package dbcache + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// A single shard of a Cache. +type shard struct { + ctx context.Context + + // A lock to protect the shard's data. + lock sync.Mutex + + // The data in the shard. + data map[string]*shardEntry + + // Organizes data for garbage collection. + gcQueue *lruQueue + + // A pool for asynchronous reads. + readPool threading.Pool + + // A function that reads a value from the database. 
+	readFunc func(key []byte) ([]byte, bool, error)
+
+	// The maximum size of this cache, in bytes. Once the LRU accounting
+	// exceeds this budget, least-recently-used entries are evicted.
+	maxSize uint64
+
+	// Cache-level metrics. Nil-safe; if nil, no metrics are recorded.
+	metrics *CacheMetrics
+}
+
+// The result of a read from the underlying database.
+type readResult struct {
+	value []byte
+	err   error
+}
+
+// The status of a value in the cache.
+type valueStatus int
+
+const (
+	// The value is not known and we are not currently attempting to find it.
+	statusUnknown valueStatus = iota
+	// We've scheduled a read of the value but haven't yet finished the read.
+	statusScheduled
+	// The data is available.
+	statusAvailable
+	// We are aware that the value is deleted (special case of data being available).
+	statusDeleted
+)
+
+// A single shardEntry in a shard. Records data for a single key.
+type shardEntry struct {
+	// The parent shard that contains this entry.
+	shard *shard
+
+	// The current status of this entry.
+	status valueStatus
+
+	// The value, if known.
+	value []byte
+
+	// If the value is not available when we request it,
+	// it will be written to this channel when it is available.
+	valueChan chan readResult
+}
+
+// NewShard creates a new shard that reads through to readFunc on cache
+// misses, using readPool to perform those reads asynchronously.
+// maxSize is the shard's eviction budget in bytes and must be non-zero.
+func NewShard(
+	ctx context.Context,
+	readPool threading.Pool,
+	readFunc func(key []byte) ([]byte, bool, error),
+	maxSize uint64,
+) (*shard, error) {
+
+	// maxSize is unsigned, so a "<= 0" comparison is vacuous; zero is the
+	// only invalid value and is checked explicitly.
+	if maxSize == 0 {
+		return nil, fmt.Errorf("maxSize must be greater than 0")
+	}
+
+	return &shard{
+		ctx:      ctx,
+		readPool: readPool,
+		readFunc: readFunc,
+		lock:     sync.Mutex{},
+		data:     make(map[string]*shardEntry),
+		gcQueue:  newLRUQueue(),
+		maxSize:  maxSize,
+	}, nil
+}
+
+// Get returns the value for the given key, or (nil, false) if not found.
+func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { + s.lock.Lock() + + entry := s.getEntry(key) + + switch entry.status { + case statusAvailable: + return s.getAvailable(entry, key, updateLru) + case statusDeleted: + return s.getDeleted(key, updateLru) + case statusScheduled: + return s.getScheduled(entry) + case statusUnknown: + return s.getUnknown(entry, key) + default: + s.lock.Unlock() + panic(fmt.Sprintf("unexpected status: %#v", entry.status)) + } +} + +// Handles Get for a key whose value is already cached. Lock must be held; releases it. +func (s *shard) getAvailable(entry *shardEntry, key []byte, updateLru bool) ([]byte, bool, error) { + value := bytes.Clone(entry.value) + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return value, true, nil +} + +// Handles Get for a key known to be deleted. Lock must be held; releases it. +func (s *shard) getDeleted(key []byte, updateLru bool) ([]byte, bool, error) { + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return nil, false, nil +} + +// Handles Get for a key with an in-flight read from another goroutine. Lock must be held; releases it. +func (s *shard) getScheduled(entry *shardEntry) ([]byte, bool, error) { + valueChan := entry.valueChan + s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() + result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) + } + return result.value, result.value != nil, nil +} + +// Handles Get for a key not yet read. Schedules the read and waits. Lock must be held; releases it. 
+func (s *shard) getUnknown(entry *shardEntry, key []byte) ([]byte, bool, error) {
+	entry.status = statusScheduled
+	valueChan := make(chan readResult, 1)
+	entry.valueChan = valueChan
+	s.lock.Unlock()
+	s.metrics.reportCacheMisses(1)
+	startTime := time.Now()
+	err := s.readPool.Submit(s.ctx, func() {
+		value, _, readErr := s.readFunc(key)
+		entry.injectValue(key, readResult{value: value, err: readErr})
+	})
+	if err != nil {
+		// The read never made it into the pool, so nothing will ever fill
+		// valueChan. Inject the error so the entry is reset (letting future
+		// callers retry) and any goroutines already waiting on the channel
+		// are unblocked, instead of leaving the entry stuck in
+		// statusScheduled forever.
+		entry.injectValue(key, readResult{err: err})
+		return nil, false, fmt.Errorf("failed to schedule read: %w", err)
+	}
+	result, err := threading.InterruptiblePull(s.ctx, valueChan)
+	s.metrics.reportCacheMissLatency(time.Since(startTime))
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to pull value from channel: %w", err)
+	}
+	valueChan <- result // reload the channel in case there are other listeners
+	if result.err != nil {
+		return nil, false, result.err
+	}
+	return result.value, result.value != nil, nil
+}
+
+// injectValue is called when a scheduled read completes (successfully or
+// not). It updates the cache under the shard lock and then publishes the
+// result to any goroutines waiting on the entry's channel.
+func (se *shardEntry) injectValue(key []byte, result readResult) {
+	se.shard.lock.Lock()
+
+	if se.status == statusScheduled {
+		if result.err != nil {
+			// Don't cache errors — reset so the next caller retries.
+			delete(se.shard.data, string(key))
+		} else if result.value == nil {
+			// A nil value means the key does not exist in the database.
+			se.status = statusDeleted
+			se.value = nil
+			se.shard.gcQueue.Push(key, uint64(len(key)))
+			se.shard.evictUnlocked()
+		} else {
+			se.status = statusAvailable
+			se.value = result.value
+			se.shard.gcQueue.Push(key, uint64(len(key)+len(result.value))) //nolint:gosec // G115: len is non-negative
+			se.shard.evictUnlocked()
+		}
+	}
+
+	se.shard.lock.Unlock()
+
+	se.valueChan <- result
+}
+
+// Get a shard entry for a given key. Caller is responsible for holding the shard's lock
+// when this method is called.
+func (s *shard) getEntry(key []byte) *shardEntry {
+	if entry, ok := s.data[string(key)]; ok {
+		return entry
+	}
+	entry := &shardEntry{
+		shard:  s,
+		status: statusUnknown,
+	}
+	s.data[string(key)] = entry
+	return entry
+}
+
+// Tracks a key whose value is not yet available and must be waited on.
+type pendingRead struct {
+	key           string
+	entry         *shardEntry
+	valueChan     chan readResult
+	needsSchedule bool
+	// Populated after the read completes, used by bulkInjectValues.
+	result readResult
+}
+
+// BatchGet reads a batch of keys from the shard. Results are written into the provided map.
+func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error {
+	pending := make([]pendingRead, 0, len(keys))
+	var hits int64
+
+	s.lock.Lock()
+	for key := range keys {
+		entry := s.getEntry([]byte(key))
+
+		switch entry.status {
+		// Note: this must be a comma-separated case list. Writing
+		// "statusAvailable | statusDeleted" is a bitwise OR (2|3 == 3 ==
+		// statusDeleted), which routes available entries to the default
+		// branch and panics.
+		case statusAvailable, statusDeleted:
+			keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value)}
+			hits++
+		case statusScheduled:
+			// Another goroutine is already reading this key; wait on its channel.
+			pending = append(pending, pendingRead{
+				key:       key,
+				entry:     entry,
+				valueChan: entry.valueChan,
+			})
+		case statusUnknown:
+			entry.status = statusScheduled
+			valueChan := make(chan readResult, 1)
+			entry.valueChan = valueChan
+			pending = append(pending, pendingRead{
+				key:           key,
+				entry:         entry,
+				valueChan:     valueChan,
+				needsSchedule: true,
+			})
+		default:
+			s.lock.Unlock()
+			panic(fmt.Sprintf("unexpected status: %#v", entry.status))
+		}
+	}
+	s.lock.Unlock()
+
+	if hits > 0 {
+		s.metrics.reportCacheHits(hits)
+	}
+	if len(pending) == 0 {
+		return nil
+	}
+
+	s.metrics.reportCacheMisses(int64(len(pending)))
+	startTime := time.Now()
+
+	for i := range pending {
+		if !pending[i].needsSchedule {
+			continue
+		}
+		p := &pending[i]
+		err := s.readPool.Submit(s.ctx, func() {
+			value, _, readErr := s.readFunc([]byte(p.key))
+			p.entry.valueChan <- readResult{value: value, err: readErr}
+		})
+		if err != nil {
+			// The pool rejected the read. Un-stick this entry and every entry
+			// we have not yet submitted, so later callers retry instead of
+			// blocking forever on channels nobody will fill.
+			for j := i; j < len(pending); j++ {
+				if pending[j].needsSchedule {
+					pending[j].entry.injectValue([]byte(pending[j].key), readResult{err: err})
+				}
+			}
+			return fmt.Errorf("failed to schedule read: %w", err)
+		}
+	}
+
+	for i := range pending {
+		result, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan)
+		if err != nil {
+			return fmt.Errorf("failed to pull value from channel: %w", err)
+		}
+		pending[i].valueChan <- result // reload the channel in case there are other listeners
+		pending[i].result = result
+
+		if result.err != nil {
+			keys[pending[i].key] = types.BatchGetResult{Error: result.err}
+		} else {
+			keys[pending[i].key] = types.BatchGetResult{Value: result.value}
+		}
+	}
+
+	s.metrics.reportCacheMissLatency(time.Since(startTime))
+	// Cache updates are deferred to a single lock acquisition off the hot path.
+	go s.bulkInjectValues(pending)
+
+	return nil
+}
+
+// Applies deferred cache updates for a batch of reads under a single lock acquisition.
+func (s *shard) bulkInjectValues(reads []pendingRead) {
+	s.lock.Lock()
+	for i := range reads {
+		entry := reads[i].entry
+		if entry.status != statusScheduled {
+			// Someone else (Set/Delete/another inject) already resolved it.
+			continue
+		}
+		result := reads[i].result
+		if result.err != nil {
+			// Don't cache errors — reset so the next caller retries.
+			delete(s.data, reads[i].key)
+		} else if result.value == nil {
+			entry.status = statusDeleted
+			entry.value = nil
+			s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key)))
+		} else {
+			entry.status = statusAvailable
+			entry.value = result.value
+			s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key)+len(result.value))) //nolint:gosec // G115
+		}
+	}
+	s.evictUnlocked()
+	s.lock.Unlock()
+}
+
+// Evicts least recently used entries until the cache is within its size budget.
+// Caller is required to hold the lock.
+func (s *shard) evictUnlocked() {
+	for s.gcQueue.GetTotalSize() > s.maxSize {
+		next := s.gcQueue.PopLeastRecentlyUsed()
+		delete(s.data, next)
+	}
+}
+
+// getSizeInfo returns the current size (bytes) and entry count under the shard lock.
+// Result names avoid shadowing the imported "bytes" package.
+func (s *shard) getSizeInfo() (sizeBytes uint64, entryCount uint64) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	return s.gcQueue.GetTotalSize(), s.gcQueue.GetCount()
+}
+
+// Set sets the value for the given key.
+func (s *shard) Set(key []byte, value []byte) { + s.lock.Lock() + s.setUnlocked(key, value) + s.lock.Unlock() +} + +// Set a value. Caller is required to hold the lock. +func (s *shard) setUnlocked(key []byte, value []byte) { + entry := s.getEntry(key) + entry.status = statusAvailable + entry.value = value + + s.gcQueue.Push(key, uint64(len(key)+len(value))) //nolint:gosec // G115 + s.evictUnlocked() +} + +// BatchSet sets the values for a batch of keys. +func (s *shard) BatchSet(entries []CacheUpdate) { + s.lock.Lock() + for i := range entries { + if entries[i].IsDelete() { + s.deleteUnlocked(entries[i].Key) + } else { + s.setUnlocked(entries[i].Key, entries[i].Value) + } + } + s.lock.Unlock() +} + +// Delete deletes the value for the given key. +func (s *shard) Delete(key []byte) { + s.lock.Lock() + s.deleteUnlocked(key) + s.lock.Unlock() +} + +// Delete a value. Caller is required to hold the lock. +func (s *shard) deleteUnlocked(key []byte) { + entry := s.getEntry(key) + entry.status = statusDeleted + entry.value = nil + + s.gcQueue.Push(key, uint64(len(key))) + s.evictUnlocked() +} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_manager.go b/sei-db/db_engine/pebbledb/dbcache/shard_manager.go new file mode 100644 index 0000000000..bfc837845c --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/shard_manager.go @@ -0,0 +1,46 @@ +package dbcache + +import ( + "errors" + "hash/maphash" + "sync" +) + +var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") + +// A utility for assigning keys to shard indices. +type shardManager struct { + // A random seed that makes it hard for an attacker to predict the shard index and to skew the distribution. + seed maphash.Seed + // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) + mask uint64 + // reusable Hash objects to avoid allocs + pool sync.Pool +} + +// Creates a new Sharder. 
Number of shards must be a power of two and greater than 0. +func newShardManager(numShards uint64) (*shardManager, error) { + if numShards == 0 || (numShards&(numShards-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + + return &shardManager{ + seed: maphash.MakeSeed(), // secret, randomized + mask: numShards - 1, + pool: sync.Pool{ + New: func() any { return new(maphash.Hash) }, + }, + }, nil +} + +// Shard returns a shard index in [0, numShards). +// addr should be the raw address bytes (e.g., 20-byte ETH address). +func (s *shardManager) Shard(addr []byte) uint64 { + h := s.pool.Get().(*maphash.Hash) + h.SetSeed(s.seed) + _, _ = h.Write(addr) + x := h.Sum64() + s.pool.Put(h) + + return x & s.mask +} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go b/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go new file mode 100644 index 0000000000..07aa2041a2 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go @@ -0,0 +1,271 @@ +package dbcache + +import ( + "fmt" + "math" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +// --- NewShardManager --- + +func TestNewShardManagerValidPowersOfTwo(t *testing.T) { + for exp := 0; exp < 20; exp++ { + n := uint64(1) << exp + sm, err := newShardManager(n) + require.NoError(t, err, "numShards=%d", n) + require.NotNil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerZeroReturnsError(t *testing.T) { + sm, err := newShardManager(0) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func TestNewShardManagerNonPowersOfTwoReturnError(t *testing.T) { + bad := []uint64{3, 5, 6, 7, 9, 10, 12, 15, 17, 100, 255, 1023} + for _, n := range bad { + sm, err := newShardManager(n) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo, "numShards=%d", n) + require.Nil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerMaxUint64ReturnsError(t *testing.T) { + sm, err := newShardManager(math.MaxUint64) + require.ErrorIs(t, err, 
ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func TestNewShardManagerLargePowerOfTwo(t *testing.T) { + n := uint64(1) << 40 + sm, err := newShardManager(n) + require.NoError(t, err) + require.NotNil(t, sm) +} + +// --- Shard: basic behaviour --- + +func TestShardReturnsBoundedIndex(t *testing.T) { + for _, numShards := range []uint64{1, 2, 4, 16, 256, 1024} { + sm, err := newShardManager(numShards) + require.NoError(t, err) + + for i := 0; i < 500; i++ { + key := []byte(fmt.Sprintf("key-%d", i)) + idx := sm.Shard(key) + require.Less(t, idx, numShards, "numShards=%d key=%s", numShards, key) + } + } +} + +func TestShardDeterministic(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("deterministic-test-key") + first := sm.Shard(key) + for i := 0; i < 100; i++ { + require.Equal(t, first, sm.Shard(key)) + } +} + +func TestShardSingleShardAlwaysReturnsZero(t *testing.T) { + sm, err := newShardManager(1) + require.NoError(t, err) + + keys := [][]byte{ + {}, + {0x00}, + {0xFF}, + []byte("anything"), + []byte("another key entirely"), + } + for _, k := range keys { + require.Equal(t, uint64(0), sm.Shard(k), "key=%q", k) + } +} + +func TestShardEmptyKey(t *testing.T) { + sm, err := newShardManager(8) + require.NoError(t, err) + + idx := sm.Shard([]byte{}) + require.Less(t, idx, uint64(8)) + + // Deterministic + require.Equal(t, idx, sm.Shard([]byte{})) +} + +func TestShardNilKey(t *testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + idx := sm.Shard(nil) + require.Less(t, idx, uint64(4)) + require.Equal(t, idx, sm.Shard(nil)) +} + +func TestShardBinaryKeys(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + k1 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} + k2 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} + 
+ idx1 := sm.Shard(k1) + idx2 := sm.Shard(k2) + require.Less(t, idx1, uint64(16)) + require.Less(t, idx2, uint64(16)) +} + +func TestShardCallerMutationDoesNotAffectFutureResults(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("mutable") + first := sm.Shard(key) + + key[0] = 'X' + second := sm.Shard([]byte("mutable")) + require.Equal(t, first, second) +} + +// --- Distribution --- + +func TestShardDistribution(t *testing.T) { + const numShards = 16 + const numKeys = 10_000 + sm, err := newShardManager(numShards) + require.NoError(t, err) + + counts := make([]int, numShards) + for i := 0; i < numKeys; i++ { + key := []byte(fmt.Sprintf("addr-%06d", i)) + counts[sm.Shard(key)]++ + } + + expected := float64(numKeys) / float64(numShards) + for shard, count := range counts { + ratio := float64(count) / expected + require.Greater(t, ratio, 0.5, "shard %d is severely underrepresented (%d)", shard, count) + require.Less(t, ratio, 1.5, "shard %d is severely overrepresented (%d)", shard, count) + } +} + +// --- Distinct managers --- + +func TestDifferentManagersHaveDifferentSeeds(t *testing.T) { + sm1, err := newShardManager(256) + require.NoError(t, err) + sm2, err := newShardManager(256) + require.NoError(t, err) + + // With distinct random seeds, at least some keys should hash differently. 
+ diffCount := 0 + for i := 0; i < 200; i++ { + key := []byte(fmt.Sprintf("seed-test-%d", i)) + if sm1.Shard(key) != sm2.Shard(key) { + diffCount++ + } + } + require.Greater(t, diffCount, 0, "two managers with independent seeds should differ on at least one key") +} + +// --- Concurrency --- + +func TestShardConcurrentAccess(t *testing.T) { + sm, err := newShardManager(64) + require.NoError(t, err) + + const goroutines = 32 + const iters = 1000 + + key := []byte("concurrent-key") + expected := sm.Shard(key) + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + go func() { + defer wg.Done() + for i := 0; i < iters; i++ { + got := sm.Shard(key) + if got != expected { + t.Errorf("concurrent Shard returned %d, want %d", got, expected) + return + } + } + }() + } + wg.Wait() +} + +func TestShardConcurrentDifferentKeys(t *testing.T) { + sm, err := newShardManager(32) + require.NoError(t, err) + + const goroutines = 16 + const keysPerGoroutine = 500 + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + g := g + go func() { + defer wg.Done() + for i := 0; i < keysPerGoroutine; i++ { + key := []byte(fmt.Sprintf("g%d-k%d", g, i)) + idx := sm.Shard(key) + if idx >= 32 { + t.Errorf("Shard(%q) = %d, want < 32", key, idx) + return + } + } + }() + } + wg.Wait() +} + +// --- Mask correctness --- + +func TestShardMaskMatchesNumShards(t *testing.T) { + for exp := 0; exp < 16; exp++ { + numShards := uint64(1) << exp + sm, err := newShardManager(numShards) + require.NoError(t, err) + require.Equal(t, numShards-1, sm.mask, "numShards=%d", numShards) + } +} + +// --- 20-byte ETH-style addresses --- + +func TestShardWith20ByteAddresses(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + addr := make([]byte, 20) + for i := 0; i < 20; i++ { + addr[i] = byte(i + 1) + } + + idx := sm.Shard(addr) + require.Less(t, idx, uint64(16)) + require.Equal(t, idx, sm.Shard(addr)) +} + +func TestShardSingleByteKey(t 
*testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + for b := 0; b < 256; b++ { + idx := sm.Shard([]byte{byte(b)}) + require.Less(t, idx, uint64(4), "byte=%d", b) + } +} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_test.go b/sei-db/db_engine/pebbledb/dbcache/shard_test.go new file mode 100644 index 0000000000..b2c0b5ad77 --- /dev/null +++ b/sei-db/db_engine/pebbledb/dbcache/shard_test.go @@ -0,0 +1,815 @@ +package dbcache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +// newTestShard creates a shard backed by a simple in-memory map. +// The returned readFunc map can be populated before calling Get. 
+func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) *shard { + t.Helper() + readFunc := func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + } + s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, maxSize) + require.NoError(t, err) + return s +} + +// --------------------------------------------------------------------------- +// NewShard +// --------------------------------------------------------------------------- + +func TestNewShardValid(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 1024) + require.NoError(t, err) + require.NotNil(t, s) +} + +func TestNewShardZeroMaxSize(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } + _, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 0) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — cache miss flows +// --------------------------------------------------------------------------- + +func TestGetCacheMissFoundInDB(t *testing.T) { + store := map[string][]byte{"hello": []byte("world")} + s := newTestShard(t, 4096, store) + + val, found, err := s.Get([]byte("hello"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "world", string(val)) +} + +func TestGetCacheMissNotFoundInDB(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + val, found, err := s.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestGetCacheMissDBError(t *testing.T) { + dbErr := errors.New("disk on fire") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 
readFunc, 4096) + + _, _, err := s.Get([]byte("boom"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestGetDBErrorDoesNotCacheResult(t *testing.T) { + var calls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient") + } + return []byte("recovered"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + _, _, err := s.Get([]byte("key"), true) + require.Error(t, err, "first call should fail") + + val, found, err := s.Get([]byte("key"), true) + require.NoError(t, err, "second call should succeed") + require.True(t, found) + require.Equal(t, "recovered", string(val)) + require.Equal(t, int64(2), calls.Load(), "error should not be cached") +} + +// --------------------------------------------------------------------------- +// Get — cache hit flows +// --------------------------------------------------------------------------- + +func TestGetCacheHitAvailable(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{"k": []byte("v")}) + + s.Get([]byte("k"), true) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestGetCacheHitDeleted(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Get([]byte("gone"), true) + + val, found, err := s.Get([]byte("gone"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestGetAfterSet(t *testing.T) { + var readCalls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return nil, false, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + s.Set([]byte("k"), []byte("from-set")) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "from-set", string(val)) + 
require.Equal(t, int64(0), readCalls.Load(), "readFunc should not be called for Set-populated entry") +} + +func TestGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + s := newTestShard(t, 4096, store) + + s.Delete([]byte("k")) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Get — concurrent reads on the same key +// --------------------------------------------------------------------------- + +func TestGetConcurrentSameKey(t *testing.T) { + var readCalls atomic.Int64 + gate := make(chan struct{}) + + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + <-gate + return []byte("value"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + const n = 10 + var wg sync.WaitGroup + errs := make([]error, n) + vals := make([]string, n) + founds := make([]bool, n) + + for i := 0; i < n; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + v, f, e := s.Get([]byte("shared"), true) + vals[idx] = string(v) + founds[idx] = f + errs[idx] = e + }(i) + } + + time.Sleep(50 * time.Millisecond) + close(gate) + wg.Wait() + + for i := 0; i < n; i++ { + require.NoError(t, errs[i], "goroutine %d", i) + require.True(t, founds[i], "goroutine %d", i) + require.Equal(t, "value", vals[i], "goroutine %d", i) + } + + require.Equal(t, int64(1), readCalls.Load(), "readFunc should be called exactly once") +} + +// --------------------------------------------------------------------------- +// Get — context cancellation +// --------------------------------------------------------------------------- + +func TestGetContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + readFunc := func(key []byte) ([]byte, bool, error) { + time.Sleep(time.Second) + return []byte("late"), true, nil + } + s, _ := 
NewShard(ctx, threading.NewAdHocPool(), readFunc, 4096) + + cancel() + + _, _, err := s.Get([]byte("k"), true) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — updateLru flag +// --------------------------------------------------------------------------- + +func TestGetUpdateLruTrue(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s := newTestShard(t, 4096, store) + + s.Get([]byte("a"), true) + s.Get([]byte("b"), true) + + // Touch "a" via Get with updateLru=true, making "b" the LRU. + s.Get([]byte("a"), true) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "b", lru) +} + +func TestGetUpdateLruFalse(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s := newTestShard(t, 4096, store) + + s.Get([]byte("a"), true) + s.Get([]byte("b"), true) + + // Access "a" without updating LRU — "a" should remain the LRU entry. 
+ s.Get([]byte("a"), false) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "a", lru, "updateLru=false should not move entry") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestSetNewKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestSetOverwritesExistingKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("old")) + s.Set([]byte("k"), []byte("new")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestSetOverwritesDeletedKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("revived")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "revived", string(val)) +} + +func TestSetNilValue(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), nil) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +func TestSetEmptyKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte(""), []byte("empty-key-val")) + + val, found, err := s.Get([]byte(""), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "empty-key-val", string(val)) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestDeleteExistingKey(t *testing.T) { + 
s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteNonexistentKey(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("ghost")) + + val, found, err := s.Get([]byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteThenSetThenGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v1")) + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("v2")) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestBatchSetSetsMultiple(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + {Key: []byte("c"), Value: []byte("3")}, + }) + + for _, tc := range []struct { + key, want string + }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := s.Get([]byte(tc.key), false) + require.NoError(t, err, "Get(%q)", tc.key) + require.True(t, found, "Get(%q)", tc.key) + require.Equal(t, tc.want, string(val), "Get(%q)", tc.key) + } +} + +func TestBatchSetMixedSetAndDelete(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("keep"), []byte("v")) + s.Set([]byte("remove"), []byte("v")) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), Value: nil}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + + val, found, _ := s.Get([]byte("keep"), false) + require.True(t, found) + 
require.Equal(t, "updated", string(val)) + + _, found, _ = s.Get([]byte("remove"), false) + require.False(t, found, "expected remove to be deleted") + + val, found, _ = s.Get([]byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestBatchSetEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + s.BatchSet(nil) + s.BatchSet([]CacheUpdate{}) + + bytes, entries := s.getSizeInfo() + require.Equal(t, 0, bytes) + require.Equal(t, 0, entries) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestBatchGetAllCached(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1")) + s.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{ + "a": {}, + "b": {}, + } + require.NoError(t, s.BatchGet(keys)) + + for k, want := range map[string]string{"a": "1", "b": "2"} { + r := keys[k] + require.True(t, r.IsFound(), "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} + +func TestBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + s := newTestShard(t, 4096, store) + + keys := map[string]types.BatchGetResult{ + "x": {}, + "y": {}, + } + require.NoError(t, s.BatchGet(keys)) + + for k, want := range map[string]string{"x": "10", "y": "20"} { + r := keys[k] + require.True(t, r.IsFound(), "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} + +func TestBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + s := newTestShard(t, 4096, store) + + s.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{ + "cached": {}, + "db-key": {}, + } + require.NoError(t, s.BatchGet(keys)) + + require.True(t, keys["cached"].IsFound()) + require.Equal(t, "from-cache", 
string(keys["cached"].Value)) + require.True(t, keys["db-key"].IsFound()) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestBatchGetNotFoundKeys(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{ + "nope": {}, + } + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["nope"].IsFound()) +} + +func TestBatchGetDeletedKeys(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("del"), []byte("v")) + s.Delete([]byte("del")) + + keys := map[string]types.BatchGetResult{ + "del": {}, + } + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["del"].IsFound()) +} + +func TestBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{ + "fail": {}, + } + require.NoError(t, s.BatchGet(keys), "BatchGet itself should not fail") + require.Error(t, keys["fail"].Error, "expected per-key error") +} + +func TestBatchGetEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, s.BatchGet(keys)) +} + +func TestBatchGetCachesResults(t *testing.T) { + var readCalls atomic.Int64 + store := map[string][]byte{"k": []byte("v")} + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + v, ok := store[string(key)] + return v, ok, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(keys) + + // bulkInjectValues runs in a goroutine — give it a moment. 
+ time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get([]byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) + require.Equal(t, int64(1), readCalls.Load(), "result should be cached") +} + +// --------------------------------------------------------------------------- +// Eviction +// --------------------------------------------------------------------------- + +func TestEvictionRespectMaxSize(t *testing.T) { + s := newTestShard(t, 30, map[string][]byte{}) + + // key="a" (1 byte) + value="aaaaaaaaaa" (10 bytes) = 11 bytes per entry + s.Set([]byte("a"), []byte("aaaaaaaaaa")) + s.Set([]byte("b"), []byte("bbbbbbbbbb")) + + _, entries := s.getSizeInfo() + require.Equal(t, 2, entries) + + // Third entry pushes to 33 bytes, exceeding maxSize=30 → evict "a". + s.Set([]byte("c"), []byte("cccccccccc")) + + bytes, entries := s.getSizeInfo() + require.LessOrEqual(t, bytes, 30, "shard size should not exceed maxSize") + require.Equal(t, 2, entries) +} + +func TestEvictionOrderIsLRU(t *testing.T) { + // Each entry: key(1) + value(4) = 5 bytes. maxSize=15 → fits 3. + s := newTestShard(t, 15, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1111")) + s.Set([]byte("b"), []byte("2222")) + s.Set([]byte("c"), []byte("3333")) + + // Touch "a" so "b" becomes the LRU. + s.Get([]byte("a"), true) + + // Insert "d" → total 20 > 15 → must evict. "b" is LRU. 
+ s.Set([]byte("d"), []byte("4444")) + + s.lock.Lock() + _, bExists := s.data["b"] + _, aExists := s.data["a"] + s.lock.Unlock() + + require.False(t, bExists, "expected 'b' to be evicted (it was LRU)") + require.True(t, aExists, "expected 'a' to survive (it was recently touched)") +} + +func TestEvictionOnDelete(t *testing.T) { + s := newTestShard(t, 10, map[string][]byte{}) + + s.Set([]byte("a"), []byte("val")) // size 4 + s.Delete([]byte("longkey1")) // size 8 + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 10, "size should not exceed maxSize") +} + +func TestEvictionOnGetFromDB(t *testing.T) { + store := map[string][]byte{ + "x": []byte("12345678901234567890"), + } + s := newTestShard(t, 25, store) + + s.Set([]byte("a"), []byte("small")) + + // Reading "x" brings in 1+20=21 bytes, total becomes 6+21=27 > 25 → eviction. + s.Get([]byte("x"), true) + + time.Sleep(50 * time.Millisecond) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 25, "size should not exceed maxSize after DB read") +} + +// --------------------------------------------------------------------------- +// getSizeInfo +// --------------------------------------------------------------------------- + +func TestGetSizeInfoEmpty(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + bytes, entries := s.getSizeInfo() + require.Equal(t, 0, bytes) + require.Equal(t, 0, entries) +} + +func TestGetSizeInfoAfterSets(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("ab"), []byte("cd")) // 2+2 = 4 + s.Set([]byte("efg"), []byte("hi")) // 3+2 = 5 + + bytes, entries := s.getSizeInfo() + require.Equal(t, 2, entries) + require.Equal(t, 9, bytes) +} + +// --------------------------------------------------------------------------- +// injectValue — edge cases +// --------------------------------------------------------------------------- + +func TestInjectValueNotFound(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + 
val, found, err := s.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) + + s.lock.Lock() + entry, ok := s.data["missing"] + s.lock.Unlock() + require.True(t, ok, "entry should exist in map") + require.Equal(t, statusDeleted, entry.status) +} + +// --------------------------------------------------------------------------- +// Concurrent Set and Get +// --------------------------------------------------------------------------- + +func TestConcurrentSetAndGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + const n = 100 + var wg sync.WaitGroup + + for i := 0; i < n; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go func() { + defer wg.Done() + s.Set(key, val) + }() + go func() { + defer wg.Done() + s.Get(key, true) + }() + } + + wg.Wait() +} + +func TestConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + s := newTestShard(t, 100_000, store) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 20) + for i := 0; i < 20; i++ { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + s.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + s.BatchGet(keys) + }() + + wg.Wait() +} + +// --------------------------------------------------------------------------- +// Pool submission failure +// --------------------------------------------------------------------------- + +type failPool struct{} + +func (fp *failPool) Submit(_ context.Context, _ func()) error { + return errors.New("pool exhausted") +} + +func TestGetPoolSubmitFailure(t *testing.T) { + 
readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } + s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + + _, _, err := s.Get([]byte("k"), true) + require.Error(t, err) +} + +func TestBatchGetPoolSubmitFailure(t *testing.T) { + readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } + s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + err := s.BatchGet(keys) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Large values +// --------------------------------------------------------------------------- + +func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { + s := newTestShard(t, 100, map[string][]byte{}) + + s.Set([]byte("a"), []byte("small")) + + bigVal := make([]byte, 95) + for i := range bigVal { + bigVal[i] = 'X' + } + s.Set([]byte("b"), bigVal) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, 100, "size should not exceed maxSize after large set") +} + +// --------------------------------------------------------------------------- +// bulkInjectValues — error entries are not cached +// --------------------------------------------------------------------------- + +func TestBatchGetDBErrorNotCached(t *testing.T) { + var calls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient db error") + } + return []byte("ok"), true, nil + } + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(keys) + + // Wait for bulkInjectValues goroutine. 
+ time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get([]byte("k"), true) + require.NoError(t, err, "retry should succeed") + require.True(t, found) + require.Equal(t, "ok", string(val)) +} + +// --------------------------------------------------------------------------- +// Edge: Set then Delete then BatchGet +// --------------------------------------------------------------------------- + +func TestSetDeleteThenBatchGet(t *testing.T) { + s := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, s.BatchGet(keys)) + require.False(t, keys["k"].IsFound()) +} From 4b2247b519bc3434b8bac04c25fe0bf8d79ad70b Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 13 Mar 2026 11:37:01 -0500 Subject: [PATCH 057/119] Helper files for the flatKV cache implementation --- Makefile | 7 + sei-db/db_engine/dbcache/cache.go | 47 +++ sei-db/db_engine/dbcache/cached_batch.go | 55 ++++ sei-db/db_engine/dbcache/cached_batch_test.go | 204 ++++++++++++ .../db_engine/dbcache/cached_key_value_db.go | 83 +++++ sei-db/db_engine/dbcache/lru_queue.go | 83 +++++ sei-db/db_engine/dbcache/lru_queue_test.go | 310 ++++++++++++++++++ sei-db/db_engine/dbcache/noop_cache.go | 58 ++++ sei-db/db_engine/dbcache/noop_cache_test.go | 152 +++++++++ sei-db/db_engine/dbcache/shard_manager.go | 46 +++ .../db_engine/dbcache/shard_manager_test.go | 271 +++++++++++++++ sei-db/db_engine/types/types.go | 13 + 12 files changed, 1329 insertions(+) create mode 100644 sei-db/db_engine/dbcache/cache.go create mode 100644 sei-db/db_engine/dbcache/cached_batch.go create mode 100644 sei-db/db_engine/dbcache/cached_batch_test.go create mode 100644 sei-db/db_engine/dbcache/cached_key_value_db.go create mode 100644 sei-db/db_engine/dbcache/lru_queue.go create mode 100644 sei-db/db_engine/dbcache/lru_queue_test.go create mode 100644 sei-db/db_engine/dbcache/noop_cache.go create mode 100644 
sei-db/db_engine/dbcache/noop_cache_test.go create mode 100644 sei-db/db_engine/dbcache/shard_manager.go create mode 100644 sei-db/db_engine/dbcache/shard_manager_test.go diff --git a/Makefile b/Makefile index 83b26cd84b..de0e50f8e2 100644 --- a/Makefile +++ b/Makefile @@ -157,6 +157,13 @@ lint: go mod tidy go mod verify +# Run lint on the sei-db package. Much faster than running lint on the entire project.\ +# Makes life easier for storage team when iterating on changes inside the sei-db package. +dblint: + go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0 run ./sei-db/... + go fmt ./sei-db/... + go vet ./sei-db/... + build: go build $(BUILD_FLAGS) -o ./build/seid ./cmd/seid diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go new file mode 100644 index 0000000000..fa4b292b85 --- /dev/null +++ b/sei-db/db_engine/dbcache/cache.go @@ -0,0 +1,47 @@ +package dbcache + +import ( + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// Cache describes a cache capable of being used by a FlatKV store. +type Cache interface { + + // Get returns the value for the given key, or (nil, false) if not found. + Get( + // The entry to fetch. + key []byte, + // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. + // Useful for when an operation is performed multiple times in close succession on the same key, + // since it requires non-zero overhead to do so with little benefit. + updateLru bool, + ) ([]byte, bool, error) + + // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]types.BatchGetResult) error + + // Set sets the value for the given key. + Set(key []byte, value []byte) + + // Delete deletes the value for the given key. 
+ Delete(key []byte) + + // BatchSet applies the given updates to the cache. + BatchSet(updates []CacheUpdate) error +} + +// CacheUpdate describes a single key-value mutation to apply to the cache. +type CacheUpdate struct { + // The key to update. + Key []byte + // The value to set. If nil, the key will be deleted. + Value []byte +} + +// IsDelete returns true if the update is a delete operation. +func (u *CacheUpdate) IsDelete() bool { + return u.Value == nil +} diff --git a/sei-db/db_engine/dbcache/cached_batch.go b/sei-db/db_engine/dbcache/cached_batch.go new file mode 100644 index 0000000000..e4995fe33b --- /dev/null +++ b/sei-db/db_engine/dbcache/cached_batch.go @@ -0,0 +1,55 @@ +package dbcache + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// cachedBatch wraps a types.Batch and applies pending mutations to the cache +// after a successful commit. +type cachedBatch struct { + inner types.Batch + cache Cache + pending []CacheUpdate +} + +var _ types.Batch = (*cachedBatch)(nil) + +func newCachedBatch(inner types.Batch, cache Cache) *cachedBatch { + return &cachedBatch{inner: inner, cache: cache} +} + +func (cb *cachedBatch) Set(key, value []byte) error { + cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: value}) + return cb.inner.Set(key, value) +} + +func (cb *cachedBatch) Delete(key []byte) error { + cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: nil}) + return cb.inner.Delete(key) +} + +func (cb *cachedBatch) Commit(opts types.WriteOptions) error { + if err := cb.inner.Commit(opts); err != nil { + return err + } + if err := cb.cache.BatchSet(cb.pending); err != nil { + return fmt.Errorf("failed to update cache after commit: %w", err) + } + cb.pending = nil + return nil +} + +func (cb *cachedBatch) Len() int { + return cb.inner.Len() +} + +func (cb *cachedBatch) Reset() { + cb.inner.Reset() + cb.pending = nil +} + +func (cb *cachedBatch) Close() error { + return cb.inner.Close() +} diff 
--git a/sei-db/db_engine/dbcache/cached_batch_test.go b/sei-db/db_engine/dbcache/cached_batch_test.go new file mode 100644 index 0000000000..5aeb533238 --- /dev/null +++ b/sei-db/db_engine/dbcache/cached_batch_test.go @@ -0,0 +1,204 @@ +package dbcache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// mock batch +// --------------------------------------------------------------------------- + +type mockBatch struct { + sets []CacheUpdate + deletes [][]byte + committed bool + closed bool + resetCount int + commitErr error +} + +func (m *mockBatch) Set(key, value []byte) error { + m.sets = append(m.sets, CacheUpdate{Key: key, Value: value}) + return nil +} + +func (m *mockBatch) Delete(key []byte) error { + m.deletes = append(m.deletes, key) + return nil +} + +func (m *mockBatch) Commit(opts types.WriteOptions) error { + if m.commitErr != nil { + return m.commitErr + } + m.committed = true + return nil +} + +func (m *mockBatch) Len() int { + return len(m.sets) + len(m.deletes) +} + +func (m *mockBatch) Reset() { + m.sets = nil + m.deletes = nil + m.committed = false + m.resetCount++ +} + +func (m *mockBatch) Close() error { + m.closed = true + return nil +} + +// --------------------------------------------------------------------------- +// mock cache +// --------------------------------------------------------------------------- + +type mockCache struct { + data map[string][]byte + batchSetErr error +} + +func newMockCache() *mockCache { + return &mockCache{data: make(map[string][]byte)} +} + +func (mc *mockCache) Get(key []byte, _ bool) ([]byte, bool, error) { + v, ok := mc.data[string(key)] + return v, ok, nil +} + +func (mc *mockCache) BatchGet(keys map[string]types.BatchGetResult) error { + for k := range keys { + v, ok := mc.data[k] + if ok { + keys[k] = types.BatchGetResult{Value: v} 
+ } + } + return nil +} + +func (mc *mockCache) Set(key, value []byte) { + mc.data[string(key)] = value +} + +func (mc *mockCache) Delete(key []byte) { + delete(mc.data, string(key)) +} + +func (mc *mockCache) BatchSet(updates []CacheUpdate) error { + if mc.batchSetErr != nil { + return mc.batchSetErr + } + for _, u := range updates { + if u.IsDelete() { + delete(mc.data, string(u.Key)) + } else { + mc.data[string(u.Key)] = u.Value + } + } + return nil +} + +// --------------------------------------------------------------------------- +// tests +// --------------------------------------------------------------------------- + +func TestCachedBatchCommitUpdatesCacheOnSuccess(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Set([]byte("b"), []byte("2"))) + require.NoError(t, cb.Commit(types.WriteOptions{})) + + require.True(t, inner.committed) + v, ok := cache.data["a"] + require.True(t, ok) + require.Equal(t, []byte("1"), v) + v, ok = cache.data["b"] + require.True(t, ok) + require.Equal(t, []byte("2"), v) +} + +func TestCachedBatchCommitDoesNotUpdateCacheOnInnerFailure(t *testing.T) { + inner := &mockBatch{commitErr: errors.New("disk full")} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + err := cb.Commit(types.WriteOptions{}) + + require.Error(t, err) + require.Contains(t, err.Error(), "disk full") + _, ok := cache.data["a"] + require.False(t, ok, "cache should not be updated when inner commit fails") +} + +func TestCachedBatchCommitReturnsCacheError(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cache.batchSetErr = errors.New("cache broken") + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + err := cb.Commit(types.WriteOptions{}) + + require.Error(t, err) + require.Contains(t, err.Error(), 
"cache broken") + require.True(t, inner.committed, "inner batch should have committed") +} + +func TestCachedBatchDeleteMarksKeyForRemoval(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cache.Set([]byte("x"), []byte("old")) + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Delete([]byte("x"))) + require.NoError(t, cb.Commit(types.WriteOptions{})) + + _, ok := cache.data["x"] + require.False(t, ok, "key should be deleted from cache") +} + +func TestCachedBatchResetClearsPending(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Set([]byte("b"), []byte("2"))) + cb.Reset() + + require.NoError(t, cb.Commit(types.WriteOptions{})) + + require.Empty(t, cache.data, "cache should have no entries after reset + commit") +} + +func TestCachedBatchLenDelegatesToInner(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.Equal(t, 0, cb.Len()) + require.NoError(t, cb.Set([]byte("a"), []byte("1"))) + require.NoError(t, cb.Delete([]byte("b"))) + require.Equal(t, 2, cb.Len()) +} + +func TestCachedBatchCloseDelegatesToInner(t *testing.T) { + inner := &mockBatch{} + cache := newMockCache() + cb := newCachedBatch(inner, cache) + + require.NoError(t, cb.Close()) + require.True(t, inner.closed) +} diff --git a/sei-db/db_engine/dbcache/cached_key_value_db.go b/sei-db/db_engine/dbcache/cached_key_value_db.go new file mode 100644 index 0000000000..0f926dff98 --- /dev/null +++ b/sei-db/db_engine/dbcache/cached_key_value_db.go @@ -0,0 +1,83 @@ +package dbcache + +import ( + "fmt" + + errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ types.KeyValueDB = (*cachedKeyValueDB)(nil) +var _ types.Checkpointable = (*cachedKeyValueDB)(nil) + +// Combines a cache and a key-value database to 
create a new key-value database with caching. +type cachedKeyValueDB struct { + db types.KeyValueDB + cache Cache +} + +// Combine a cache and a key-value database to create a new key-value database with caching. +func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { + return &cachedKeyValueDB{db: db, cache: cache} +} + +func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { + val, found, err := c.cache.Get(key, true) + if err != nil { + return nil, fmt.Errorf("failed to get value from cache: %w", err) + } + if !found { + return nil, errorutils.ErrNotFound + } + return val, nil +} + +func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error { + err := c.cache.BatchGet(keys) + if err != nil { + return fmt.Errorf("failed to get values from cache: %w", err) + } + return nil +} + +func (c *cachedKeyValueDB) Set(key []byte, value []byte, opts types.WriteOptions) error { + err := c.db.Set(key, value, opts) + if err != nil { + return fmt.Errorf("failed to set value in database: %w", err) + } + c.cache.Set(key, value) + return nil +} + +func (c *cachedKeyValueDB) Delete(key []byte, opts types.WriteOptions) error { + err := c.db.Delete(key, opts) + if err != nil { + return fmt.Errorf("failed to delete value in database: %w", err) + } + c.cache.Delete(key) + return nil +} + +func (c *cachedKeyValueDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, error) { + return c.db.NewIter(opts) +} + +func (c *cachedKeyValueDB) NewBatch() types.Batch { + return newCachedBatch(c.db.NewBatch(), c.cache) +} + +func (c *cachedKeyValueDB) Flush() error { + return c.db.Flush() +} + +func (c *cachedKeyValueDB) Close() error { + return c.db.Close() +} + +func (c *cachedKeyValueDB) Checkpoint(destDir string) error { + cp, ok := c.db.(types.Checkpointable) + if !ok { + return fmt.Errorf("underlying database does not support Checkpoint") + } + return cp.Checkpoint(destDir) +} diff --git a/sei-db/db_engine/dbcache/lru_queue.go 
b/sei-db/db_engine/dbcache/lru_queue.go new file mode 100644 index 0000000000..6870679c9d --- /dev/null +++ b/sei-db/db_engine/dbcache/lru_queue.go @@ -0,0 +1,83 @@ +package dbcache + +import "container/list" + +// Implements a queue-like abstraction with LRU semantics. Not thread safe. +type lruQueue struct { + order *list.List + entries map[string]*list.Element + totalSize uint64 +} + +type lruQueueEntry struct { + key string + size uint64 +} + +// Create a new LRU queue. +func newLRUQueue() *lruQueue { + return &lruQueue{ + order: list.New(), + entries: make(map[string]*list.Element), + } + } + +// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. +func (lru *lruQueue) Push( + // the key in the cache that was recently interacted with + key []byte, + // the size of the key + value + size uint64, +) { + if elem, ok := lru.entries[string(key)]; ok { + entry := elem.Value.(*lruQueueEntry) + lru.totalSize += size - entry.size + entry.size = size + lru.order.MoveToBack(elem) + return + } + + keyStr := string(key) + elem := lru.order.PushBack(&lruQueueEntry{ + key: keyStr, + size: size, + }) + lru.entries[keyStr] = elem + lru.totalSize += size +} + +// Signal that an entry has been interacted with, moving it to the back of the queue +// (i.e. making it so it doesn't get popped soon). +func (lru *lruQueue) Touch(key []byte) { + elem, ok := lru.entries[string(key)] + if !ok { + return + } + lru.order.MoveToBack(elem) +} + +// Returns the total size of all entries in the LRU queue. +func (lru *lruQueue) GetTotalSize() uint64 { + return lru.totalSize +} + +// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. +func (lru *lruQueue) GetCount() uint64 { + return uint64(len(lru.entries)) +} + +// Pops a single element out of the queue. The element removed is the entry least recently pushed or touched. 
+// Returns the key in string form to avoid copying the key an additional time. +// Panics if the queue is empty. +func (lru *lruQueue) PopLeastRecentlyUsed() string { + elem := lru.order.Front() + if elem == nil { + panic("cannot pop from empty LRU queue") + } + + lru.order.Remove(elem) + entry := elem.Value.(*lruQueueEntry) + delete(lru.entries, entry.key) + lru.totalSize -= entry.size + return entry.key +} diff --git a/sei-db/db_engine/dbcache/lru_queue_test.go b/sei-db/db_engine/dbcache/lru_queue_test.go new file mode 100644 index 0000000000..0073e6d1f0 --- /dev/null +++ b/sei-db/db_engine/dbcache/lru_queue_test.go @@ -0,0 +1,310 @@ +package dbcache + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { + lru := newLRUQueue() + + key := []byte("a") + lru.Push(key, 1) + key[0] = 'z' + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestNewLRUQueueStartsEmpty(t *testing.T) { + lru := newLRUQueue() + + require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { + lru := newLRUQueue() + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("x"), 1) + lru.PopLeastRecentlyUsed() + + require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) +} + +func TestPushSingleElement(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("only"), 42) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(42), lru.GetTotalSize()) + require.Equal(t, "only", lru.PopLeastRecentlyUsed()) +} + +func TestPushDuplicateDecreasesSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("k"), 100) + lru.Push([]byte("k"), 30) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) +} + +func 
TestPushDuplicateMovesToBack(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) + + // Re-push "a" — should move it behind "b" and "c" + lru.Push([]byte("a"), 1) + + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestPushZeroSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("z"), 0) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) + require.Equal(t, "z", lru.PopLeastRecentlyUsed()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPushEmptyKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte(""), 5) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "", lru.PopLeastRecentlyUsed()) +} + +func TestPushRepeatedUpdatesToSameKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("k"), 1) + lru.Push([]byte("k"), 2) + lru.Push([]byte("k"), 3) + lru.Push([]byte("k"), 4) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(4), lru.GetTotalSize()) +} + +func TestTouchNonexistentKeyIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + + lru.Touch([]byte("missing")) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) +} + +func TestTouchOnEmptyQueueIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Touch([]byte("ghost")) + + require.Equal(t, uint64(0), lru.GetCount()) +} + +func TestTouchSingleElement(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("solo"), 10) + lru.Touch([]byte("solo")) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) +} + +func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 3) + lru.Push([]byte("b"), 7) + + lru.Touch([]byte("a")) + + require.Equal(t, uint64(2), 
lru.GetCount()) + require.Equal(t, uint64(10), lru.GetTotalSize()) +} + +func TestMultipleTouchesChangeOrder(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + lru.Push([]byte("c"), 1) + + // Order: a, b, c + lru.Touch([]byte("a")) // Order: b, c, a + lru.Touch([]byte("b")) // Order: c, a, b + + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("b")) // "b" is already at back + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestPopDecrementsCountAndSize(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("c"), 30) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(50), lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) +} + +func TestPopFIFOOrderWithoutTouches(t *testing.T) { + lru := newLRUQueue() + keys := []string{"first", "second", "third", "fourth"} + for _, k := range keys { + lru.Push([]byte(k), 1) + } + + for _, want := range keys { + require.Equal(t, want, lru.PopLeastRecentlyUsed()) + } +} + +func TestPushAfterDrain(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 5) + lru.PopLeastRecentlyUsed() + + lru.Push([]byte("x"), 10) + lru.Push([]byte("y"), 20) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(30), lru.GetTotalSize()) + require.Equal(t, "x", lru.PopLeastRecentlyUsed()) +} + +func TestPushPreviouslyPoppedKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("recycled"), 5) + lru.PopLeastRecentlyUsed() + + 
lru.Push([]byte("recycled"), 99) + + require.Equal(t, uint64(1), lru.GetCount()) + require.Equal(t, uint64(99), lru.GetTotalSize()) + require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) +} + +func TestInterleavedPushAndPop(t *testing.T) { + lru := newLRUQueue() + + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 2) + + require.Equal(t, "a", lru.PopLeastRecentlyUsed()) + + lru.Push([]byte("c"), 3) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(5), lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) + require.Equal(t, "c", lru.PopLeastRecentlyUsed()) +} + +func TestTouchThenPushSameKey(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 1) + lru.Push([]byte("b"), 1) + + lru.Touch([]byte("a")) // order: b, a + lru.Push([]byte("a"), 50) // updates size, stays at back + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, uint64(51), lru.GetTotalSize()) + require.Equal(t, "b", lru.PopLeastRecentlyUsed()) +} + +func TestBinaryKeyData(t *testing.T) { + lru := newLRUQueue() + k1 := []byte{0x00, 0xFF, 0x01} + k2 := []byte{0x00, 0xFF, 0x02} + + lru.Push(k1, 10) + lru.Push(k2, 20) + + require.Equal(t, uint64(2), lru.GetCount()) + require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) + + lru.Touch(k2) + require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) +} + +func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { + lru := newLRUQueue() + key := []byte("abc") + lru.Push(key, 1) + + key[0] = 'Z' + lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op + + require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) +} + +func TestManyEntries(t *testing.T) { + lru := newLRUQueue() + n := 1000 + var totalSize uint64 + + for i := 0; i < n; i++ { + k := fmt.Sprintf("key-%04d", i) + lru.Push([]byte(k), uint64(i+1)) + totalSize += uint64(i + 1) + } + + require.Equal(t, uint64(n), lru.GetCount()) + require.Equal(t, totalSize, lru.GetTotalSize()) + + for i := 0; i < n; i++ { + want := 
fmt.Sprintf("key-%04d", i) + require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) + } + + require.Equal(t, uint64(0), lru.GetCount()) + require.Equal(t, uint64(0), lru.GetTotalSize()) +} + +func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { + lru := newLRUQueue() + lru.Push([]byte("a"), 10) + lru.Push([]byte("b"), 20) + lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 + + require.Equal(t, uint64(25), lru.GetTotalSize()) + + // Pop "b" (it's the LRU since "a" was re-pushed to back). + lru.PopLeastRecentlyUsed() + require.Equal(t, uint64(5), lru.GetTotalSize()) + + lru.PopLeastRecentlyUsed() + require.Equal(t, uint64(0), lru.GetTotalSize()) +} diff --git a/sei-db/db_engine/dbcache/noop_cache.go b/sei-db/db_engine/dbcache/noop_cache.go new file mode 100644 index 0000000000..1e40e02879 --- /dev/null +++ b/sei-db/db_engine/dbcache/noop_cache.go @@ -0,0 +1,58 @@ +package dbcache + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ Cache = (*noOpCache)(nil) + +// noOpCache is a Cache that performs no caching. Every Get falls through +// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. +// Useful for testing the storage layer without cache interference, or for +// workloads where caching is not beneficial. +type noOpCache struct { + readFunc func(key []byte) ([]byte, bool, error) +} + +// NewNoOpCache creates a Cache that always reads from readFunc and never caches. 
+func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { + return &noOpCache{readFunc: readFunc} +} + +func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { + return c.readFunc(key) +} + +func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { + var firstErr error + for k := range keys { + val, _, err := c.readFunc([]byte(k)) + if err != nil { + keys[k] = types.BatchGetResult{Error: err} + if firstErr == nil { + firstErr = err + } + } else { + keys[k] = types.BatchGetResult{Value: val} + } + } + if firstErr != nil { + return fmt.Errorf("unable to batch get: %w", firstErr) + } + return nil +} + +func (c *noOpCache) Set([]byte, []byte) { + // intentional no-op +} + +func (c *noOpCache) Delete([]byte) { + // intentional no-op +} + +func (c *noOpCache) BatchSet([]CacheUpdate) error { + // intentional no-op + return nil +} diff --git a/sei-db/db_engine/dbcache/noop_cache_test.go b/sei-db/db_engine/dbcache/noop_cache_test.go new file mode 100644 index 0000000000..2fd7bb2790 --- /dev/null +++ b/sei-db/db_engine/dbcache/noop_cache_test.go @@ -0,0 +1,152 @@ +package dbcache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +func newNoOpTestCache(store map[string][]byte) Cache { + return NewNoOpCache(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + }) +} + +func TestNoOpGetFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestNoOpGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + val, found, err := c.Get([]byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestNoOpGetError(t *testing.T) { 
+ dbErr := errors.New("broken") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + _, _, err := c.Get([]byte("k"), true) + require.ErrorIs(t, err, dbErr) +} + +func TestNoOpGetIgnoresUpdateLru(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + val1, _, _ := c.Get([]byte("k"), true) + val2, _, _ := c.Get([]byte("k"), false) + require.Equal(t, string(val1), string(val2)) +} + +func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { + store := map[string][]byte{"k": []byte("v1")} + c := newNoOpTestCache(store) + + val, _, _ := c.Get([]byte("k"), true) + require.Equal(t, "v1", string(val)) + + store["k"] = []byte("v2") + + val, _, _ = c.Get([]byte("k"), true) + require.Equal(t, "v2", string(val), "should re-read from func, not cache") +} + +func TestNoOpSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + c.Set([]byte("k"), []byte("v")) + + _, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.False(t, found, "Set should not cache anything") +} + +func TestNoOpDeleteIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + + c.Delete([]byte("k")) + + val, found, err := c.Get([]byte("k"), true) + require.NoError(t, err) + require.True(t, found, "Delete should not affect reads") + require.Equal(t, "v", string(val)) +} + +func TestNoOpBatchSetIsNoOp(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + }) + require.NoError(t, err) + + _, found, _ := c.Get([]byte("a"), true) + require.False(t, found) + _, found, _ = c.Get([]byte("b"), true) + require.False(t, found) +} + +func TestNoOpBatchSetEmptyAndNil(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestNoOpBatchGetAllFound(t *testing.T) 
{ + c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].IsFound()) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestNoOpBatchGetNotFound(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{"x": {}} + require.NoError(t, c.BatchGet(keys)) + require.False(t, keys["x"].IsFound()) +} + +func TestNoOpBatchGetError(t *testing.T) { + dbErr := errors.New("fail") + c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + return nil, false, dbErr + }) + + keys := map[string]types.BatchGetResult{"k": {}} + err := c.BatchGet(keys) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) + require.Error(t, keys["k"].Error) +} + +func TestNoOpBatchGetEmpty(t *testing.T) { + c := newNoOpTestCache(map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(keys)) +} diff --git a/sei-db/db_engine/dbcache/shard_manager.go b/sei-db/db_engine/dbcache/shard_manager.go new file mode 100644 index 0000000000..bfc837845c --- /dev/null +++ b/sei-db/db_engine/dbcache/shard_manager.go @@ -0,0 +1,46 @@ +package dbcache + +import ( + "errors" + "hash/maphash" + "sync" +) + +var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") + +// A utility for assigning keys to shard indices. +type shardManager struct { + // A random seed that makes it hard for an attacker to predict the shard index and to skew the distribution. + seed maphash.Seed + // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) + mask uint64 + // reusable Hash objects to avoid allocs + pool sync.Pool +} + +// Creates a new Sharder. Number of shards must be a power of two and greater than 0. 
+func newShardManager(numShards uint64) (*shardManager, error) { + if numShards == 0 || (numShards&(numShards-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + + return &shardManager{ + seed: maphash.MakeSeed(), // secret, randomized + mask: numShards - 1, + pool: sync.Pool{ + New: func() any { return new(maphash.Hash) }, + }, + }, nil +} + +// Shard returns a shard index in [0, numShards). +// addr should be the raw address bytes (e.g., 20-byte ETH address). +func (s *shardManager) Shard(addr []byte) uint64 { + h := s.pool.Get().(*maphash.Hash) + h.SetSeed(s.seed) + _, _ = h.Write(addr) + x := h.Sum64() + s.pool.Put(h) + + return x & s.mask +} diff --git a/sei-db/db_engine/dbcache/shard_manager_test.go b/sei-db/db_engine/dbcache/shard_manager_test.go new file mode 100644 index 0000000000..07aa2041a2 --- /dev/null +++ b/sei-db/db_engine/dbcache/shard_manager_test.go @@ -0,0 +1,271 @@ +package dbcache + +import ( + "fmt" + "math" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +// --- NewShardManager --- + +func TestNewShardManagerValidPowersOfTwo(t *testing.T) { + for exp := 0; exp < 20; exp++ { + n := uint64(1) << exp + sm, err := newShardManager(n) + require.NoError(t, err, "numShards=%d", n) + require.NotNil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerZeroReturnsError(t *testing.T) { + sm, err := newShardManager(0) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func TestNewShardManagerNonPowersOfTwoReturnError(t *testing.T) { + bad := []uint64{3, 5, 6, 7, 9, 10, 12, 15, 17, 100, 255, 1023} + for _, n := range bad { + sm, err := newShardManager(n) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo, "numShards=%d", n) + require.Nil(t, sm, "numShards=%d", n) + } +} + +func TestNewShardManagerMaxUint64ReturnsError(t *testing.T) { + sm, err := newShardManager(math.MaxUint64) + require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) + require.Nil(t, sm) +} + +func 
TestNewShardManagerLargePowerOfTwo(t *testing.T) { + n := uint64(1) << 40 + sm, err := newShardManager(n) + require.NoError(t, err) + require.NotNil(t, sm) +} + +// --- Shard: basic behaviour --- + +func TestShardReturnsBoundedIndex(t *testing.T) { + for _, numShards := range []uint64{1, 2, 4, 16, 256, 1024} { + sm, err := newShardManager(numShards) + require.NoError(t, err) + + for i := 0; i < 500; i++ { + key := []byte(fmt.Sprintf("key-%d", i)) + idx := sm.Shard(key) + require.Less(t, idx, numShards, "numShards=%d key=%s", numShards, key) + } + } +} + +func TestShardDeterministic(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("deterministic-test-key") + first := sm.Shard(key) + for i := 0; i < 100; i++ { + require.Equal(t, first, sm.Shard(key)) + } +} + +func TestShardSingleShardAlwaysReturnsZero(t *testing.T) { + sm, err := newShardManager(1) + require.NoError(t, err) + + keys := [][]byte{ + {}, + {0x00}, + {0xFF}, + []byte("anything"), + []byte("another key entirely"), + } + for _, k := range keys { + require.Equal(t, uint64(0), sm.Shard(k), "key=%q", k) + } +} + +func TestShardEmptyKey(t *testing.T) { + sm, err := newShardManager(8) + require.NoError(t, err) + + idx := sm.Shard([]byte{}) + require.Less(t, idx, uint64(8)) + + // Deterministic + require.Equal(t, idx, sm.Shard([]byte{})) +} + +func TestShardNilKey(t *testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + idx := sm.Shard(nil) + require.Less(t, idx, uint64(4)) + require.Equal(t, idx, sm.Shard(nil)) +} + +func TestShardBinaryKeys(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + k1 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} + k2 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} + + idx1 := sm.Shard(k1) + idx2 := sm.Shard(k2) + 
require.Less(t, idx1, uint64(16)) + require.Less(t, idx2, uint64(16)) +} + +func TestShardCallerMutationDoesNotAffectFutureResults(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + key := []byte("mutable") + first := sm.Shard(key) + + key[0] = 'X' + second := sm.Shard([]byte("mutable")) + require.Equal(t, first, second) +} + +// --- Distribution --- + +func TestShardDistribution(t *testing.T) { + const numShards = 16 + const numKeys = 10_000 + sm, err := newShardManager(numShards) + require.NoError(t, err) + + counts := make([]int, numShards) + for i := 0; i < numKeys; i++ { + key := []byte(fmt.Sprintf("addr-%06d", i)) + counts[sm.Shard(key)]++ + } + + expected := float64(numKeys) / float64(numShards) + for shard, count := range counts { + ratio := float64(count) / expected + require.Greater(t, ratio, 0.5, "shard %d is severely underrepresented (%d)", shard, count) + require.Less(t, ratio, 1.5, "shard %d is severely overrepresented (%d)", shard, count) + } +} + +// --- Distinct managers --- + +func TestDifferentManagersHaveDifferentSeeds(t *testing.T) { + sm1, err := newShardManager(256) + require.NoError(t, err) + sm2, err := newShardManager(256) + require.NoError(t, err) + + // With distinct random seeds, at least some keys should hash differently. 
+ diffCount := 0 + for i := 0; i < 200; i++ { + key := []byte(fmt.Sprintf("seed-test-%d", i)) + if sm1.Shard(key) != sm2.Shard(key) { + diffCount++ + } + } + require.Greater(t, diffCount, 0, "two managers with independent seeds should differ on at least one key") +} + +// --- Concurrency --- + +func TestShardConcurrentAccess(t *testing.T) { + sm, err := newShardManager(64) + require.NoError(t, err) + + const goroutines = 32 + const iters = 1000 + + key := []byte("concurrent-key") + expected := sm.Shard(key) + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + go func() { + defer wg.Done() + for i := 0; i < iters; i++ { + got := sm.Shard(key) + if got != expected { + t.Errorf("concurrent Shard returned %d, want %d", got, expected) + return + } + } + }() + } + wg.Wait() +} + +func TestShardConcurrentDifferentKeys(t *testing.T) { + sm, err := newShardManager(32) + require.NoError(t, err) + + const goroutines = 16 + const keysPerGoroutine = 500 + + var wg sync.WaitGroup + wg.Add(goroutines) + for g := 0; g < goroutines; g++ { + g := g + go func() { + defer wg.Done() + for i := 0; i < keysPerGoroutine; i++ { + key := []byte(fmt.Sprintf("g%d-k%d", g, i)) + idx := sm.Shard(key) + if idx >= 32 { + t.Errorf("Shard(%q) = %d, want < 32", key, idx) + return + } + } + }() + } + wg.Wait() +} + +// --- Mask correctness --- + +func TestShardMaskMatchesNumShards(t *testing.T) { + for exp := 0; exp < 16; exp++ { + numShards := uint64(1) << exp + sm, err := newShardManager(numShards) + require.NoError(t, err) + require.Equal(t, numShards-1, sm.mask, "numShards=%d", numShards) + } +} + +// --- 20-byte ETH-style addresses --- + +func TestShardWith20ByteAddresses(t *testing.T) { + sm, err := newShardManager(16) + require.NoError(t, err) + + addr := make([]byte, 20) + for i := 0; i < 20; i++ { + addr[i] = byte(i + 1) + } + + idx := sm.Shard(addr) + require.Less(t, idx, uint64(16)) + require.Equal(t, idx, sm.Shard(addr)) +} + +func TestShardSingleByteKey(t 
*testing.T) { + sm, err := newShardManager(4) + require.NoError(t, err) + + for b := 0; b < 256; b++ { + idx := sm.Shard([]byte{byte(b)}) + require.Less(t, idx, uint64(4), "byte=%d", b) + } +} diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 0f82ac85a2..446ed39a65 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -20,6 +20,19 @@ type IterOptions struct { UpperBound []byte } +// BatchGetResult describes the result of a single key lookup within a BatchGet call. +type BatchGetResult struct { + // The value for the given key. If nil, the key was not found (but no error occurred). + Value []byte + // The error, if any, that occurred during the read. + Error error +} + +// IsFound returns true if the key was found (i.e. Value is not nil). +func (b BatchGetResult) IsFound() bool { + return b.Value != nil +} + // OpenOptions configures opening a DB. // // NOTE: This is intentionally minimal today. Most performance-critical knobs From 36d7328acbbd461ade625eb00c4b3f408f9671e1 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 13 Mar 2026 11:38:17 -0500 Subject: [PATCH 058/119] bugfix --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index de0e50f8e2..85dfb480d7 100644 --- a/Makefile +++ b/Makefile @@ -157,7 +157,7 @@ lint: go mod tidy go mod verify -# Run lint on the sei-db package. Much faster than running lint on the entire project.\ +# Run lint on the sei-db package. Much faster than running lint on the entire project. # Makes life easier for storage team when iterating on changes inside the sei-db package. dblint: go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.8.0 run ./sei-db/... 
From 4ba242bfa467959a351374bd21a3c781e0215ece Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 08:39:34 -0500 Subject: [PATCH 059/119] fix merge problems --- sei-db/db_engine/dbcache/cache.go | 37 +++ .../{pebbledb => }/dbcache/cache_impl.go | 0 .../{pebbledb => }/dbcache/cache_impl_test.go | 12 +- .../{pebbledb => }/dbcache/cache_metrics.go | 0 .../db_engine/{pebbledb => }/dbcache/shard.go | 2 +- .../{pebbledb => }/dbcache/shard_test.go | 24 +- sei-db/db_engine/pebbledb/db.go | 2 +- sei-db/db_engine/pebbledb/dbcache/cache.go | 84 ----- .../pebbledb/dbcache/cached_batch.go | 55 ---- .../pebbledb/dbcache/cached_batch_test.go | 204 ------------ .../pebbledb/dbcache/cached_key_value_db.go | 82 ----- .../db_engine/pebbledb/dbcache/lru_queue.go | 83 ----- .../pebbledb/dbcache/lru_queue_test.go | 310 ------------------ .../db_engine/pebbledb/dbcache/noop_cache.go | 58 ---- .../pebbledb/dbcache/noop_cache_test.go | 152 --------- .../pebbledb/dbcache/shard_manager.go | 46 --- .../pebbledb/dbcache/shard_manager_test.go | 271 --------------- 17 files changed, 57 insertions(+), 1365 deletions(-) rename sei-db/db_engine/{pebbledb => }/dbcache/cache_impl.go (100%) rename sei-db/db_engine/{pebbledb => }/dbcache/cache_impl_test.go (98%) rename sei-db/db_engine/{pebbledb => }/dbcache/cache_metrics.go (100%) rename sei-db/db_engine/{pebbledb => }/dbcache/shard.go (99%) rename sei-db/db_engine/{pebbledb => }/dbcache/shard_test.go (97%) delete mode 100644 sei-db/db_engine/pebbledb/dbcache/cache.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_batch.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/lru_queue.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/noop_cache.go delete mode 100644 
sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/shard_manager.go delete mode 100644 sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index fa4b292b85..2ec5acfd82 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -1,6 +1,11 @@ package dbcache import ( + "context" + "fmt" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -45,3 +50,35 @@ type CacheUpdate struct { func (u *CacheUpdate) IsDelete() bool { return u.Value == nil } + +// BuildCache creates a new Cache. +func BuildCache( + ctx context.Context, + readFunc func(key []byte) ([]byte, bool, error), + shardCount uint64, + maxSize uint64, + readPool threading.Pool, + miscPool threading.Pool, + cacheName string, + metricsScrapeInterval time.Duration, +) (Cache, error) { + + if maxSize == 0 { + return NewNoOpCache(readFunc), nil + } + + cache, err := NewStandardCache( + ctx, + readFunc, + shardCount, + maxSize, + readPool, + miscPool, + cacheName, + metricsScrapeInterval, + ) + if err != nil { + return nil, fmt.Errorf("failed to create cache: %w", err) + } + return cache, nil +} diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go similarity index 100% rename from sei-db/db_engine/pebbledb/dbcache/cache_impl.go rename to sei-db/db_engine/dbcache/cache_impl.go diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go similarity index 98% rename from sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go rename to sei-db/db_engine/dbcache/cache_impl_test.go index 601591f008..5e4981b281 100644 --- a/sei-db/db_engine/pebbledb/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -445,7 +445,7 @@ func 
TestCacheGetRoutesToSameShard(t *testing.T) { idx := impl.shardManager.Shard([]byte("key")) _, entries := impl.shards[idx].getSizeInfo() - require.Equal(t, 1, entries, "key should be in the shard determined by shardManager") + require.Equal(t, uint64(1), entries, "key should be in the shard determined by shardManager") } // --------------------------------------------------------------------------- @@ -457,8 +457,8 @@ func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { impl := c.(*cache) bytes, entries := impl.getCacheSizeInfo() - require.Equal(t, int64(0), bytes) - require.Equal(t, int64(0), entries) + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) } func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { @@ -470,8 +470,8 @@ func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { } bytes, entries := impl.getCacheSizeInfo() - require.Equal(t, int64(20), entries) - require.Greater(t, bytes, int64(0)) + require.Equal(t, uint64(20), entries) + require.Greater(t, bytes, uint64(0)) } // --------------------------------------------------------------------------- @@ -610,7 +610,7 @@ func TestCacheEvictsPerShard(t *testing.T) { // 27 bytes → must evict to get under 20 bytes, _ := impl.shards[0].getSizeInfo() - require.LessOrEqual(t, bytes, 20) + require.LessOrEqual(t, bytes, uint64(20)) } // --------------------------------------------------------------------------- diff --git a/sei-db/db_engine/pebbledb/dbcache/cache_metrics.go b/sei-db/db_engine/dbcache/cache_metrics.go similarity index 100% rename from sei-db/db_engine/pebbledb/dbcache/cache_metrics.go rename to sei-db/db_engine/dbcache/cache_metrics.go diff --git a/sei-db/db_engine/pebbledb/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go similarity index 99% rename from sei-db/db_engine/pebbledb/dbcache/shard.go rename to sei-db/db_engine/dbcache/shard.go index 130e650be3..eb44f3e47e 100644 --- a/sei-db/db_engine/pebbledb/dbcache/shard.go +++ 
b/sei-db/db_engine/dbcache/shard.go @@ -244,7 +244,7 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { entry := s.getEntry([]byte(key)) switch entry.status { - case statusAvailable | statusDeleted: + case statusAvailable, statusDeleted: keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value)} hits++ case statusScheduled: diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go similarity index 97% rename from sei-db/db_engine/pebbledb/dbcache/shard_test.go rename to sei-db/db_engine/dbcache/shard_test.go index b2c0b5ad77..e23614299e 100644 --- a/sei-db/db_engine/pebbledb/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -423,8 +423,8 @@ func TestBatchSetEmpty(t *testing.T) { s.BatchSet([]CacheUpdate{}) bytes, entries := s.getSizeInfo() - require.Equal(t, 0, bytes) - require.Equal(t, 0, entries) + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) } // --------------------------------------------------------------------------- @@ -562,14 +562,14 @@ func TestEvictionRespectMaxSize(t *testing.T) { s.Set([]byte("b"), []byte("bbbbbbbbbb")) _, entries := s.getSizeInfo() - require.Equal(t, 2, entries) + require.Equal(t, uint64(2), entries) // Third entry pushes to 33 bytes, exceeding maxSize=30 → evict "a". 
s.Set([]byte("c"), []byte("cccccccccc")) bytes, entries := s.getSizeInfo() - require.LessOrEqual(t, bytes, 30, "shard size should not exceed maxSize") - require.Equal(t, 2, entries) + require.LessOrEqual(t, bytes, uint64(30), "shard size should not exceed maxSize") + require.Equal(t, uint64(2), entries) } func TestEvictionOrderIsLRU(t *testing.T) { @@ -602,7 +602,7 @@ func TestEvictionOnDelete(t *testing.T) { s.Delete([]byte("longkey1")) // size 8 bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 10, "size should not exceed maxSize") + require.LessOrEqual(t, bytes, uint64(10), "size should not exceed maxSize") } func TestEvictionOnGetFromDB(t *testing.T) { @@ -619,7 +619,7 @@ func TestEvictionOnGetFromDB(t *testing.T) { time.Sleep(50 * time.Millisecond) bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 25, "size should not exceed maxSize after DB read") + require.LessOrEqual(t, bytes, uint64(25), "size should not exceed maxSize after DB read") } // --------------------------------------------------------------------------- @@ -629,8 +629,8 @@ func TestEvictionOnGetFromDB(t *testing.T) { func TestGetSizeInfoEmpty(t *testing.T) { s := newTestShard(t, 4096, map[string][]byte{}) bytes, entries := s.getSizeInfo() - require.Equal(t, 0, bytes) - require.Equal(t, 0, entries) + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) } func TestGetSizeInfoAfterSets(t *testing.T) { @@ -640,8 +640,8 @@ func TestGetSizeInfoAfterSets(t *testing.T) { s.Set([]byte("efg"), []byte("hi")) // 3+2 = 5 bytes, entries := s.getSizeInfo() - require.Equal(t, 2, entries) - require.Equal(t, 9, bytes) + require.Equal(t, uint64(2), entries) + require.Equal(t, uint64(9), bytes) } // --------------------------------------------------------------------------- @@ -769,7 +769,7 @@ func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { s.Set([]byte("b"), bigVal) bytes, _ := s.getSizeInfo() - require.LessOrEqual(t, bytes, 100, "size should not 
exceed maxSize after large set") + require.LessOrEqual(t, bytes, uint64(100), "size should not exceed maxSize after large set") } // --------------------------------------------------------------------------- diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 0e45bfcc9f..6d2bea30e8 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -13,7 +13,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb/dbcache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) diff --git a/sei-db/db_engine/pebbledb/dbcache/cache.go b/sei-db/db_engine/pebbledb/dbcache/cache.go deleted file mode 100644 index 2ec5acfd82..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/cache.go +++ /dev/null @@ -1,84 +0,0 @@ -package dbcache - -import ( - "context" - "fmt" - "time" - - "github.com/sei-protocol/sei-chain/sei-db/common/threading" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// Cache describes a cache capable of being used by a FlatKV store. -type Cache interface { - - // Get returns the value for the given key, or (nil, false) if not found. - Get( - // The entry to fetch. - key []byte, - // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. - // Useful for when an operation is performed multiple times in close succession on the same key, - // since it requires non-zero overhead to do so with little benefit. - updateLru bool, - ) ([]byte, bool, error) - - // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the - // map with the results. - // - // It is not thread safe to read or mutate the map while this method is running. 
- BatchGet(keys map[string]types.BatchGetResult) error - - // Set sets the value for the given key. - Set(key []byte, value []byte) - - // Delete deletes the value for the given key. - Delete(key []byte) - - // BatchSet applies the given updates to the cache. - BatchSet(updates []CacheUpdate) error -} - -// CacheUpdate describes a single key-value mutation to apply to the cache. -type CacheUpdate struct { - // The key to update. - Key []byte - // The value to set. If nil, the key will be deleted. - Value []byte -} - -// IsDelete returns true if the update is a delete operation. -func (u *CacheUpdate) IsDelete() bool { - return u.Value == nil -} - -// BuildCache creates a new Cache. -func BuildCache( - ctx context.Context, - readFunc func(key []byte) ([]byte, bool, error), - shardCount uint64, - maxSize uint64, - readPool threading.Pool, - miscPool threading.Pool, - cacheName string, - metricsScrapeInterval time.Duration, -) (Cache, error) { - - if maxSize == 0 { - return NewNoOpCache(readFunc), nil - } - - cache, err := NewStandardCache( - ctx, - readFunc, - shardCount, - maxSize, - readPool, - miscPool, - cacheName, - metricsScrapeInterval, - ) - if err != nil { - return nil, fmt.Errorf("failed to create cache: %w", err) - } - return cache, nil -} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_batch.go b/sei-db/db_engine/pebbledb/dbcache/cached_batch.go deleted file mode 100644 index e4995fe33b..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/cached_batch.go +++ /dev/null @@ -1,55 +0,0 @@ -package dbcache - -import ( - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// cachedBatch wraps a types.Batch and applies pending mutations to the cache -// after a successful commit. 
-type cachedBatch struct { - inner types.Batch - cache Cache - pending []CacheUpdate -} - -var _ types.Batch = (*cachedBatch)(nil) - -func newCachedBatch(inner types.Batch, cache Cache) *cachedBatch { - return &cachedBatch{inner: inner, cache: cache} -} - -func (cb *cachedBatch) Set(key, value []byte) error { - cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: value}) - return cb.inner.Set(key, value) -} - -func (cb *cachedBatch) Delete(key []byte) error { - cb.pending = append(cb.pending, CacheUpdate{Key: key, Value: nil}) - return cb.inner.Delete(key) -} - -func (cb *cachedBatch) Commit(opts types.WriteOptions) error { - if err := cb.inner.Commit(opts); err != nil { - return err - } - if err := cb.cache.BatchSet(cb.pending); err != nil { - return fmt.Errorf("failed to update cache after commit: %w", err) - } - cb.pending = nil - return nil -} - -func (cb *cachedBatch) Len() int { - return cb.inner.Len() -} - -func (cb *cachedBatch) Reset() { - cb.inner.Reset() - cb.pending = nil -} - -func (cb *cachedBatch) Close() error { - return cb.inner.Close() -} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go b/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go deleted file mode 100644 index 5aeb533238..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/cached_batch_test.go +++ /dev/null @@ -1,204 +0,0 @@ -package dbcache - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -// --------------------------------------------------------------------------- -// mock batch -// --------------------------------------------------------------------------- - -type mockBatch struct { - sets []CacheUpdate - deletes [][]byte - committed bool - closed bool - resetCount int - commitErr error -} - -func (m *mockBatch) Set(key, value []byte) error { - m.sets = append(m.sets, CacheUpdate{Key: key, Value: value}) - return nil -} - -func (m *mockBatch) Delete(key 
[]byte) error { - m.deletes = append(m.deletes, key) - return nil -} - -func (m *mockBatch) Commit(opts types.WriteOptions) error { - if m.commitErr != nil { - return m.commitErr - } - m.committed = true - return nil -} - -func (m *mockBatch) Len() int { - return len(m.sets) + len(m.deletes) -} - -func (m *mockBatch) Reset() { - m.sets = nil - m.deletes = nil - m.committed = false - m.resetCount++ -} - -func (m *mockBatch) Close() error { - m.closed = true - return nil -} - -// --------------------------------------------------------------------------- -// mock cache -// --------------------------------------------------------------------------- - -type mockCache struct { - data map[string][]byte - batchSetErr error -} - -func newMockCache() *mockCache { - return &mockCache{data: make(map[string][]byte)} -} - -func (mc *mockCache) Get(key []byte, _ bool) ([]byte, bool, error) { - v, ok := mc.data[string(key)] - return v, ok, nil -} - -func (mc *mockCache) BatchGet(keys map[string]types.BatchGetResult) error { - for k := range keys { - v, ok := mc.data[k] - if ok { - keys[k] = types.BatchGetResult{Value: v} - } - } - return nil -} - -func (mc *mockCache) Set(key, value []byte) { - mc.data[string(key)] = value -} - -func (mc *mockCache) Delete(key []byte) { - delete(mc.data, string(key)) -} - -func (mc *mockCache) BatchSet(updates []CacheUpdate) error { - if mc.batchSetErr != nil { - return mc.batchSetErr - } - for _, u := range updates { - if u.IsDelete() { - delete(mc.data, string(u.Key)) - } else { - mc.data[string(u.Key)] = u.Value - } - } - return nil -} - -// --------------------------------------------------------------------------- -// tests -// --------------------------------------------------------------------------- - -func TestCachedBatchCommitUpdatesCacheOnSuccess(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Set([]byte("a"), []byte("1"))) - require.NoError(t, 
cb.Set([]byte("b"), []byte("2"))) - require.NoError(t, cb.Commit(types.WriteOptions{})) - - require.True(t, inner.committed) - v, ok := cache.data["a"] - require.True(t, ok) - require.Equal(t, []byte("1"), v) - v, ok = cache.data["b"] - require.True(t, ok) - require.Equal(t, []byte("2"), v) -} - -func TestCachedBatchCommitDoesNotUpdateCacheOnInnerFailure(t *testing.T) { - inner := &mockBatch{commitErr: errors.New("disk full")} - cache := newMockCache() - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Set([]byte("a"), []byte("1"))) - err := cb.Commit(types.WriteOptions{}) - - require.Error(t, err) - require.Contains(t, err.Error(), "disk full") - _, ok := cache.data["a"] - require.False(t, ok, "cache should not be updated when inner commit fails") -} - -func TestCachedBatchCommitReturnsCacheError(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cache.batchSetErr = errors.New("cache broken") - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Set([]byte("a"), []byte("1"))) - err := cb.Commit(types.WriteOptions{}) - - require.Error(t, err) - require.Contains(t, err.Error(), "cache broken") - require.True(t, inner.committed, "inner batch should have committed") -} - -func TestCachedBatchDeleteMarksKeyForRemoval(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cache.Set([]byte("x"), []byte("old")) - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Delete([]byte("x"))) - require.NoError(t, cb.Commit(types.WriteOptions{})) - - _, ok := cache.data["x"] - require.False(t, ok, "key should be deleted from cache") -} - -func TestCachedBatchResetClearsPending(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Set([]byte("a"), []byte("1"))) - require.NoError(t, cb.Set([]byte("b"), []byte("2"))) - cb.Reset() - - require.NoError(t, cb.Commit(types.WriteOptions{})) - - require.Empty(t, cache.data, "cache should have no entries 
after reset + commit") -} - -func TestCachedBatchLenDelegatesToInner(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cb := newCachedBatch(inner, cache) - - require.Equal(t, 0, cb.Len()) - require.NoError(t, cb.Set([]byte("a"), []byte("1"))) - require.NoError(t, cb.Delete([]byte("b"))) - require.Equal(t, 2, cb.Len()) -} - -func TestCachedBatchCloseDelegatesToInner(t *testing.T) { - inner := &mockBatch{} - cache := newMockCache() - cb := newCachedBatch(inner, cache) - - require.NoError(t, cb.Close()) - require.True(t, inner.closed) -} diff --git a/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go b/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go deleted file mode 100644 index 3a60cf39f3..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/cached_key_value_db.go +++ /dev/null @@ -1,82 +0,0 @@ -package dbcache - -import ( - "fmt" - - errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -var _ types.KeyValueDB = (*cachedKeyValueDB)(nil) -var _ types.Checkpointable = (*cachedKeyValueDB)(nil) - -type cachedKeyValueDB struct { - db types.KeyValueDB - cache Cache -} - -// Combine a cache and a key-value database to create a new key-value database with caching. 
-func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { - return &cachedKeyValueDB{db: db, cache: cache} -} - -func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { - val, found, err := c.cache.Get(key, true) - if err != nil { - return nil, fmt.Errorf("failed to get value from cache: %w", err) - } - if !found { - return nil, errorutils.ErrNotFound - } - return val, nil -} - -func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error { - err := c.cache.BatchGet(keys) - if err != nil { - return fmt.Errorf("failed to get values from cache: %w", err) - } - return nil -} - -func (c *cachedKeyValueDB) Set(key []byte, value []byte, opts types.WriteOptions) error { - err := c.db.Set(key, value, opts) - if err != nil { - return fmt.Errorf("failed to set value in database: %w", err) - } - c.cache.Set(key, value) - return nil -} - -func (c *cachedKeyValueDB) Delete(key []byte, opts types.WriteOptions) error { - err := c.db.Delete(key, opts) - if err != nil { - return fmt.Errorf("failed to delete value in database: %w", err) - } - c.cache.Delete(key) - return nil -} - -func (c *cachedKeyValueDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, error) { - return c.db.NewIter(opts) -} - -func (c *cachedKeyValueDB) NewBatch() types.Batch { - return newCachedBatch(c.db.NewBatch(), c.cache) -} - -func (c *cachedKeyValueDB) Flush() error { - return c.db.Flush() -} - -func (c *cachedKeyValueDB) Close() error { - return c.db.Close() -} - -func (c *cachedKeyValueDB) Checkpoint(destDir string) error { - cp, ok := c.db.(types.Checkpointable) - if !ok { - return fmt.Errorf("underlying database does not support Checkpoint") - } - return cp.Checkpoint(destDir) -} diff --git a/sei-db/db_engine/pebbledb/dbcache/lru_queue.go b/sei-db/db_engine/pebbledb/dbcache/lru_queue.go deleted file mode 100644 index 6870679c9d..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/lru_queue.go +++ /dev/null @@ -1,83 +0,0 @@ -package dbcache - 
-import "container/list" - -// Implements a queue-like abstraction with LRU semantics. Not thread safe. -type lruQueue struct { - order *list.List - entries map[string]*list.Element - totalSize uint64 -} - -type lruQueueEntry struct { - key string - size uint64 -} - -// Create a new LRU queue. -func newLRUQueue() *lruQueue { - return &lruQueue{ - order: list.New(), - entries: make(map[string]*list.Element), - } -} - -// Add a new entry to the LRU queue. Can also be used to update an existing value with a new weight. -func (lru *lruQueue) Push( - // the key in the cache that was recently interacted with - key []byte, - // the size of the key + value - size uint64, -) { - if elem, ok := lru.entries[string(key)]; ok { - entry := elem.Value.(*lruQueueEntry) - lru.totalSize += size - entry.size - entry.size = size - lru.order.MoveToBack(elem) - return - } - - keyStr := string(key) - elem := lru.order.PushBack(&lruQueueEntry{ - key: keyStr, - size: size, - }) - lru.entries[keyStr] = elem - lru.totalSize += size -} - -// Signal that an entry has been interated with, moving it to the back of the queue -// (i.e. making it so it doesn't get popped soon). -func (lru *lruQueue) Touch(key []byte) { - elem, ok := lru.entries[string(key)] - if !ok { - return - } - lru.order.MoveToBack(elem) -} - -// Returns the total size of all entries in the LRU queue. -func (lru *lruQueue) GetTotalSize() uint64 { - return lru.totalSize -} - -// Returns a count of the number of entries in the LRU queue, where each entry counts for 1 regardless of size. -func (lru *lruQueue) GetCount() uint64 { - return uint64(len(lru.entries)) -} - -// Pops a single element out of the queue. The element removed is the entry least recently passed to Update(). -// Returns the key in string form to avoid copying the key an additional time. -// Panics if the queue is empty. 
-func (lru *lruQueue) PopLeastRecentlyUsed() string { - elem := lru.order.Front() - if elem == nil { - panic("cannot pop from empty LRU queue") - } - - lru.order.Remove(elem) - entry := elem.Value.(*lruQueueEntry) - delete(lru.entries, entry.key) - lru.totalSize -= entry.size - return entry.key -} diff --git a/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go b/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go deleted file mode 100644 index 0073e6d1f0..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/lru_queue_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package dbcache - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestLRUQueueIsolatesFromCallerMutation(t *testing.T) { - lru := newLRUQueue() - - key := []byte("a") - lru.Push(key, 1) - key[0] = 'z' - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestNewLRUQueueStartsEmpty(t *testing.T) { - lru := newLRUQueue() - - require.Equal(t, uint64(0), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPopLeastRecentlyUsedPanicsOnEmptyQueue(t *testing.T) { - lru := newLRUQueue() - require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) -} - -func TestPopLeastRecentlyUsedPanicsAfterDrain(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("x"), 1) - lru.PopLeastRecentlyUsed() - - require.Panics(t, func() { lru.PopLeastRecentlyUsed() }) -} - -func TestPushSingleElement(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("only"), 42) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(42), lru.GetTotalSize()) - require.Equal(t, "only", lru.PopLeastRecentlyUsed()) -} - -func TestPushDuplicateDecreasesSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("k"), 100) - lru.Push([]byte("k"), 30) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) -} - -func TestPushDuplicateMovesToBack(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - 
lru.Push([]byte("b"), 1) - lru.Push([]byte("c"), 1) - - // Re-push "a" — should move it behind "b" and "c" - lru.Push([]byte("a"), 1) - - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestPushZeroSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("z"), 0) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) - require.Equal(t, "z", lru.PopLeastRecentlyUsed()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPushEmptyKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte(""), 5) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "", lru.PopLeastRecentlyUsed()) -} - -func TestPushRepeatedUpdatesToSameKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("k"), 1) - lru.Push([]byte("k"), 2) - lru.Push([]byte("k"), 3) - lru.Push([]byte("k"), 4) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(4), lru.GetTotalSize()) -} - -func TestTouchNonexistentKeyIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - - lru.Touch([]byte("missing")) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) -} - -func TestTouchOnEmptyQueueIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Touch([]byte("ghost")) - - require.Equal(t, uint64(0), lru.GetCount()) -} - -func TestTouchSingleElement(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("solo"), 10) - lru.Touch([]byte("solo")) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, "solo", lru.PopLeastRecentlyUsed()) -} - -func TestTouchDoesNotAffectSizeOrCount(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 3) - lru.Push([]byte("b"), 7) - - lru.Touch([]byte("a")) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(10), lru.GetTotalSize()) -} - -func 
TestMultipleTouchesChangeOrder(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - lru.Push([]byte("c"), 1) - - // Order: a, b, c - lru.Touch([]byte("a")) // Order: b, c, a - lru.Touch([]byte("b")) // Order: c, a, b - - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestTouchAlreadyMostRecentIsNoop(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - - lru.Touch([]byte("b")) // "b" is already at back - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestPopDecrementsCountAndSize(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 10) - lru.Push([]byte("b"), 20) - lru.Push([]byte("c"), 30) - - lru.PopLeastRecentlyUsed() - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(50), lru.GetTotalSize()) - - lru.PopLeastRecentlyUsed() - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) -} - -func TestPopFIFOOrderWithoutTouches(t *testing.T) { - lru := newLRUQueue() - keys := []string{"first", "second", "third", "fourth"} - for _, k := range keys { - lru.Push([]byte(k), 1) - } - - for _, want := range keys { - require.Equal(t, want, lru.PopLeastRecentlyUsed()) - } -} - -func TestPushAfterDrain(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 5) - lru.PopLeastRecentlyUsed() - - lru.Push([]byte("x"), 10) - lru.Push([]byte("y"), 20) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(30), lru.GetTotalSize()) - require.Equal(t, "x", lru.PopLeastRecentlyUsed()) -} - -func TestPushPreviouslyPoppedKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("recycled"), 5) - lru.PopLeastRecentlyUsed() - - lru.Push([]byte("recycled"), 99) - - require.Equal(t, uint64(1), lru.GetCount()) - require.Equal(t, 
uint64(99), lru.GetTotalSize()) - require.Equal(t, "recycled", lru.PopLeastRecentlyUsed()) -} - -func TestInterleavedPushAndPop(t *testing.T) { - lru := newLRUQueue() - - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 2) - - require.Equal(t, "a", lru.PopLeastRecentlyUsed()) - - lru.Push([]byte("c"), 3) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(5), lru.GetTotalSize()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) - require.Equal(t, "c", lru.PopLeastRecentlyUsed()) -} - -func TestTouchThenPushSameKey(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 1) - lru.Push([]byte("b"), 1) - - lru.Touch([]byte("a")) // order: b, a - lru.Push([]byte("a"), 50) // updates size, stays at back - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, uint64(51), lru.GetTotalSize()) - require.Equal(t, "b", lru.PopLeastRecentlyUsed()) -} - -func TestBinaryKeyData(t *testing.T) { - lru := newLRUQueue() - k1 := []byte{0x00, 0xFF, 0x01} - k2 := []byte{0x00, 0xFF, 0x02} - - lru.Push(k1, 10) - lru.Push(k2, 20) - - require.Equal(t, uint64(2), lru.GetCount()) - require.Equal(t, string(k1), lru.PopLeastRecentlyUsed()) - - lru.Touch(k2) - require.Equal(t, string(k2), lru.PopLeastRecentlyUsed()) -} - -func TestCallerMutationAfterTouchDoesNotAffectQueue(t *testing.T) { - lru := newLRUQueue() - key := []byte("abc") - lru.Push(key, 1) - - key[0] = 'Z' - lru.Touch(key) // Touch with mutated key ("Zbc") — should be a no-op - - require.Equal(t, "abc", lru.PopLeastRecentlyUsed()) -} - -func TestManyEntries(t *testing.T) { - lru := newLRUQueue() - n := 1000 - var totalSize uint64 - - for i := 0; i < n; i++ { - k := fmt.Sprintf("key-%04d", i) - lru.Push([]byte(k), uint64(i+1)) - totalSize += uint64(i + 1) - } - - require.Equal(t, uint64(n), lru.GetCount()) - require.Equal(t, totalSize, lru.GetTotalSize()) - - for i := 0; i < n; i++ { - want := fmt.Sprintf("key-%04d", i) - require.Equal(t, want, lru.PopLeastRecentlyUsed(), "pop %d", i) - } - - 
require.Equal(t, uint64(0), lru.GetCount()) - require.Equal(t, uint64(0), lru.GetTotalSize()) -} - -func TestPushUpdatedSizeThenPopVerifySizeAccounting(t *testing.T) { - lru := newLRUQueue() - lru.Push([]byte("a"), 10) - lru.Push([]byte("b"), 20) - lru.Push([]byte("a"), 5) // decrease a's size from 10 to 5 - - require.Equal(t, uint64(25), lru.GetTotalSize()) - - // Pop "b" (it's the LRU since "a" was re-pushed to back). - lru.PopLeastRecentlyUsed() - require.Equal(t, uint64(5), lru.GetTotalSize()) - - lru.PopLeastRecentlyUsed() - require.Equal(t, uint64(0), lru.GetTotalSize()) -} diff --git a/sei-db/db_engine/pebbledb/dbcache/noop_cache.go b/sei-db/db_engine/pebbledb/dbcache/noop_cache.go deleted file mode 100644 index 1e40e02879..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/noop_cache.go +++ /dev/null @@ -1,58 +0,0 @@ -package dbcache - -import ( - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -var _ Cache = (*noOpCache)(nil) - -// noOpCache is a Cache that performs no caching. Every Get falls through -// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. -// Useful for testing the storage layer without cache interference, or for -// workloads where caching is not beneficial. -type noOpCache struct { - readFunc func(key []byte) ([]byte, bool, error) -} - -// NewNoOpCache creates a Cache that always reads from readFunc and never caches. 
-func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { - return &noOpCache{readFunc: readFunc} -} - -func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { - return c.readFunc(key) -} - -func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { - var firstErr error - for k := range keys { - val, _, err := c.readFunc([]byte(k)) - if err != nil { - keys[k] = types.BatchGetResult{Error: err} - if firstErr == nil { - firstErr = err - } - } else { - keys[k] = types.BatchGetResult{Value: val} - } - } - if firstErr != nil { - return fmt.Errorf("unable to batch get: %w", firstErr) - } - return nil -} - -func (c *noOpCache) Set([]byte, []byte) { - // intentional no-op -} - -func (c *noOpCache) Delete([]byte) { - // intentional no-op -} - -func (c *noOpCache) BatchSet([]CacheUpdate) error { - // intentional no-op - return nil -} diff --git a/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go b/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go deleted file mode 100644 index 2fd7bb2790..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/noop_cache_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package dbcache - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" -) - -func newNoOpTestCache(store map[string][]byte) Cache { - return NewNoOpCache(func(key []byte) ([]byte, bool, error) { - v, ok := store[string(key)] - if !ok { - return nil, false, nil - } - return v, true, nil - }) -} - -func TestNoOpGetFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found) - require.Equal(t, "v", string(val)) -} - -func TestNoOpGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - val, found, err := c.Get([]byte("missing"), true) - require.NoError(t, err) - require.False(t, found) - require.Nil(t, val) -} - -func 
TestNoOpGetError(t *testing.T) { - dbErr := errors.New("broken") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { - return nil, false, dbErr - }) - - _, _, err := c.Get([]byte("k"), true) - require.ErrorIs(t, err, dbErr) -} - -func TestNoOpGetIgnoresUpdateLru(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - val1, _, _ := c.Get([]byte("k"), true) - val2, _, _ := c.Get([]byte("k"), false) - require.Equal(t, string(val1), string(val2)) -} - -func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { - store := map[string][]byte{"k": []byte("v1")} - c := newNoOpTestCache(store) - - val, _, _ := c.Get([]byte("k"), true) - require.Equal(t, "v1", string(val)) - - store["k"] = []byte("v2") - - val, _, _ = c.Get([]byte("k"), true) - require.Equal(t, "v2", string(val), "should re-read from func, not cache") -} - -func TestNoOpSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - c.Set([]byte("k"), []byte("v")) - - _, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.False(t, found, "Set should not cache anything") -} - -func TestNoOpDeleteIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - - c.Delete([]byte("k")) - - val, found, err := c.Get([]byte("k"), true) - require.NoError(t, err) - require.True(t, found, "Delete should not affect reads") - require.Equal(t, "v", string(val)) -} - -func TestNoOpBatchSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("a"), Value: []byte("1")}, - {Key: []byte("b"), Value: []byte("2")}, - }) - require.NoError(t, err) - - _, found, _ := c.Get([]byte("a"), true) - require.False(t, found) - _, found, _ = c.Get([]byte("b"), true) - require.False(t, found) -} - -func TestNoOpBatchSetEmptyAndNil(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - require.NoError(t, c.BatchSet(nil)) - require.NoError(t, c.BatchSet([]CacheUpdate{})) -} - -func 
TestNoOpBatchGetAllFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) - - require.True(t, keys["a"].IsFound()) - require.Equal(t, "1", string(keys["a"].Value)) - require.True(t, keys["b"].IsFound()) - require.Equal(t, "2", string(keys["b"].Value)) -} - -func TestNoOpBatchGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - keys := map[string]types.BatchGetResult{"x": {}} - require.NoError(t, c.BatchGet(keys)) - require.False(t, keys["x"].IsFound()) -} - -func TestNoOpBatchGetError(t *testing.T) { - dbErr := errors.New("fail") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { - return nil, false, dbErr - }) - - keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(keys) - require.Error(t, err) - require.ErrorIs(t, err, dbErr) - require.Error(t, keys["k"].Error) -} - -func TestNoOpBatchGetEmpty(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) - - keys := map[string]types.BatchGetResult{} - require.NoError(t, c.BatchGet(keys)) -} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_manager.go b/sei-db/db_engine/pebbledb/dbcache/shard_manager.go deleted file mode 100644 index bfc837845c..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/shard_manager.go +++ /dev/null @@ -1,46 +0,0 @@ -package dbcache - -import ( - "errors" - "hash/maphash" - "sync" -) - -var ErrNumShardsNotPowerOfTwo = errors.New("numShards must be a power of two and > 0") - -// A utility for assigning keys to shard indices. -type shardManager struct { - // A random seed that makes it hard for an attacker to predict the shard index and to skew the distribution. - seed maphash.Seed - // Used to perform a quick modulo operation to get the shard index (since numShards is a power of two) - mask uint64 - // reusable Hash objects to avoid allocs - pool sync.Pool -} - -// Creates a new Sharder. 
Number of shards must be a power of two and greater than 0. -func newShardManager(numShards uint64) (*shardManager, error) { - if numShards == 0 || (numShards&(numShards-1)) != 0 { - return nil, ErrNumShardsNotPowerOfTwo - } - - return &shardManager{ - seed: maphash.MakeSeed(), // secret, randomized - mask: numShards - 1, - pool: sync.Pool{ - New: func() any { return new(maphash.Hash) }, - }, - }, nil -} - -// Shard returns a shard index in [0, numShards). -// addr should be the raw address bytes (e.g., 20-byte ETH address). -func (s *shardManager) Shard(addr []byte) uint64 { - h := s.pool.Get().(*maphash.Hash) - h.SetSeed(s.seed) - _, _ = h.Write(addr) - x := h.Sum64() - s.pool.Put(h) - - return x & s.mask -} diff --git a/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go b/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go deleted file mode 100644 index 07aa2041a2..0000000000 --- a/sei-db/db_engine/pebbledb/dbcache/shard_manager_test.go +++ /dev/null @@ -1,271 +0,0 @@ -package dbcache - -import ( - "fmt" - "math" - "sync" - "testing" - - "github.com/stretchr/testify/require" -) - -// --- NewShardManager --- - -func TestNewShardManagerValidPowersOfTwo(t *testing.T) { - for exp := 0; exp < 20; exp++ { - n := uint64(1) << exp - sm, err := newShardManager(n) - require.NoError(t, err, "numShards=%d", n) - require.NotNil(t, sm, "numShards=%d", n) - } -} - -func TestNewShardManagerZeroReturnsError(t *testing.T) { - sm, err := newShardManager(0) - require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo) - require.Nil(t, sm) -} - -func TestNewShardManagerNonPowersOfTwoReturnError(t *testing.T) { - bad := []uint64{3, 5, 6, 7, 9, 10, 12, 15, 17, 100, 255, 1023} - for _, n := range bad { - sm, err := newShardManager(n) - require.ErrorIs(t, err, ErrNumShardsNotPowerOfTwo, "numShards=%d", n) - require.Nil(t, sm, "numShards=%d", n) - } -} - -func TestNewShardManagerMaxUint64ReturnsError(t *testing.T) { - sm, err := newShardManager(math.MaxUint64) - require.ErrorIs(t, err, 
ErrNumShardsNotPowerOfTwo) - require.Nil(t, sm) -} - -func TestNewShardManagerLargePowerOfTwo(t *testing.T) { - n := uint64(1) << 40 - sm, err := newShardManager(n) - require.NoError(t, err) - require.NotNil(t, sm) -} - -// --- Shard: basic behaviour --- - -func TestShardReturnsBoundedIndex(t *testing.T) { - for _, numShards := range []uint64{1, 2, 4, 16, 256, 1024} { - sm, err := newShardManager(numShards) - require.NoError(t, err) - - for i := 0; i < 500; i++ { - key := []byte(fmt.Sprintf("key-%d", i)) - idx := sm.Shard(key) - require.Less(t, idx, numShards, "numShards=%d key=%s", numShards, key) - } - } -} - -func TestShardDeterministic(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - key := []byte("deterministic-test-key") - first := sm.Shard(key) - for i := 0; i < 100; i++ { - require.Equal(t, first, sm.Shard(key)) - } -} - -func TestShardSingleShardAlwaysReturnsZero(t *testing.T) { - sm, err := newShardManager(1) - require.NoError(t, err) - - keys := [][]byte{ - {}, - {0x00}, - {0xFF}, - []byte("anything"), - []byte("another key entirely"), - } - for _, k := range keys { - require.Equal(t, uint64(0), sm.Shard(k), "key=%q", k) - } -} - -func TestShardEmptyKey(t *testing.T) { - sm, err := newShardManager(8) - require.NoError(t, err) - - idx := sm.Shard([]byte{}) - require.Less(t, idx, uint64(8)) - - // Deterministic - require.Equal(t, idx, sm.Shard([]byte{})) -} - -func TestShardNilKey(t *testing.T) { - sm, err := newShardManager(4) - require.NoError(t, err) - - idx := sm.Shard(nil) - require.Less(t, idx, uint64(4)) - require.Equal(t, idx, sm.Shard(nil)) -} - -func TestShardBinaryKeys(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - k1 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} - k2 := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02} - 
- idx1 := sm.Shard(k1) - idx2 := sm.Shard(k2) - require.Less(t, idx1, uint64(16)) - require.Less(t, idx2, uint64(16)) -} - -func TestShardCallerMutationDoesNotAffectFutureResults(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - key := []byte("mutable") - first := sm.Shard(key) - - key[0] = 'X' - second := sm.Shard([]byte("mutable")) - require.Equal(t, first, second) -} - -// --- Distribution --- - -func TestShardDistribution(t *testing.T) { - const numShards = 16 - const numKeys = 10_000 - sm, err := newShardManager(numShards) - require.NoError(t, err) - - counts := make([]int, numShards) - for i := 0; i < numKeys; i++ { - key := []byte(fmt.Sprintf("addr-%06d", i)) - counts[sm.Shard(key)]++ - } - - expected := float64(numKeys) / float64(numShards) - for shard, count := range counts { - ratio := float64(count) / expected - require.Greater(t, ratio, 0.5, "shard %d is severely underrepresented (%d)", shard, count) - require.Less(t, ratio, 1.5, "shard %d is severely overrepresented (%d)", shard, count) - } -} - -// --- Distinct managers --- - -func TestDifferentManagersHaveDifferentSeeds(t *testing.T) { - sm1, err := newShardManager(256) - require.NoError(t, err) - sm2, err := newShardManager(256) - require.NoError(t, err) - - // With distinct random seeds, at least some keys should hash differently. 
- diffCount := 0 - for i := 0; i < 200; i++ { - key := []byte(fmt.Sprintf("seed-test-%d", i)) - if sm1.Shard(key) != sm2.Shard(key) { - diffCount++ - } - } - require.Greater(t, diffCount, 0, "two managers with independent seeds should differ on at least one key") -} - -// --- Concurrency --- - -func TestShardConcurrentAccess(t *testing.T) { - sm, err := newShardManager(64) - require.NoError(t, err) - - const goroutines = 32 - const iters = 1000 - - key := []byte("concurrent-key") - expected := sm.Shard(key) - - var wg sync.WaitGroup - wg.Add(goroutines) - for g := 0; g < goroutines; g++ { - go func() { - defer wg.Done() - for i := 0; i < iters; i++ { - got := sm.Shard(key) - if got != expected { - t.Errorf("concurrent Shard returned %d, want %d", got, expected) - return - } - } - }() - } - wg.Wait() -} - -func TestShardConcurrentDifferentKeys(t *testing.T) { - sm, err := newShardManager(32) - require.NoError(t, err) - - const goroutines = 16 - const keysPerGoroutine = 500 - - var wg sync.WaitGroup - wg.Add(goroutines) - for g := 0; g < goroutines; g++ { - g := g - go func() { - defer wg.Done() - for i := 0; i < keysPerGoroutine; i++ { - key := []byte(fmt.Sprintf("g%d-k%d", g, i)) - idx := sm.Shard(key) - if idx >= 32 { - t.Errorf("Shard(%q) = %d, want < 32", key, idx) - return - } - } - }() - } - wg.Wait() -} - -// --- Mask correctness --- - -func TestShardMaskMatchesNumShards(t *testing.T) { - for exp := 0; exp < 16; exp++ { - numShards := uint64(1) << exp - sm, err := newShardManager(numShards) - require.NoError(t, err) - require.Equal(t, numShards-1, sm.mask, "numShards=%d", numShards) - } -} - -// --- 20-byte ETH-style addresses --- - -func TestShardWith20ByteAddresses(t *testing.T) { - sm, err := newShardManager(16) - require.NoError(t, err) - - addr := make([]byte, 20) - for i := 0; i < 20; i++ { - addr[i] = byte(i + 1) - } - - idx := sm.Shard(addr) - require.Less(t, idx, uint64(16)) - require.Equal(t, idx, sm.Shard(addr)) -} - -func TestShardSingleByteKey(t 
*testing.T) { - sm, err := newShardManager(4) - require.NoError(t, err) - - for b := 0; b < 256; b++ { - idx := sm.Shard([]byte{byte(b)}) - require.Less(t, idx, uint64(4), "byte=%d", b) - } -} From e19a9988308d30884ff9cae666deea4f78fad447 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 09:10:33 -0500 Subject: [PATCH 060/119] refactor API --- sei-db/db_engine/dbcache/cache.go | 21 +- sei-db/db_engine/dbcache/cache_impl.go | 12 +- sei-db/db_engine/dbcache/cache_impl_test.go | 173 ++++++------ sei-db/db_engine/dbcache/cached_batch_test.go | 4 +- .../db_engine/dbcache/cached_key_value_db.go | 17 +- sei-db/db_engine/dbcache/noop_cache.go | 20 +- sei-db/db_engine/dbcache/noop_cache_test.go | 69 ++--- sei-db/db_engine/dbcache/shard.go | 19 +- sei-db/db_engine/dbcache/shard_test.go | 258 +++++++++--------- sei-db/db_engine/pebbledb/db.go | 12 - 10 files changed, 289 insertions(+), 316 deletions(-) diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index 2ec5acfd82..0c27d22bf3 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -9,11 +9,20 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -// Cache describes a cache capable of being used by a FlatKV store. +// Reader reads a single key from the backing store. +// +// If the key does not exist, Reader must return (nil, false, nil) rather than an error. +// Errors are reserved for actual failures (e.g. I/O errors). +type Reader func(key []byte) (value []byte, found bool, err error) + +// Cache describes a read-through cache backed by a Reader. type Cache interface { - // Get returns the value for the given key, or (nil, false) if not found. + // Get returns the value for the given key, or (nil, false, nil) if not found. + // On a cache miss the provided Reader is called to fetch from the backing store. Get( + // Reads a value from the backing store on cache miss. + read Reader, // The entry to fetch. 
key []byte, // If true, the LRU queue will be updated. If false, the LRU queue will not be updated. @@ -23,10 +32,10 @@ type Cache interface { ) ([]byte, bool, error) // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the - // map with the results. + // map with the results. On cache misses the provided Reader is called to fetch from the backing store. // // It is not thread safe to read or mutate the map while this method is running. - BatchGet(keys map[string]types.BatchGetResult) error + BatchGet(read Reader, keys map[string]types.BatchGetResult) error // Set sets the value for the given key. Set(key []byte, value []byte) @@ -54,7 +63,6 @@ func (u *CacheUpdate) IsDelete() bool { // BuildCache creates a new Cache. func BuildCache( ctx context.Context, - readFunc func(key []byte) ([]byte, bool, error), shardCount uint64, maxSize uint64, readPool threading.Pool, @@ -64,12 +72,11 @@ func BuildCache( ) (Cache, error) { if maxSize == 0 { - return NewNoOpCache(readFunc), nil + return NewNoOpCache(), nil } cache, err := NewStandardCache( ctx, - readFunc, shardCount, maxSize, readPool, diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index ae55bea8b7..e7b9caba66 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -33,8 +33,6 @@ type cache struct { // background size scrape runs every metricsScrapeInterval. func NewStandardCache( ctx context.Context, - // A function that reads a value from the database. - readFunc func(key []byte) ([]byte, bool, error), // The number of shards in the cache. Must be a power of two and greater than 0. shardCount uint64, // The maximum size of the cache, in bytes. 
@@ -66,7 +64,7 @@ func NewStandardCache( shards := make([]*shard, shardCount) for i := uint64(0); i < shardCount; i++ { - shards[i], err = NewShard(ctx, readPool, readFunc, sizePerShard) + shards[i], err = NewShard(ctx, readPool, sizePerShard) if err != nil { return nil, fmt.Errorf("failed to create shard: %w", err) } @@ -123,7 +121,7 @@ func (c *cache) BatchSet(updates []CacheUpdate) error { return nil } -func (c *cache) BatchGet(keys map[string]types.BatchGetResult) error { +func (c *cache) BatchGet(read Reader, keys map[string]types.BatchGetResult) error { work := make(map[uint64]map[string]types.BatchGetResult) for key := range keys { idx := c.shardManager.Shard([]byte(key)) @@ -139,7 +137,7 @@ func (c *cache) BatchGet(keys map[string]types.BatchGetResult) error { err := c.miscPool.Submit(c.ctx, func() { defer wg.Done() - err := c.shards[shardIndex].BatchGet(subMap) + err := c.shards[shardIndex].BatchGet(read, subMap) if err != nil { for key := range subMap { subMap[key] = types.BatchGetResult{Error: err} @@ -167,11 +165,11 @@ func (c *cache) Delete(key []byte) { shard.Delete(key) } -func (c *cache) Get(key []byte, updateLru bool) ([]byte, bool, error) { +func (c *cache) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, error) { shardIndex := c.shardManager.Shard(key) shard := c.shards[shardIndex] - value, ok, err := shard.Get(key, updateLru) + value, ok, err := shard.Get(read, key, updateLru) if err != nil { return nil, false, fmt.Errorf("failed to get value from shard: %w", err) } diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 5e4981b281..d11201478a 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -21,9 +21,9 @@ import ( func noopRead(key []byte) ([]byte, bool, error) { return nil, false, nil } -func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uint64) Cache { +func newTestCache(t *testing.T, store 
map[string][]byte, shardCount, maxSize uint64) (Cache, Reader) { t.Helper() - readFunc := func(key []byte) ([]byte, bool, error) { + read := func(key []byte) ([]byte, bool, error) { v, ok := store[string(key)] if !ok { return nil, false, nil @@ -31,9 +31,9 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin return v, true, nil } pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), readFunc, shardCount, maxSize, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, "", 0) require.NoError(t, err) - return c + return c, read } // --------------------------------------------------------------------------- @@ -42,42 +42,42 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin func TestNewStandardCacheValid(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), noopRead, 4, 1024, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, "", 0) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheSingleShard(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), noopRead, 1, 1024, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, "", 0) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheShardCountZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), noopRead, 0, 1024, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, "", 0) require.Error(t, err) } func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{3, 5, 6, 7, 9, 10} { - _, err := NewStandardCache(context.Background(), noopRead, n, 1024, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), n, 1024, pool, 
pool, "", 0) require.Error(t, err, "shardCount=%d", n) } } func TestNewStandardCacheMaxSizeZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), noopRead, 4, 0, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, "", 0) require.Error(t, err) } func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { pool := threading.NewAdHocPool() // shardCount=4, maxSize=3 → sizePerShard=0 - _, err := NewStandardCache(context.Background(), noopRead, 4, 3, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, "", 0) require.Error(t, err) } @@ -85,7 +85,7 @@ func TestNewStandardCacheWithMetrics(t *testing.T) { pool := threading.NewAdHocPool() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c, err := NewStandardCache(ctx, noopRead, 2, 1024, pool, pool, "test-cache", time.Hour) + c, err := NewStandardCache(ctx, 2, 1024, pool, pool, "test-cache", time.Hour) require.NoError(t, err) require.NotNil(t, c) } @@ -96,29 +96,29 @@ func TestNewStandardCacheWithMetrics(t *testing.T) { func TestCacheGetFromDB(t *testing.T) { store := map[string][]byte{"foo": []byte("bar")} - c := newTestCache(t, store, 4, 4096) + c, read := newTestCache(t, store, 4, 4096) - val, found, err := c.Get([]byte("foo"), true) + val, found, err := c.Get(read, []byte("foo"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "bar", string(val)) } func TestCacheGetNotFound(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) - val, found, err := c.Get([]byte("missing"), true) + val, found, err := c.Get(read, []byte("missing"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) } func TestCacheGetAfterSet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("k"), 
[]byte("v")) - val, found, err := c.Get([]byte("k"), true) + val, found, err := c.Get(read, []byte("k"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "v", string(val)) @@ -126,11 +126,11 @@ func TestCacheGetAfterSet(t *testing.T) { func TestCacheGetAfterDelete(t *testing.T) { store := map[string][]byte{"k": []byte("v")} - c := newTestCache(t, store, 4, 4096) + c, read := newTestCache(t, store, 4, 4096) c.Delete([]byte("k")) - val, found, err := c.Get([]byte("k"), true) + val, found, err := c.Get(read, []byte("k"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -140,9 +140,9 @@ func TestCacheGetDBError(t *testing.T) { dbErr := errors.New("db fail") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) - _, _, err := c.Get([]byte("k"), true) + _, _, err := c.Get(readFunc, []byte("k"), true) require.Error(t, err) require.ErrorIs(t, err, dbErr) } @@ -154,12 +154,10 @@ func TestCacheGetSameKeyConsistentShard(t *testing.T) { return []byte("val"), true, nil } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 4, 4096, pool, pool, "", 0) + c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, "", 0) - // First call populates cache in a specific shard. - val1, _, _ := c.Get([]byte("key"), true) - // Second call should hit cache in the same shard. 
- val2, _, _ := c.Get([]byte("key"), true) + val1, _, _ := c.Get(readFunc, []byte("key"), true) + val2, _, _ := c.Get(readFunc, []byte("key"), true) require.Equal(t, string(val1), string(val2)) require.Equal(t, int64(1), readCalls.Load(), "second Get should hit cache") @@ -170,34 +168,34 @@ func TestCacheGetSameKeyConsistentShard(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheSetNewKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("a"), []byte("1")) - val, found, err := c.Get([]byte("a"), false) + val, found, err := c.Get(read, []byte("a"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "1", string(val)) } func TestCacheSetOverwrite(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("a"), []byte("old")) c.Set([]byte("a"), []byte("new")) - val, found, err := c.Get([]byte("a"), false) + val, found, err := c.Get(read, []byte("a"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "new", string(val)) } func TestCacheSetNilValue(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("k"), nil) - val, found, err := c.Get([]byte("k"), false) + val, found, err := c.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Nil(t, val) @@ -208,34 +206,34 @@ func TestCacheSetNilValue(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheDeleteExistingKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("k"), []byte("v")) c.Delete([]byte("k")) - _, found, err := c.Get([]byte("k"), false) + _, found, err := c.Get(read, []byte("k"), 
false) require.NoError(t, err) require.False(t, found) } func TestCacheDeleteNonexistent(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Delete([]byte("ghost")) - _, found, err := c.Get([]byte("ghost"), false) + _, found, err := c.Get(read, []byte("ghost"), false) require.NoError(t, err) require.False(t, found) } func TestCacheDeleteThenSet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("k"), []byte("v1")) c.Delete([]byte("k")) c.Set([]byte("k"), []byte("v2")) - val, found, err := c.Get([]byte("k"), false) + val, found, err := c.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "v2", string(val)) @@ -246,7 +244,7 @@ func TestCacheDeleteThenSet(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheBatchSetMultipleKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) err := c.BatchSet([]CacheUpdate{ {Key: []byte("a"), Value: []byte("1")}, @@ -256,7 +254,7 @@ func TestCacheBatchSetMultipleKeys(t *testing.T) { require.NoError(t, err) for _, tc := range []struct{ key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { - val, found, err := c.Get([]byte(tc.key), false) + val, found, err := c.Get(read, []byte(tc.key), false) require.NoError(t, err, "key=%q", tc.key) require.True(t, found, "key=%q", tc.key) require.Equal(t, tc.want, string(val), "key=%q", tc.key) @@ -264,7 +262,7 @@ func TestCacheBatchSetMultipleKeys(t *testing.T) { } func TestCacheBatchSetMixedSetAndDelete(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("keep"), []byte("v")) c.Set([]byte("remove"), []byte("v")) @@ -276,29 +274,28 @@ func 
TestCacheBatchSetMixedSetAndDelete(t *testing.T) { }) require.NoError(t, err) - val, found, _ := c.Get([]byte("keep"), false) + val, found, _ := c.Get(read, []byte("keep"), false) require.True(t, found) require.Equal(t, "updated", string(val)) - _, found, _ = c.Get([]byte("remove"), false) + _, found, _ = c.Get(read, []byte("remove"), false) require.False(t, found) - val, found, _ = c.Get([]byte("new"), false) + val, found, _ = c.Get(read, []byte("new"), false) require.True(t, found) require.Equal(t, "fresh", string(val)) } func TestCacheBatchSetEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) require.NoError(t, c.BatchSet(nil)) require.NoError(t, c.BatchSet([]CacheUpdate{})) } func TestCacheBatchSetPoolFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) err := c.BatchSet([]CacheUpdate{ {Key: []byte("k"), Value: []byte("v")}, @@ -311,13 +308,13 @@ func TestCacheBatchSetPoolFailure(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheBatchGetAllCached(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("a"), []byte("1")) c.Set([]byte("b"), []byte("2")) keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.True(t, keys["a"].IsFound()) require.Equal(t, "1", string(keys["a"].Value)) @@ -327,10 +324,10 @@ func TestCacheBatchGetAllCached(t *testing.T) { func TestCacheBatchGetAllFromDB(t *testing.T) { store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} - c := 
newTestCache(t, store, 4, 4096) + c, read := newTestCache(t, store, 4, 4096) keys := map[string]types.BatchGetResult{"x": {}, "y": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.True(t, keys["x"].IsFound()) require.Equal(t, "10", string(keys["x"].Value)) @@ -340,12 +337,12 @@ func TestCacheBatchGetAllFromDB(t *testing.T) { func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { store := map[string][]byte{"db-key": []byte("from-db")} - c := newTestCache(t, store, 4, 4096) + c, read := newTestCache(t, store, 4, 4096) c.Set([]byte("cached"), []byte("from-cache")) keys := map[string]types.BatchGetResult{"cached": {}, "db-key": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.True(t, keys["cached"].IsFound()) require.Equal(t, "from-cache", string(keys["cached"].Value)) @@ -354,21 +351,21 @@ func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { } func TestCacheBatchGetNotFoundKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) keys := map[string]types.BatchGetResult{"nope": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.False(t, keys["nope"].IsFound()) } func TestCacheBatchGetDeletedKey(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("k"), []byte("v")) c.Delete([]byte("k")) keys := map[string]types.BatchGetResult{"k": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.False(t, keys["k"].IsFound()) } @@ -376,38 +373,34 @@ func TestCacheBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, pool, pool, "", 0) + c, _ := 
NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) keys := map[string]types.BatchGetResult{"fail": {}} - require.NoError(t, c.BatchGet(keys), "BatchGet itself should not fail") + require.NoError(t, c.BatchGet(readFunc, keys), "BatchGet itself should not fail") require.Error(t, keys["fail"].Error) } func TestCacheBatchGetEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) keys := map[string]types.BatchGetResult{} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) } func TestCacheBatchGetPoolFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, readPool, &failPool{}, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(keys) + err := c.BatchGet(noopRead, keys) require.Error(t, err) } func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { - // miscPool succeeds (goroutine runs), but readPool fails inside shard.BatchGet, - // causing the per-key error branch to be hit. 
- readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } miscPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), readFunc, 1, 4096, &failPool{}, miscPool, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, miscPool, "", 0) keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(noopRead, keys)) for k, r := range keys { require.Error(t, r.Error, "key=%q should have per-key error", k) @@ -419,10 +412,9 @@ func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheDistributesAcrossShards(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) impl := c.(*cache) - // Insert enough distinct keys that at least 2 shards get entries. for i := 0; i < 100; i++ { c.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("v")) } @@ -438,7 +430,7 @@ func TestCacheDistributesAcrossShards(t *testing.T) { } func TestCacheGetRoutesToSameShard(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) impl := c.(*cache) c.Set([]byte("key"), []byte("val")) @@ -453,7 +445,7 @@ func TestCacheGetRoutesToSameShard(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) impl := c.(*cache) bytes, entries := impl.getCacheSizeInfo() @@ -462,7 +454,7 @@ func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { } func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) impl := c.(*cache) for i := 0; i < 20; i++ { @@ -479,7 
+471,7 @@ func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 100_000) + c, read := newTestCache(t, map[string][]byte{}, 4, 100_000) updates := make([]CacheUpdate, 200) for i := range updates { @@ -494,7 +486,7 @@ func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { for i := 0; i < 200; i++ { keys[fmt.Sprintf("key-%03d", i)] = types.BatchGetResult{} } - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) for i := 0; i < 200; i++ { k := fmt.Sprintf("key-%03d", i) @@ -514,7 +506,7 @@ func TestCacheConcurrentGetSet(t *testing.T) { for i := 0; i < 50; i++ { store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) } - c := newTestCache(t, store, 4, 100_000) + c, read := newTestCache(t, store, 4, 100_000) var wg sync.WaitGroup for i := 0; i < 100; i++ { @@ -528,7 +520,7 @@ func TestCacheConcurrentGetSet(t *testing.T) { }() go func() { defer wg.Done() - c.Get(key, true) + c.Get(read, key, true) }() } wg.Wait() @@ -539,7 +531,7 @@ func TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { for i := 0; i < 50; i++ { store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) } - c := newTestCache(t, store, 4, 100_000) + c, read := newTestCache(t, store, 4, 100_000) var wg sync.WaitGroup @@ -563,14 +555,14 @@ func TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { for i := 0; i < 50; i++ { keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} } - c.BatchGet(keys) + c.BatchGet(read, keys) }() wg.Wait() } func TestCacheConcurrentDeleteAndGet(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 100_000) + c, read := newTestCache(t, map[string][]byte{}, 4, 100_000) for i := 0; i < 100; i++ { c.Set([]byte(fmt.Sprintf("k-%d", i)), []byte("v")) @@ -586,7 +578,7 @@ func TestCacheConcurrentDeleteAndGet(t *testing.T) { }() go 
func() { defer wg.Done() - c.Get(key, true) + c.Get(read, key, true) }() } wg.Wait() @@ -597,17 +589,13 @@ func TestCacheConcurrentDeleteAndGet(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheEvictsPerShard(t *testing.T) { - // 1 shard, maxSize=20. Inserting more than 20 bytes triggers eviction. - c := newTestCache(t, map[string][]byte{}, 1, 20) + c, _ := newTestCache(t, map[string][]byte{}, 1, 20) impl := c.(*cache) - // key(1) + value(8) = 9 bytes each c.Set([]byte("a"), []byte("11111111")) c.Set([]byte("b"), []byte("22222222")) - // 18 bytes, fits c.Set([]byte("c"), []byte("33333333")) - // 27 bytes → must evict to get under 20 bytes, _ := impl.shards[0].getSizeInfo() require.LessOrEqual(t, bytes, uint64(20)) @@ -618,9 +606,8 @@ func TestCacheEvictsPerShard(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheBatchSetSameShard(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 1, 4096) + c, read := newTestCache(t, map[string][]byte{}, 1, 4096) - // With 1 shard, every key goes to shard 0. 
err := c.BatchSet([]CacheUpdate{ {Key: []byte("x"), Value: []byte("1")}, {Key: []byte("y"), Value: []byte("2")}, @@ -629,7 +616,7 @@ func TestCacheBatchSetSameShard(t *testing.T) { require.NoError(t, err) for _, tc := range []struct{ key, want string }{{"x", "1"}, {"y", "2"}, {"z", "3"}} { - val, found, err := c.Get([]byte(tc.key), false) + val, found, err := c.Get(read, []byte(tc.key), false) require.NoError(t, err) require.True(t, found) require.Equal(t, tc.want, string(val)) @@ -641,7 +628,7 @@ func TestCacheBatchSetSameShard(t *testing.T) { // --------------------------------------------------------------------------- func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { - c := newTestCache(t, map[string][]byte{}, 4, 4096) + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) c.Set([]byte("a"), []byte("1")) c.Set([]byte("b"), []byte("2")) @@ -654,7 +641,7 @@ func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { require.NoError(t, err) keys := map[string]types.BatchGetResult{"a": {}, "b": {}, "c": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.True(t, keys["a"].IsFound()) require.Equal(t, "updated", string(keys["a"].Value)) @@ -670,7 +657,7 @@ func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { - c, err := NewStandardCache(context.Background(), noopRead, n, n*100, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, "", 0) require.NoError(t, err, "shardCount=%d", n) require.NotNil(t, c, "shardCount=%d", n) } diff --git a/sei-db/db_engine/dbcache/cached_batch_test.go b/sei-db/db_engine/dbcache/cached_batch_test.go index 5aeb533238..cf8b95c9f4 100644 --- a/sei-db/db_engine/dbcache/cached_batch_test.go +++ b/sei-db/db_engine/dbcache/cached_batch_test.go @@ -69,12 +69,12 @@ func newMockCache() 
*mockCache { return &mockCache{data: make(map[string][]byte)} } -func (mc *mockCache) Get(key []byte, _ bool) ([]byte, bool, error) { +func (mc *mockCache) Get(_ Reader, key []byte, _ bool) ([]byte, bool, error) { v, ok := mc.data[string(key)] return v, ok, nil } -func (mc *mockCache) BatchGet(keys map[string]types.BatchGetResult) error { +func (mc *mockCache) BatchGet(_ Reader, keys map[string]types.BatchGetResult) error { for k := range keys { v, ok := mc.data[k] if ok { diff --git a/sei-db/db_engine/dbcache/cached_key_value_db.go b/sei-db/db_engine/dbcache/cached_key_value_db.go index 0f926dff98..91551b1604 100644 --- a/sei-db/db_engine/dbcache/cached_key_value_db.go +++ b/sei-db/db_engine/dbcache/cached_key_value_db.go @@ -14,15 +14,26 @@ var _ types.Checkpointable = (*cachedKeyValueDB)(nil) type cachedKeyValueDB struct { db types.KeyValueDB cache Cache + read Reader } // Combine a cache and a key-value database to create a new key-value database with caching. func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { - return &cachedKeyValueDB{db: db, cache: cache} + read := func(key []byte) ([]byte, bool, error) { + val, err := db.Get(key) + if err != nil { + if errorutils.IsNotFound(err) { + return nil, false, nil + } + return nil, false, err + } + return val, true, nil + } + return &cachedKeyValueDB{db: db, cache: cache, read: read} } func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { - val, found, err := c.cache.Get(key, true) + val, found, err := c.cache.Get(c.read, key, true) if err != nil { return nil, fmt.Errorf("failed to get value from cache: %w", err) } @@ -33,7 +44,7 @@ func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { } func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error { - err := c.cache.BatchGet(keys) + err := c.cache.BatchGet(c.read, keys) if err != nil { return fmt.Errorf("failed to get values from cache: %w", err) } diff --git a/sei-db/db_engine/dbcache/noop_cache.go 
b/sei-db/db_engine/dbcache/noop_cache.go index 1e40e02879..fe22771212 100644 --- a/sei-db/db_engine/dbcache/noop_cache.go +++ b/sei-db/db_engine/dbcache/noop_cache.go @@ -9,26 +9,24 @@ import ( var _ Cache = (*noOpCache)(nil) // noOpCache is a Cache that performs no caching. Every Get falls through -// to the underlying readFunc. Set, Delete, and BatchSet are no-ops. +// to the provided Reader. Set, Delete, and BatchSet are no-ops. // Useful for testing the storage layer without cache interference, or for // workloads where caching is not beneficial. -type noOpCache struct { - readFunc func(key []byte) ([]byte, bool, error) -} +type noOpCache struct{} -// NewNoOpCache creates a Cache that always reads from readFunc and never caches. -func NewNoOpCache(readFunc func(key []byte) ([]byte, bool, error)) Cache { - return &noOpCache{readFunc: readFunc} +// NewNoOpCache creates a Cache that always reads via the provided Reader and never caches. +func NewNoOpCache() Cache { + return &noOpCache{} } -func (c *noOpCache) Get(key []byte, _ bool) ([]byte, bool, error) { - return c.readFunc(key) +func (c *noOpCache) Get(read Reader, key []byte, _ bool) ([]byte, bool, error) { + return read(key) } -func (c *noOpCache) BatchGet(keys map[string]types.BatchGetResult) error { +func (c *noOpCache) BatchGet(read Reader, keys map[string]types.BatchGetResult) error { var firstErr error for k := range keys { - val, _, err := c.readFunc([]byte(k)) + val, _, err := read([]byte(k)) if err != nil { keys[k] = types.BatchGetResult{Error: err} if firstErr == nil { diff --git a/sei-db/db_engine/dbcache/noop_cache_test.go b/sei-db/db_engine/dbcache/noop_cache_test.go index 2fd7bb2790..6d1bb5a8f8 100644 --- a/sei-db/db_engine/dbcache/noop_cache_test.go +++ b/sei-db/db_engine/dbcache/noop_cache_test.go @@ -9,29 +9,30 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -func newNoOpTestCache(store map[string][]byte) Cache { - return NewNoOpCache(func(key []byte) ([]byte, bool, 
error) { +func newNoOpTestCache(store map[string][]byte) (Cache, Reader) { + read := func(key []byte) ([]byte, bool, error) { v, ok := store[string(key)] if !ok { return nil, false, nil } return v, true, nil - }) + } + return NewNoOpCache(), read } func TestNoOpGetFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + c, read := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - val, found, err := c.Get([]byte("k"), true) + val, found, err := c.Get(read, []byte("k"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "v", string(val)) } func TestNoOpGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, read := newNoOpTestCache(map[string][]byte{}) - val, found, err := c.Get([]byte("missing"), true) + val, found, err := c.Get(read, []byte("missing"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -39,58 +40,59 @@ func TestNoOpGetNotFound(t *testing.T) { func TestNoOpGetError(t *testing.T) { dbErr := errors.New("broken") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + read := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr - }) + } + c := NewNoOpCache() - _, _, err := c.Get([]byte("k"), true) + _, _, err := c.Get(read, []byte("k"), true) require.ErrorIs(t, err, dbErr) } func TestNoOpGetIgnoresUpdateLru(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + c, read := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) - val1, _, _ := c.Get([]byte("k"), true) - val2, _, _ := c.Get([]byte("k"), false) + val1, _, _ := c.Get(read, []byte("k"), true) + val2, _, _ := c.Get(read, []byte("k"), false) require.Equal(t, string(val1), string(val2)) } func TestNoOpGetAlwaysReadsFromFunc(t *testing.T) { store := map[string][]byte{"k": []byte("v1")} - c := newNoOpTestCache(store) + c, read := newNoOpTestCache(store) - val, _, _ := c.Get([]byte("k"), true) + val, _, _ := c.Get(read, []byte("k"), true) require.Equal(t, 
"v1", string(val)) store["k"] = []byte("v2") - val, _, _ = c.Get([]byte("k"), true) + val, _, _ = c.Get(read, []byte("k"), true) require.Equal(t, "v2", string(val), "should re-read from func, not cache") } func TestNoOpSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, read := newNoOpTestCache(map[string][]byte{}) c.Set([]byte("k"), []byte("v")) - _, found, err := c.Get([]byte("k"), true) + _, found, err := c.Get(read, []byte("k"), true) require.NoError(t, err) require.False(t, found, "Set should not cache anything") } func TestNoOpDeleteIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) + c, read := newNoOpTestCache(map[string][]byte{"k": []byte("v")}) c.Delete([]byte("k")) - val, found, err := c.Get([]byte("k"), true) + val, found, err := c.Get(read, []byte("k"), true) require.NoError(t, err) require.True(t, found, "Delete should not affect reads") require.Equal(t, "v", string(val)) } func TestNoOpBatchSetIsNoOp(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, read := newNoOpTestCache(map[string][]byte{}) err := c.BatchSet([]CacheUpdate{ {Key: []byte("a"), Value: []byte("1")}, @@ -98,24 +100,24 @@ func TestNoOpBatchSetIsNoOp(t *testing.T) { }) require.NoError(t, err) - _, found, _ := c.Get([]byte("a"), true) + _, found, _ := c.Get(read, []byte("a"), true) require.False(t, found) - _, found, _ = c.Get([]byte("b"), true) + _, found, _ = c.Get(read, []byte("b"), true) require.False(t, found) } func TestNoOpBatchSetEmptyAndNil(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, _ := newNoOpTestCache(map[string][]byte{}) require.NoError(t, c.BatchSet(nil)) require.NoError(t, c.BatchSet([]CacheUpdate{})) } func TestNoOpBatchGetAllFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) + c, read := newNoOpTestCache(map[string][]byte{"a": []byte("1"), "b": []byte("2")}) keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - 
require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.True(t, keys["a"].IsFound()) require.Equal(t, "1", string(keys["a"].Value)) @@ -124,29 +126,30 @@ func TestNoOpBatchGetAllFound(t *testing.T) { } func TestNoOpBatchGetNotFound(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, read := newNoOpTestCache(map[string][]byte{}) keys := map[string]types.BatchGetResult{"x": {}} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) require.False(t, keys["x"].IsFound()) } func TestNoOpBatchGetError(t *testing.T) { dbErr := errors.New("fail") - c := NewNoOpCache(func(key []byte) ([]byte, bool, error) { + read := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr - }) + } + c := NewNoOpCache() keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(keys) + err := c.BatchGet(read, keys) require.Error(t, err) require.ErrorIs(t, err, dbErr) require.Error(t, keys["k"].Error) } func TestNoOpBatchGetEmpty(t *testing.T) { - c := newNoOpTestCache(map[string][]byte{}) + c, read := newNoOpTestCache(map[string][]byte{}) keys := map[string]types.BatchGetResult{} - require.NoError(t, c.BatchGet(keys)) + require.NoError(t, c.BatchGet(read, keys)) } diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go index eb44f3e47e..80f8c18aba 100644 --- a/sei-db/db_engine/dbcache/shard.go +++ b/sei-db/db_engine/dbcache/shard.go @@ -27,9 +27,6 @@ type shard struct { // A pool for asynchronous reads. readPool threading.Pool - // A function that reads a value from the database. - readFunc func(key []byte) ([]byte, bool, error) - // The maximum size of this cache, in bytes. 
maxSize uint64 @@ -77,7 +74,6 @@ type shardEntry struct { func NewShard( ctx context.Context, readPool threading.Pool, - readFunc func(key []byte) ([]byte, bool, error), maxSize uint64, ) (*shard, error) { @@ -88,7 +84,6 @@ func NewShard( return &shard{ ctx: ctx, readPool: readPool, - readFunc: readFunc, lock: sync.Mutex{}, data: make(map[string]*shardEntry), gcQueue: newLRUQueue(), @@ -96,8 +91,8 @@ func NewShard( }, nil } -// Get returns the value for the given key, or (nil, false) if not found. -func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { +// Get returns the value for the given key, or (nil, false, nil) if not found. +func (s *shard) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, error) { s.lock.Lock() entry := s.getEntry(key) @@ -110,7 +105,7 @@ func (s *shard) Get(key []byte, updateLru bool) ([]byte, bool, error) { case statusScheduled: return s.getScheduled(entry) case statusUnknown: - return s.getUnknown(entry, key) + return s.getUnknown(read, entry, key) default: s.lock.Unlock() panic(fmt.Sprintf("unexpected status: %#v", entry.status)) @@ -157,7 +152,7 @@ func (s *shard) getScheduled(entry *shardEntry) ([]byte, bool, error) { } // Handles Get for a key not yet read. Schedules the read and waits. Lock must be held; releases it. 
-func (s *shard) getUnknown(entry *shardEntry, key []byte) ([]byte, bool, error) { +func (s *shard) getUnknown(read Reader, entry *shardEntry, key []byte) ([]byte, bool, error) { entry.status = statusScheduled valueChan := make(chan readResult, 1) entry.valueChan = valueChan @@ -165,7 +160,7 @@ func (s *shard) getUnknown(entry *shardEntry, key []byte) ([]byte, bool, error) s.metrics.reportCacheMisses(1) startTime := time.Now() err := s.readPool.Submit(s.ctx, func() { - value, _, readErr := s.readFunc(key) + value, _, readErr := read(key) entry.injectValue(key, readResult{value: value, err: readErr}) }) if err != nil { @@ -235,7 +230,7 @@ type pendingRead struct { } // BatchGet reads a batch of keys from the shard. Results are written into the provided map. -func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { +func (s *shard) BatchGet(read Reader, keys map[string]types.BatchGetResult) error { pending := make([]pendingRead, 0, len(keys)) var hits int64 @@ -284,7 +279,7 @@ func (s *shard) BatchGet(keys map[string]types.BatchGetResult) error { if pending[i].needsSchedule { p := &pending[i] err := s.readPool.Submit(s.ctx, func() { - value, _, readErr := s.readFunc([]byte(p.key)) + value, _, readErr := read([]byte(p.key)) p.entry.valueChan <- readResult{value: value, err: readErr} }) if err != nil { diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go index e23614299e..b39414959f 100644 --- a/sei-db/db_engine/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -19,20 +19,18 @@ import ( // helpers // --------------------------------------------------------------------------- -// newTestShard creates a shard backed by a simple in-memory map. -// The returned readFunc map can be populated before calling Get. 
-func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) *shard { +func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) (*shard, Reader) { t.Helper() - readFunc := func(key []byte) ([]byte, bool, error) { + read := Reader(func(key []byte) ([]byte, bool, error) { v, ok := store[string(key)] if !ok { return nil, false, nil } return v, true, nil - } - s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, maxSize) + }) + s, err := NewShard(context.Background(), threading.NewAdHocPool(), maxSize) require.NoError(t, err) - return s + return s, read } // --------------------------------------------------------------------------- @@ -40,15 +38,13 @@ func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) *shard // --------------------------------------------------------------------------- func TestNewShardValid(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - s, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 1024) + s, err := NewShard(context.Background(), threading.NewAdHocPool(), 1024) require.NoError(t, err) require.NotNil(t, s) } func TestNewShardZeroMaxSize(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, nil } - _, err := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 0) + _, err := NewShard(context.Background(), threading.NewAdHocPool(), 0) require.Error(t, err) } @@ -58,18 +54,18 @@ func TestNewShardZeroMaxSize(t *testing.T) { func TestGetCacheMissFoundInDB(t *testing.T) { store := map[string][]byte{"hello": []byte("world")} - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) - val, found, err := s.Get([]byte("hello"), true) + val, found, err := s.Get(read, []byte("hello"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "world", string(val)) } func TestGetCacheMissNotFoundInDB(t *testing.T) { - s := 
newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) - val, found, err := s.Get([]byte("missing"), true) + val, found, err := s.Get(read, []byte("missing"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -77,29 +73,29 @@ func TestGetCacheMissNotFoundInDB(t *testing.T) { func TestGetCacheMissDBError(t *testing.T) { dbErr := errors.New("disk on fire") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) - _, _, err := s.Get([]byte("boom"), true) + _, _, err := s.Get(readFunc, []byte("boom"), true) require.Error(t, err) require.ErrorIs(t, err, dbErr) } func TestGetDBErrorDoesNotCacheResult(t *testing.T) { var calls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { n := calls.Add(1) if n == 1 { return nil, false, errors.New("transient") } return []byte("recovered"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) - _, _, err := s.Get([]byte("key"), true) + _, _, err := s.Get(readFunc, []byte("key"), true) require.Error(t, err, "first call should fail") - val, found, err := s.Get([]byte("key"), true) + val, found, err := s.Get(readFunc, []byte("key"), true) require.NoError(t, err, "second call should succeed") require.True(t, found) require.Equal(t, "recovered", string(val)) @@ -111,22 +107,22 @@ func TestGetDBErrorDoesNotCacheResult(t *testing.T) { // --------------------------------------------------------------------------- func TestGetCacheHitAvailable(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{"k": 
[]byte("v")}) + s, read := newTestShard(t, 4096, map[string][]byte{"k": []byte("v")}) - s.Get([]byte("k"), true) + s.Get(read, []byte("k"), true) - val, found, err := s.Get([]byte("k"), true) + val, found, err := s.Get(read, []byte("k"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "v", string(val)) } func TestGetCacheHitDeleted(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) - s.Get([]byte("gone"), true) + s.Get(read, []byte("gone"), true) - val, found, err := s.Get([]byte("gone"), true) + val, found, err := s.Get(read, []byte("gone"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -134,15 +130,15 @@ func TestGetCacheHitDeleted(t *testing.T) { func TestGetAfterSet(t *testing.T) { var readCalls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { readCalls.Add(1) return nil, false, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) s.Set([]byte("k"), []byte("from-set")) - val, found, err := s.Get([]byte("k"), true) + val, found, err := s.Get(readFunc, []byte("k"), true) require.NoError(t, err) require.True(t, found) require.Equal(t, "from-set", string(val)) @@ -151,11 +147,11 @@ func TestGetAfterSet(t *testing.T) { func TestGetAfterDelete(t *testing.T) { store := map[string][]byte{"k": []byte("v")} - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) s.Delete([]byte("k")) - val, found, err := s.Get([]byte("k"), true) + val, found, err := s.Get(read, []byte("k"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -169,12 +165,12 @@ func TestGetConcurrentSameKey(t *testing.T) { var readCalls atomic.Int64 gate := make(chan struct{}) - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := 
Reader(func(key []byte) ([]byte, bool, error) { readCalls.Add(1) <-gate return []byte("value"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) const n = 10 var wg sync.WaitGroup @@ -186,7 +182,7 @@ func TestGetConcurrentSameKey(t *testing.T) { wg.Add(1) go func(idx int) { defer wg.Done() - v, f, e := s.Get([]byte("shared"), true) + v, f, e := s.Get(readFunc, []byte("shared"), true) vals[idx] = string(v) founds[idx] = f errs[idx] = e @@ -213,15 +209,15 @@ func TestGetConcurrentSameKey(t *testing.T) { func TestGetContextCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { time.Sleep(time.Second) return []byte("late"), true, nil - } - s, _ := NewShard(ctx, threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(ctx, threading.NewAdHocPool(), 4096) cancel() - _, _, err := s.Get([]byte("k"), true) + _, _, err := s.Get(readFunc, []byte("k"), true) require.Error(t, err) } @@ -234,13 +230,12 @@ func TestGetUpdateLruTrue(t *testing.T) { "a": []byte("1"), "b": []byte("2"), } - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) - s.Get([]byte("a"), true) - s.Get([]byte("b"), true) + s.Get(read, []byte("a"), true) + s.Get(read, []byte("b"), true) - // Touch "a" via Get with updateLru=true, making "b" the LRU. 
- s.Get([]byte("a"), true) + s.Get(read, []byte("a"), true) s.lock.Lock() lru := s.gcQueue.PopLeastRecentlyUsed() @@ -254,13 +249,12 @@ func TestGetUpdateLruFalse(t *testing.T) { "a": []byte("1"), "b": []byte("2"), } - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) - s.Get([]byte("a"), true) - s.Get([]byte("b"), true) + s.Get(read, []byte("a"), true) + s.Get(read, []byte("b"), true) - // Access "a" without updating LRU — "a" should remain the LRU entry. - s.Get([]byte("a"), false) + s.Get(read, []byte("a"), false) s.lock.Lock() lru := s.gcQueue.PopLeastRecentlyUsed() @@ -274,57 +268,57 @@ func TestGetUpdateLruFalse(t *testing.T) { // --------------------------------------------------------------------------- func TestSetNewKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), []byte("v")) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "v", string(val)) } func TestSetOverwritesExistingKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), []byte("old")) s.Set([]byte("k"), []byte("new")) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "new", string(val)) } func TestSetOverwritesDeletedKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Delete([]byte("k")) s.Set([]byte("k"), []byte("revived")) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "revived", string(val)) } func TestSetNilValue(t *testing.T) { - s := newTestShard(t, 4096, 
map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), nil) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Nil(t, val) } func TestSetEmptyKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte(""), []byte("empty-key-val")) - val, found, err := s.Get([]byte(""), false) + val, found, err := s.Get(read, []byte(""), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "empty-key-val", string(val)) @@ -335,36 +329,36 @@ func TestSetEmptyKey(t *testing.T) { // --------------------------------------------------------------------------- func TestDeleteExistingKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), []byte("v")) s.Delete([]byte("k")) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.False(t, found) require.Nil(t, val) } func TestDeleteNonexistentKey(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Delete([]byte("ghost")) - val, found, err := s.Get([]byte("ghost"), false) + val, found, err := s.Get(read, []byte("ghost"), false) require.NoError(t, err) require.False(t, found) require.Nil(t, val) } func TestDeleteThenSetThenGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), []byte("v1")) s.Delete([]byte("k")) s.Set([]byte("k"), []byte("v2")) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(read, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "v2", string(val)) @@ -375,7 +369,7 @@ func TestDeleteThenSetThenGet(t 
*testing.T) { // --------------------------------------------------------------------------- func TestBatchSetSetsMultiple(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.BatchSet([]CacheUpdate{ {Key: []byte("a"), Value: []byte("1")}, @@ -386,7 +380,7 @@ func TestBatchSetSetsMultiple(t *testing.T) { for _, tc := range []struct { key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { - val, found, err := s.Get([]byte(tc.key), false) + val, found, err := s.Get(read, []byte(tc.key), false) require.NoError(t, err, "Get(%q)", tc.key) require.True(t, found, "Get(%q)", tc.key) require.Equal(t, tc.want, string(val), "Get(%q)", tc.key) @@ -394,7 +388,7 @@ func TestBatchSetSetsMultiple(t *testing.T) { } func TestBatchSetMixedSetAndDelete(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("keep"), []byte("v")) s.Set([]byte("remove"), []byte("v")) @@ -405,20 +399,20 @@ func TestBatchSetMixedSetAndDelete(t *testing.T) { {Key: []byte("new"), Value: []byte("fresh")}, }) - val, found, _ := s.Get([]byte("keep"), false) + val, found, _ := s.Get(read, []byte("keep"), false) require.True(t, found) require.Equal(t, "updated", string(val)) - _, found, _ = s.Get([]byte("remove"), false) + _, found, _ = s.Get(read, []byte("remove"), false) require.False(t, found, "expected remove to be deleted") - val, found, _ = s.Get([]byte("new"), false) + val, found, _ = s.Get(read, []byte("new"), false) require.True(t, found) require.Equal(t, "fresh", string(val)) } func TestBatchSetEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, _ := newTestShard(t, 4096, map[string][]byte{}) s.BatchSet(nil) s.BatchSet([]CacheUpdate{}) @@ -432,7 +426,7 @@ func TestBatchSetEmpty(t *testing.T) { // --------------------------------------------------------------------------- func TestBatchGetAllCached(t *testing.T) { - s := 
newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("a"), []byte("1")) s.Set([]byte("b"), []byte("2")) @@ -441,7 +435,7 @@ func TestBatchGetAllCached(t *testing.T) { "a": {}, "b": {}, } - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) for k, want := range map[string]string{"a": "1", "b": "2"} { r := keys[k] @@ -452,13 +446,13 @@ func TestBatchGetAllCached(t *testing.T) { func TestBatchGetAllFromDB(t *testing.T) { store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) keys := map[string]types.BatchGetResult{ "x": {}, "y": {}, } - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) for k, want := range map[string]string{"x": "10", "y": "20"} { r := keys[k] @@ -469,7 +463,7 @@ func TestBatchGetAllFromDB(t *testing.T) { func TestBatchGetMixedCachedAndDB(t *testing.T) { store := map[string][]byte{"db-key": []byte("from-db")} - s := newTestShard(t, 4096, store) + s, read := newTestShard(t, 4096, store) s.Set([]byte("cached"), []byte("from-cache")) @@ -477,7 +471,7 @@ func TestBatchGetMixedCachedAndDB(t *testing.T) { "cached": {}, "db-key": {}, } - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) require.True(t, keys["cached"].IsFound()) require.Equal(t, "from-cache", string(keys["cached"].Value)) @@ -486,17 +480,17 @@ func TestBatchGetMixedCachedAndDB(t *testing.T) { } func TestBatchGetNotFoundKeys(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) keys := map[string]types.BatchGetResult{ "nope": {}, } - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) require.False(t, keys["nope"].IsFound()) } func TestBatchGetDeletedKeys(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, 
map[string][]byte{}) s.Set([]byte("del"), []byte("v")) s.Delete([]byte("del")) @@ -504,46 +498,45 @@ func TestBatchGetDeletedKeys(t *testing.T) { keys := map[string]types.BatchGetResult{ "del": {}, } - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) require.False(t, keys["del"].IsFound()) } func TestBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") - readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) keys := map[string]types.BatchGetResult{ "fail": {}, } - require.NoError(t, s.BatchGet(keys), "BatchGet itself should not fail") + require.NoError(t, s.BatchGet(readFunc, keys), "BatchGet itself should not fail") require.Error(t, keys["fail"].Error, "expected per-key error") } func TestBatchGetEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) keys := map[string]types.BatchGetResult{} - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) } func TestBatchGetCachesResults(t *testing.T) { var readCalls atomic.Int64 store := map[string][]byte{"k": []byte("v")} - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { readCalls.Add(1) v, ok := store[string(key)] return v, ok, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) keys := map[string]types.BatchGetResult{"k": {}} - s.BatchGet(keys) + s.BatchGet(readFunc, keys) - // bulkInjectValues runs in a goroutine — give it a moment. 
time.Sleep(50 * time.Millisecond) - val, found, err := s.Get([]byte("k"), false) + val, found, err := s.Get(readFunc, []byte("k"), false) require.NoError(t, err) require.True(t, found) require.Equal(t, "v", string(val)) @@ -555,16 +548,14 @@ func TestBatchGetCachesResults(t *testing.T) { // --------------------------------------------------------------------------- func TestEvictionRespectMaxSize(t *testing.T) { - s := newTestShard(t, 30, map[string][]byte{}) + s, _ := newTestShard(t, 30, map[string][]byte{}) - // key="a" (1 byte) + value="aaaaaaaaaa" (10 bytes) = 11 bytes per entry s.Set([]byte("a"), []byte("aaaaaaaaaa")) s.Set([]byte("b"), []byte("bbbbbbbbbb")) _, entries := s.getSizeInfo() require.Equal(t, uint64(2), entries) - // Third entry pushes to 33 bytes, exceeding maxSize=30 → evict "a". s.Set([]byte("c"), []byte("cccccccccc")) bytes, entries := s.getSizeInfo() @@ -573,17 +564,14 @@ func TestEvictionRespectMaxSize(t *testing.T) { } func TestEvictionOrderIsLRU(t *testing.T) { - // Each entry: key(1) + value(4) = 5 bytes. maxSize=15 → fits 3. - s := newTestShard(t, 15, map[string][]byte{}) + s, read := newTestShard(t, 15, map[string][]byte{}) s.Set([]byte("a"), []byte("1111")) s.Set([]byte("b"), []byte("2222")) s.Set([]byte("c"), []byte("3333")) - // Touch "a" so "b" becomes the LRU. - s.Get([]byte("a"), true) + s.Get(read, []byte("a"), true) - // Insert "d" → total 20 > 15 → must evict. "b" is LRU. 
s.Set([]byte("d"), []byte("4444")) s.lock.Lock() @@ -596,10 +584,10 @@ func TestEvictionOrderIsLRU(t *testing.T) { } func TestEvictionOnDelete(t *testing.T) { - s := newTestShard(t, 10, map[string][]byte{}) + s, _ := newTestShard(t, 10, map[string][]byte{}) - s.Set([]byte("a"), []byte("val")) // size 4 - s.Delete([]byte("longkey1")) // size 8 + s.Set([]byte("a"), []byte("val")) + s.Delete([]byte("longkey1")) bytes, _ := s.getSizeInfo() require.LessOrEqual(t, bytes, uint64(10), "size should not exceed maxSize") @@ -609,12 +597,11 @@ func TestEvictionOnGetFromDB(t *testing.T) { store := map[string][]byte{ "x": []byte("12345678901234567890"), } - s := newTestShard(t, 25, store) + s, read := newTestShard(t, 25, store) s.Set([]byte("a"), []byte("small")) - // Reading "x" brings in 1+20=21 bytes, total becomes 6+21=27 > 25 → eviction. - s.Get([]byte("x"), true) + s.Get(read, []byte("x"), true) time.Sleep(50 * time.Millisecond) @@ -627,17 +614,17 @@ func TestEvictionOnGetFromDB(t *testing.T) { // --------------------------------------------------------------------------- func TestGetSizeInfoEmpty(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, _ := newTestShard(t, 4096, map[string][]byte{}) bytes, entries := s.getSizeInfo() require.Equal(t, uint64(0), bytes) require.Equal(t, uint64(0), entries) } func TestGetSizeInfoAfterSets(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, _ := newTestShard(t, 4096, map[string][]byte{}) - s.Set([]byte("ab"), []byte("cd")) // 2+2 = 4 - s.Set([]byte("efg"), []byte("hi")) // 3+2 = 5 + s.Set([]byte("ab"), []byte("cd")) + s.Set([]byte("efg"), []byte("hi")) bytes, entries := s.getSizeInfo() require.Equal(t, uint64(2), entries) @@ -649,9 +636,9 @@ func TestGetSizeInfoAfterSets(t *testing.T) { // --------------------------------------------------------------------------- func TestInjectValueNotFound(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, 
map[string][]byte{}) - val, found, err := s.Get([]byte("missing"), true) + val, found, err := s.Get(read, []byte("missing"), true) require.NoError(t, err) require.False(t, found) require.Nil(t, val) @@ -668,7 +655,7 @@ func TestInjectValueNotFound(t *testing.T) { // --------------------------------------------------------------------------- func TestConcurrentSetAndGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) const n = 100 var wg sync.WaitGroup @@ -684,7 +671,7 @@ func TestConcurrentSetAndGet(t *testing.T) { }() go func() { defer wg.Done() - s.Get(key, true) + s.Get(read, key, true) }() } @@ -696,7 +683,7 @@ func TestConcurrentBatchSetAndBatchGet(t *testing.T) { for i := 0; i < 50; i++ { store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) } - s := newTestShard(t, 100_000, store) + s, read := newTestShard(t, 100_000, store) var wg sync.WaitGroup @@ -720,7 +707,7 @@ func TestConcurrentBatchSetAndBatchGet(t *testing.T) { for i := 0; i < 50; i++ { keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} } - s.BatchGet(keys) + s.BatchGet(read, keys) }() wg.Wait() @@ -737,19 +724,19 @@ func (fp *failPool) Submit(_ context.Context, _ func()) error { } func TestGetPoolSubmitFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } - s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) + s, _ := NewShard(context.Background(), &failPool{}, 4096) - _, _, err := s.Get([]byte("k"), true) + _, _, err := s.Get(readFunc, []byte("k"), true) require.Error(t, err) } func TestBatchGetPoolSubmitFailure(t *testing.T) { - readFunc := func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil } - s, _ := NewShard(context.Background(), &failPool{}, readFunc, 4096) + readFunc := Reader(func(key []byte) ([]byte, bool, error) { 
return []byte("v"), true, nil }) + s, _ := NewShard(context.Background(), &failPool{}, 4096) keys := map[string]types.BatchGetResult{"k": {}} - err := s.BatchGet(keys) + err := s.BatchGet(readFunc, keys) require.Error(t, err) } @@ -758,7 +745,7 @@ func TestBatchGetPoolSubmitFailure(t *testing.T) { // --------------------------------------------------------------------------- func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { - s := newTestShard(t, 100, map[string][]byte{}) + s, _ := newTestShard(t, 100, map[string][]byte{}) s.Set([]byte("a"), []byte("small")) @@ -778,22 +765,21 @@ func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { func TestBatchGetDBErrorNotCached(t *testing.T) { var calls atomic.Int64 - readFunc := func(key []byte) ([]byte, bool, error) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { n := calls.Add(1) if n == 1 { return nil, false, errors.New("transient db error") } return []byte("ok"), true, nil - } - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), readFunc, 4096) + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) keys := map[string]types.BatchGetResult{"k": {}} - s.BatchGet(keys) + s.BatchGet(readFunc, keys) - // Wait for bulkInjectValues goroutine. 
time.Sleep(50 * time.Millisecond) - val, found, err := s.Get([]byte("k"), true) + val, found, err := s.Get(readFunc, []byte("k"), true) require.NoError(t, err, "retry should succeed") require.True(t, found) require.Equal(t, "ok", string(val)) @@ -804,12 +790,12 @@ func TestBatchGetDBErrorNotCached(t *testing.T) { // --------------------------------------------------------------------------- func TestSetDeleteThenBatchGet(t *testing.T) { - s := newTestShard(t, 4096, map[string][]byte{}) + s, read := newTestShard(t, 4096, map[string][]byte{}) s.Set([]byte("k"), []byte("v")) s.Delete([]byte("k")) keys := map[string]types.BatchGetResult{"k": {}} - require.NoError(t, s.BatchGet(keys)) + require.NoError(t, s.BatchGet(read, keys)) require.False(t, keys["k"].IsFound()) } diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 6d2bea30e8..9106175b47 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -110,17 +110,6 @@ func OpenWithCache( return nil, fmt.Errorf("failed to open database: %w", err) } - readFunc := func(key []byte) ([]byte, bool, error) { - val, getErr := db.Get(key) - if getErr != nil { - if errorutils.IsNotFound(getErr) { - return nil, false, nil - } - return nil, false, getErr - } - return val, true, nil - } - var cacheName string if config.EnableMetrics { cacheName = filepath.Base(config.DataDir) @@ -128,7 +117,6 @@ func OpenWithCache( cache, err := dbcache.BuildCache( ctx, - readFunc, config.CacheShardCount, config.CacheSize, readPool, From 94ae673ee27050a6617df94deea1658591598d93 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 11:36:39 -0500 Subject: [PATCH 061/119] made suggested changes --- sei-db/db_engine/dbcache/cache.go | 26 +++++++++++++++++-- sei-db/db_engine/dbcache/cached_batch.go | 3 +++ .../db_engine/dbcache/cached_key_value_db.go | 22 ++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/sei-db/db_engine/dbcache/cache.go 
b/sei-db/db_engine/dbcache/cache.go index 0c27d22bf3..6b9687c234 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -16,10 +16,25 @@ import ( type Reader func(key []byte) (value []byte, found bool, err error) // Cache describes a read-through cache backed by a Reader. +// +// Warning: it is not safe to mutate byte slices (keys or values) passed to or received from the cache. +// A cache is not required to make defensive copies, and so these slices must be treated as immutable. +// +// Although several methods on this interface return errors, the conditions when a cache +// is permitted to actually return an error is limited at the API level. A cache method +// may return an error under the following conditions: +// - malformed input (e.g. a nil key) +// - the Reader method returns an error (for methods that accpet a Reader) +// - the cache is shutting down +// - the cache's work pools are shutting down type Cache interface { // Get returns the value for the given key, or (nil, false, nil) if not found. - // On a cache miss the provided Reader is called to fetch from the backing store. + // On a cache miss the provided Reader is called to fetch from the backing store, + // and the result is loaded into the cache. + // + // It is not safe to mutate the key slice after calling this method, nor is it safe to mutate the value slice + // that is returned. Get( // Reads a value from the backing store on cache miss. read Reader, @@ -34,16 +49,23 @@ type Cache interface { // Perform a batch read operation. Given a map of keys to read, performs the reads and updates the // map with the results. On cache misses the provided Reader is called to fetch from the backing store. // - // It is not thread safe to read or mutate the map while this method is running. + // It is not thread safe to read or mutate the map while this method is running. It is also not safe to mutate the + // key or value slices in the map after calling this method. 
BatchGet(read Reader, keys map[string]types.BatchGetResult) error // Set sets the value for the given key. + // + // It is not safe to mutate the key or value slices after calling this method. Set(key []byte, value []byte) // Delete deletes the value for the given key. + // + // It is not safe to mutate the key slice after calling this method. Delete(key []byte) // BatchSet applies the given updates to the cache. + // + // It is not safe to mutate the key or value slices in the CacheUpdate structs after calling this method. BatchSet(updates []CacheUpdate) error } diff --git a/sei-db/db_engine/dbcache/cached_batch.go b/sei-db/db_engine/dbcache/cached_batch.go index e4995fe33b..25c5133b27 100644 --- a/sei-db/db_engine/dbcache/cached_batch.go +++ b/sei-db/db_engine/dbcache/cached_batch.go @@ -35,6 +35,9 @@ func (cb *cachedBatch) Commit(opts types.WriteOptions) error { return err } if err := cb.cache.BatchSet(cb.pending); err != nil { + // A cache write can only fail during a shutdown when the cache's context is cancelled, + // or when the cache's work pools have their contexts cancelled. Continuing to use the + // cache after shutdown is not permissible, and so this method must return an error. return fmt.Errorf("failed to update cache after commit: %w", err) } cb.pending = nil diff --git a/sei-db/db_engine/dbcache/cached_key_value_db.go b/sei-db/db_engine/dbcache/cached_key_value_db.go index 91551b1604..c0c5fb43ce 100644 --- a/sei-db/db_engine/dbcache/cached_key_value_db.go +++ b/sei-db/db_engine/dbcache/cached_key_value_db.go @@ -18,6 +18,9 @@ type cachedKeyValueDB struct { } // Combine a cache and a key-value database to create a new key-value database with caching. +// +// Due to the nature of a Cache, it is not safe to mutate byte slices (keys or values) passed to or received from +// any of the methods on a cachedKeyValueDB after calling them. 
func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { read := func(key []byte) ([]byte, bool, error) { val, err := db.Get(key) @@ -32,6 +35,10 @@ func NewCachedKeyValueDB(db types.KeyValueDB, cache Cache) types.KeyValueDB { return &cachedKeyValueDB{db: db, cache: cache, read: read} } +// Get returns the value for the given key, or ErrNotFound if not found. +// +// It is not safe to mutate the key slice after calling this method, nor is it safe to mutate the value slice +// that is returned. func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { val, found, err := c.cache.Get(c.read, key, true) if err != nil { @@ -43,6 +50,11 @@ func (c *cachedKeyValueDB) Get(key []byte) ([]byte, error) { return val, nil } +// BatchGet performs a batch read operation. Given a map of keys to read, performs the reads and updates the +// map with the results. On cache misses the provided Reader is called to fetch from the backing store. +// +// It is not thread safe to read or mutate the map while this method is running. It is also not safe to mutate the +// key or value slices in the map after calling this method. func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error { err := c.cache.BatchGet(c.read, keys) if err != nil { @@ -51,6 +63,9 @@ func (c *cachedKeyValueDB) BatchGet(keys map[string]types.BatchGetResult) error return nil } +// Set sets the value for the given key. +// +// It is not safe to mutate the key or value slices after calling this method. func (c *cachedKeyValueDB) Set(key []byte, value []byte, opts types.WriteOptions) error { err := c.db.Set(key, value, opts) if err != nil { @@ -60,6 +75,9 @@ func (c *cachedKeyValueDB) Set(key []byte, value []byte, opts types.WriteOptions return nil } +// Delete deletes the value for the given key. +// +// It is not safe to mutate the key slice after calling this method. 
func (c *cachedKeyValueDB) Delete(key []byte, opts types.WriteOptions) error { err := c.db.Delete(key, opts) if err != nil { @@ -73,6 +91,10 @@ func (c *cachedKeyValueDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIte return c.db.NewIter(opts) } +// NewBatch returns a new batch for atomic writes. +// +// It is not safe to mutate the key/value slices passed to the batch once inserted. This remains true even +// after the batch is committed. func (c *cachedKeyValueDB) NewBatch() types.Batch { return newCachedBatch(c.db.NewBatch(), c.cache) } From ed10a26733ef0f6f74b74f2592b2290fcc41d393 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 11:59:44 -0500 Subject: [PATCH 062/119] made suggested changes --- sei-db/db_engine/dbcache/cached_key_value_db.go | 4 ++-- sei-db/db_engine/dbcache/lru_queue.go | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/sei-db/db_engine/dbcache/cached_key_value_db.go b/sei-db/db_engine/dbcache/cached_key_value_db.go index c0c5fb43ce..cd5d661512 100644 --- a/sei-db/db_engine/dbcache/cached_key_value_db.go +++ b/sei-db/db_engine/dbcache/cached_key_value_db.go @@ -10,14 +10,14 @@ import ( var _ types.KeyValueDB = (*cachedKeyValueDB)(nil) var _ types.Checkpointable = (*cachedKeyValueDB)(nil) -// Combines a cache and a key-value database to create a new key-value database with caching. +// A unified interface for a key-value database and its read-through cache. type cachedKeyValueDB struct { db types.KeyValueDB cache Cache read Reader } -// Combine a cache and a key-value database to create a new key-value database with caching. +// Combine a cache and a key-value database into a unified interface. // // Due to the nature of a Cache, it is not safe to mutate byte slices (keys or values) passed to or received from // any of the methods on a cachedKeyValueDB after calling them. 
diff --git a/sei-db/db_engine/dbcache/lru_queue.go b/sei-db/db_engine/dbcache/lru_queue.go index 6870679c9d..467e49f22d 100644 --- a/sei-db/db_engine/dbcache/lru_queue.go +++ b/sei-db/db_engine/dbcache/lru_queue.go @@ -1,6 +1,9 @@ package dbcache -import "container/list" +import ( + "container/list" + "fmt" +) // Implements a queue-like abstraction with LRU semantics. Not thread safe. type lruQueue struct { @@ -31,6 +34,10 @@ func (lru *lruQueue) Push( ) { if elem, ok := lru.entries[string(key)]; ok { entry := elem.Value.(*lruQueueEntry) + if size < entry.size { + // should be impossible + panic(fmt.Errorf("size tracking is corrupted: size %d < entry.size %d", size, entry.size)) + } lru.totalSize += size - entry.size entry.size = size lru.order.MoveToBack(elem) @@ -46,7 +53,7 @@ func (lru *lruQueue) Push( lru.totalSize += size } -// Signal that an entry has been interated with, moving it to the back of the queue +// Signal that an entry has been interacted with, moving it to the back of the queue // (i.e. making it so it doesn't get popped soon). 
func (lru *lruQueue) Touch(key []byte) { elem, ok := lru.entries[string(key)] @@ -78,6 +85,10 @@ func (lru *lruQueue) PopLeastRecentlyUsed() string { lru.order.Remove(elem) entry := elem.Value.(*lruQueueEntry) delete(lru.entries, entry.key) + if entry.size > lru.totalSize { + // should be impossible + panic(fmt.Errorf("size tracking is corrupted: entry.size %d > totalSize %d", entry.size, lru.totalSize)) + } lru.totalSize -= entry.size return entry.key } From 81dfd463d6ed62e094c4883b8b6722741fcec233 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 12:18:11 -0500 Subject: [PATCH 063/119] fix bug --- sei-db/state_db/sc/flatkv/store.go | 1 + 1 file changed, 1 insertion(+) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 2e0588bd46..ec3efca2c1 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -159,6 +159,7 @@ func NewCommitStore( return &CommitStore{ ctx: ctx, cancel: cancel, + config: *cfg, localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), codeWrites: make(map[string]*pendingKVWrite), From 783568355f140bb6be98b926f0bb58fd0ab1ea23 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 15:34:54 -0500 Subject: [PATCH 064/119] Implement a standard cache. 
--- sei-db/common/metrics/buckets.go | 23 + sei-db/db_engine/dbcache/cache.go | 35 + sei-db/db_engine/dbcache/cache_impl.go | 186 +++++ sei-db/db_engine/dbcache/cache_impl_test.go | 664 ++++++++++++++++ sei-db/db_engine/dbcache/cache_metrics.go | 136 ++++ sei-db/db_engine/dbcache/shard.go | 399 ++++++++++ sei-db/db_engine/dbcache/shard_test.go | 801 ++++++++++++++++++++ 7 files changed, 2244 insertions(+) create mode 100644 sei-db/common/metrics/buckets.go create mode 100644 sei-db/db_engine/dbcache/cache_impl.go create mode 100644 sei-db/db_engine/dbcache/cache_impl_test.go create mode 100644 sei-db/db_engine/dbcache/cache_metrics.go create mode 100644 sei-db/db_engine/dbcache/shard.go create mode 100644 sei-db/db_engine/dbcache/shard_test.go diff --git a/sei-db/common/metrics/buckets.go b/sei-db/common/metrics/buckets.go new file mode 100644 index 0000000000..42977fd032 --- /dev/null +++ b/sei-db/common/metrics/buckets.go @@ -0,0 +1,23 @@ +package metrics + +// Shared histogram bucket boundaries for use across the codebase. +// The OTel defaults are too coarse for meaningful percentile queries in Grafana. + +// LatencyBuckets covers 10μs to 5 minutes — wide enough for both fast key +// lookups and slow compactions/flushes without needing per-metric tuning. +var LatencyBuckets = []float64{ + 0.00001, 0.000025, 0.00005, 0.0001, 0.00025, 0.0005, // 10μs–500μs + 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, // 1ms–50ms + 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60, 120, 300, // 100ms–5min +} + +// ByteSizeBuckets covers 256B to 1GB for data size histograms. +var ByteSizeBuckets = []float64{ + 256, 1024, 4096, 16384, 65536, 262144, // 256B–256KB + 1 << 20, 4 << 20, 16 << 20, 64 << 20, 256 << 20, 1 << 30, // 1MB–1GB +} + +// CountBuckets covers 1 to 1M for per-operation step/iteration counts. 
+var CountBuckets = []float64{ + 1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 100000, 1000000, +} diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index da65017fdf..6b9687c234 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -1,6 +1,11 @@ package dbcache import ( + "context" + "fmt" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -76,3 +81,33 @@ type CacheUpdate struct { func (u *CacheUpdate) IsDelete() bool { return u.Value == nil } + +// BuildCache creates a new Cache. +func BuildCache( + ctx context.Context, + shardCount uint64, + maxSize uint64, + readPool threading.Pool, + miscPool threading.Pool, + cacheName string, + metricsScrapeInterval time.Duration, +) (Cache, error) { + + if maxSize == 0 { + return NewNoOpCache(), nil + } + + cache, err := NewStandardCache( + ctx, + shardCount, + maxSize, + readPool, + miscPool, + cacheName, + metricsScrapeInterval, + ) + if err != nil { + return nil, fmt.Errorf("failed to create cache: %w", err) + } + return cache, nil +} diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go new file mode 100644 index 0000000000..e7b9caba66 --- /dev/null +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -0,0 +1,186 @@ +package dbcache + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +var _ Cache = (*cache)(nil) + +// A standard implementation of a flatcache. +type cache struct { + ctx context.Context + + // A utility for assigning keys to shard indices. + shardManager *shardManager + + // The shards in the cache. + shards []*shard + + // A pool for asynchronous reads. + readPool threading.Pool + + // A pool for miscellaneous operations that are neither computationally intensive nor IO bound. 
+ miscPool threading.Pool +} + +// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the +// background size scrape runs every metricsScrapeInterval. +func NewStandardCache( + ctx context.Context, + // The number of shards in the cache. Must be a power of two and greater than 0. + shardCount uint64, + // The maximum size of the cache, in bytes. + maxSize uint64, + // A work pool for reading from the DB. + readPool threading.Pool, + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + miscPool threading.Pool, + // Name used as the "cache" attribute on metrics. Empty string disables metrics. + cacheName string, + // How often to scrape cache size for metrics. Ignored if cacheName is empty. + metricsScrapeInterval time.Duration, +) (Cache, error) { + if shardCount == 0 || (shardCount&(shardCount-1)) != 0 { + return nil, ErrNumShardsNotPowerOfTwo + } + if maxSize == 0 { + return nil, fmt.Errorf("maxSize must be greater than 0") + } + + shardManager, err := newShardManager(shardCount) + if err != nil { + return nil, fmt.Errorf("failed to create shard manager: %w", err) + } + sizePerShard := maxSize / shardCount + if sizePerShard == 0 { + return nil, fmt.Errorf("maxSize must be greater than shardCount") + } + + shards := make([]*shard, shardCount) + for i := uint64(0); i < shardCount; i++ { + shards[i], err = NewShard(ctx, readPool, sizePerShard) + if err != nil { + return nil, fmt.Errorf("failed to create shard: %w", err) + } + } + + c := &cache{ + ctx: ctx, + shardManager: shardManager, + shards: shards, + readPool: readPool, + miscPool: miscPool, + } + + if cacheName != "" { + metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) + for _, s := range c.shards { + s.metrics = metrics + } + } + + return c, nil +} + +func (c *cache) getCacheSizeInfo() (bytes uint64, entries uint64) { + for _, s := range c.shards { + b, e := s.getSizeInfo() + bytes += b + entries += 
e + } + return bytes, entries +} + +func (c *cache) BatchSet(updates []CacheUpdate) error { + // Sort entries by shard index so each shard is locked only once. + shardMap := make(map[uint64][]CacheUpdate) + for i := range updates { + idx := c.shardManager.Shard(updates[i].Key) + shardMap[idx] = append(shardMap[idx], updates[i]) + } + + var wg sync.WaitGroup + for shardIndex, shardEntries := range shardMap { + wg.Add(1) + err := c.miscPool.Submit(c.ctx, func() { + c.shards[shardIndex].BatchSet(shardEntries) + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit batch set: %w", err) + } + } + wg.Wait() + + return nil +} + +func (c *cache) BatchGet(read Reader, keys map[string]types.BatchGetResult) error { + work := make(map[uint64]map[string]types.BatchGetResult) + for key := range keys { + idx := c.shardManager.Shard([]byte(key)) + if work[idx] == nil { + work[idx] = make(map[string]types.BatchGetResult) + } + work[idx][key] = types.BatchGetResult{} + } + + var wg sync.WaitGroup + for shardIndex, subMap := range work { + wg.Add(1) + + err := c.miscPool.Submit(c.ctx, func() { + defer wg.Done() + err := c.shards[shardIndex].BatchGet(read, subMap) + if err != nil { + for key := range subMap { + subMap[key] = types.BatchGetResult{Error: err} + } + } + }) + if err != nil { + return fmt.Errorf("failed to submit batch get: %w", err) + } + } + wg.Wait() + + for _, subMap := range work { + for key, result := range subMap { + keys[key] = result + } + } + + return nil +} + +func (c *cache) Delete(key []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + shard.Delete(key) +} + +func (c *cache) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, error) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + + value, ok, err := shard.Get(read, key, updateLru) + if err != nil { + return nil, false, fmt.Errorf("failed to get value from shard: %w", err) + } + if !ok { + return nil, false, nil + } + 
return value, ok, nil +} + +func (c *cache) Set(key []byte, value []byte) { + shardIndex := c.shardManager.Shard(key) + shard := c.shards[shardIndex] + shard.Set(key, value) +} diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go new file mode 100644 index 0000000000..d11201478a --- /dev/null +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -0,0 +1,664 @@ +package dbcache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func noopRead(key []byte) ([]byte, bool, error) { return nil, false, nil } + +func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uint64) (Cache, Reader) { + t.Helper() + read := func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, false, nil + } + return v, true, nil + } + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, "", 0) + require.NoError(t, err) + return c, read +} + +// --------------------------------------------------------------------------- +// NewStandardCache — validation +// --------------------------------------------------------------------------- + +func TestNewStandardCacheValid(t *testing.T) { + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, "", 0) + require.NoError(t, err) + require.NotNil(t, c) +} + +func TestNewStandardCacheSingleShard(t *testing.T) { + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, "", 0) + require.NoError(t, err) + 
require.NotNil(t, c) +} + +func TestNewStandardCacheShardCountZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []uint64{3, 5, 6, 7, 9, 10} { + _, err := NewStandardCache(context.Background(), n, 1024, pool, pool, "", 0) + require.Error(t, err, "shardCount=%d", n) + } +} + +func TestNewStandardCacheMaxSizeZero(t *testing.T) { + pool := threading.NewAdHocPool() + _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { + pool := threading.NewAdHocPool() + // shardCount=4, maxSize=3 → sizePerShard=0 + _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, "", 0) + require.Error(t, err) +} + +func TestNewStandardCacheWithMetrics(t *testing.T) { + pool := threading.NewAdHocPool() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c, err := NewStandardCache(ctx, 2, 1024, pool, pool, "test-cache", time.Hour) + require.NoError(t, err) + require.NotNil(t, c) +} + +// --------------------------------------------------------------------------- +// Get +// --------------------------------------------------------------------------- + +func TestCacheGetFromDB(t *testing.T) { + store := map[string][]byte{"foo": []byte("bar")} + c, read := newTestCache(t, store, 4, 4096) + + val, found, err := c.Get(read, []byte("foo"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "bar", string(val)) +} + +func TestCacheGetNotFound(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + val, found, err := c.Get(read, []byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetAfterSet(t *testing.T) { + 
c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + + val, found, err := c.Get(read, []byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestCacheGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + c, read := newTestCache(t, store, 4, 4096) + + c.Delete([]byte("k")) + + val, found, err := c.Get(read, []byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestCacheGetDBError(t *testing.T) { + dbErr := errors.New("db fail") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) + + _, _, err := c.Get(readFunc, []byte("k"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestCacheGetSameKeyConsistentShard(t *testing.T) { + var readCalls atomic.Int64 + readFunc := func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return []byte("val"), true, nil + } + pool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, "", 0) + + val1, _, _ := c.Get(readFunc, []byte("key"), true) + val2, _, _ := c.Get(readFunc, []byte("key"), true) + + require.Equal(t, string(val1), string(val2)) + require.Equal(t, int64(1), readCalls.Load(), "second Get should hit cache") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestCacheSetNewKey(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + + val, found, err := c.Get(read, []byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "1", string(val)) +} + +func TestCacheSetOverwrite(t *testing.T) { + c, read := newTestCache(t, 
map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("old")) + c.Set([]byte("a"), []byte("new")) + + val, found, err := c.Get(read, []byte("a"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestCacheSetNilValue(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), nil) + + val, found, err := c.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestCacheDeleteExistingKey(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + _, found, err := c.Get(read, []byte("k"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteNonexistent(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Delete([]byte("ghost")) + + _, found, err := c.Get(read, []byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) +} + +func TestCacheDeleteThenSet(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v1")) + c.Delete([]byte("k")) + c.Set([]byte("k"), []byte("v2")) + + val, found, err := c.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestCacheBatchSetMultipleKeys(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: []byte("2")}, + {Key: []byte("c"), 
Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := c.Get(read, []byte(tc.key), false) + require.NoError(t, err, "key=%q", tc.key) + require.True(t, found, "key=%q", tc.key) + require.Equal(t, tc.want, string(val), "key=%q", tc.key) + } +} + +func TestCacheBatchSetMixedSetAndDelete(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("keep"), []byte("v")) + c.Set([]byte("remove"), []byte("v")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), Value: nil}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + require.NoError(t, err) + + val, found, _ := c.Get(read, []byte("keep"), false) + require.True(t, found) + require.Equal(t, "updated", string(val)) + + _, found, _ = c.Get(read, []byte("remove"), false) + require.False(t, found) + + val, found, _ = c.Get(read, []byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestCacheBatchSetEmpty(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) + + require.NoError(t, c.BatchSet(nil)) + require.NoError(t, c.BatchSet([]CacheUpdate{})) +} + +func TestCacheBatchSetPoolFailure(t *testing.T) { + readPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("k"), Value: []byte("v")}, + }) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAllCached(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + 
require.NoError(t, c.BatchGet(read, keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "1", string(keys["a"].Value)) + require.True(t, keys["b"].IsFound()) + require.Equal(t, "2", string(keys["b"].Value)) +} + +func TestCacheBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + c, read := newTestCache(t, store, 4, 4096) + + keys := map[string]types.BatchGetResult{"x": {}, "y": {}} + require.NoError(t, c.BatchGet(read, keys)) + + require.True(t, keys["x"].IsFound()) + require.Equal(t, "10", string(keys["x"].Value)) + require.True(t, keys["y"].IsFound()) + require.Equal(t, "20", string(keys["y"].Value)) +} + +func TestCacheBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + c, read := newTestCache(t, store, 4, 4096) + + c.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{"cached": {}, "db-key": {}} + require.NoError(t, c.BatchGet(read, keys)) + + require.True(t, keys["cached"].IsFound()) + require.Equal(t, "from-cache", string(keys["cached"].Value)) + require.True(t, keys["db-key"].IsFound()) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestCacheBatchGetNotFoundKeys(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + keys := map[string]types.BatchGetResult{"nope": {}} + require.NoError(t, c.BatchGet(read, keys)) + require.False(t, keys["nope"].IsFound()) +} + +func TestCacheBatchGetDeletedKey(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("k"), []byte("v")) + c.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, c.BatchGet(read, keys)) + require.False(t, keys["k"].IsFound()) +} + +func TestCacheBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } + pool := threading.NewAdHocPool() + c, _ 
:= NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) + + keys := map[string]types.BatchGetResult{"fail": {}} + require.NoError(t, c.BatchGet(readFunc, keys), "BatchGet itself should not fail") + require.Error(t, keys["fail"].Error) +} + +func TestCacheBatchGetEmpty(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + keys := map[string]types.BatchGetResult{} + require.NoError(t, c.BatchGet(read, keys)) +} + +func TestCacheBatchGetPoolFailure(t *testing.T) { + readPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) + + keys := map[string]types.BatchGetResult{"k": {}} + err := c.BatchGet(noopRead, keys) + require.Error(t, err) +} + +func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { + miscPool := threading.NewAdHocPool() + c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, miscPool, "", 0) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}} + require.NoError(t, c.BatchGet(noopRead, keys)) + + for k, r := range keys { + require.Error(t, r.Error, "key=%q should have per-key error", k) + } +} + +// --------------------------------------------------------------------------- +// Cross-shard distribution +// --------------------------------------------------------------------------- + +func TestCacheDistributesAcrossShards(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("key-%d", i)), []byte("v")) + } + + nonEmpty := 0 + for _, s := range impl.shards { + _, entries := s.getSizeInfo() + if entries > 0 { + nonEmpty++ + } + } + require.GreaterOrEqual(t, nonEmpty, 2, "keys should distribute across multiple shards") +} + +func TestCacheGetRoutesToSameShard(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + c.Set([]byte("key"), []byte("val")) + + idx := 
impl.shardManager.Shard([]byte("key")) + _, entries := impl.shards[idx].getSizeInfo() + require.Equal(t, uint64(1), entries, "key should be in the shard determined by shardManager") +} + +// --------------------------------------------------------------------------- +// getCacheSizeInfo +// --------------------------------------------------------------------------- + +func TestCacheGetCacheSizeInfoEmpty(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) +} + +func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 4, 4096) + impl := c.(*cache) + + for i := 0; i < 20; i++ { + c.Set([]byte(fmt.Sprintf("k%d", i)), []byte(fmt.Sprintf("v%d", i))) + } + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, uint64(20), entries) + require.Greater(t, bytes, uint64(0)) +} + +// --------------------------------------------------------------------------- +// Many keys — BatchGet/BatchSet spanning all shards +// --------------------------------------------------------------------------- + +func TestCacheBatchSetThenBatchGetManyKeys(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 100_000) + + updates := make([]CacheUpdate, 200) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("key-%03d", i)), + Value: []byte(fmt.Sprintf("val-%03d", i)), + } + } + require.NoError(t, c.BatchSet(updates)) + + keys := make(map[string]types.BatchGetResult, 200) + for i := 0; i < 200; i++ { + keys[fmt.Sprintf("key-%03d", i)] = types.BatchGetResult{} + } + require.NoError(t, c.BatchGet(read, keys)) + + for i := 0; i < 200; i++ { + k := fmt.Sprintf("key-%03d", i) + want := fmt.Sprintf("val-%03d", i) + require.True(t, keys[k].IsFound(), "key=%q", k) + require.Equal(t, want, string(keys[k].Value), "key=%q", k) + require.NoError(t, 
keys[k].Error, "key=%q", k) + } +} + +// --------------------------------------------------------------------------- +// Concurrency +// --------------------------------------------------------------------------- + +func TestCacheConcurrentGetSet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c, read := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go func() { + defer wg.Done() + c.Set(key, val) + }() + go func() { + defer wg.Done() + c.Get(read, key, true) + }() + } + wg.Wait() +} + +func TestCacheConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + c, read := newTestCache(t, store, 4, 100_000) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 50) + for i := range updates { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + c.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + c.BatchGet(read, keys) + }() + + wg.Wait() +} + +func TestCacheConcurrentDeleteAndGet(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 100_000) + + for i := 0; i < 100; i++ { + c.Set([]byte(fmt.Sprintf("k-%d", i)), []byte("v")) + } + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("k-%d", i)) + go func() { + defer wg.Done() + c.Delete(key) + }() + go func() { + defer wg.Done() + c.Get(read, key, true) + }() + } + wg.Wait() +} + +// 
--------------------------------------------------------------------------- +// Eviction through the cache layer +// --------------------------------------------------------------------------- + +func TestCacheEvictsPerShard(t *testing.T) { + c, _ := newTestCache(t, map[string][]byte{}, 1, 20) + impl := c.(*cache) + + c.Set([]byte("a"), []byte("11111111")) + c.Set([]byte("b"), []byte("22222222")) + + c.Set([]byte("c"), []byte("33333333")) + + bytes, _ := impl.shards[0].getSizeInfo() + require.LessOrEqual(t, bytes, uint64(20)) +} + +// --------------------------------------------------------------------------- +// Edge: BatchSet with keys all routed to the same shard +// --------------------------------------------------------------------------- + +func TestCacheBatchSetSameShard(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 1, 4096) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("x"), Value: []byte("1")}, + {Key: []byte("y"), Value: []byte("2")}, + {Key: []byte("z"), Value: []byte("3")}, + }) + require.NoError(t, err) + + for _, tc := range []struct{ key, want string }{{"x", "1"}, {"y", "2"}, {"z", "3"}} { + val, found, err := c.Get(read, []byte(tc.key), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, tc.want, string(val)) + } +} + +// --------------------------------------------------------------------------- +// Edge: BatchGet after BatchSet with deletes +// --------------------------------------------------------------------------- + +func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { + c, read := newTestCache(t, map[string][]byte{}, 4, 4096) + + c.Set([]byte("a"), []byte("1")) + c.Set([]byte("b"), []byte("2")) + c.Set([]byte("c"), []byte("3")) + + err := c.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("updated")}, + {Key: []byte("b"), Value: nil}, + }) + require.NoError(t, err) + + keys := map[string]types.BatchGetResult{"a": {}, "b": {}, "c": {}} + require.NoError(t, 
c.BatchGet(read, keys)) + + require.True(t, keys["a"].IsFound()) + require.Equal(t, "updated", string(keys["a"].Value)) + require.False(t, keys["b"].IsFound()) + require.True(t, keys["c"].IsFound()) + require.Equal(t, "3", string(keys["c"].Value)) +} + +// --------------------------------------------------------------------------- +// Power-of-two shard counts +// --------------------------------------------------------------------------- + +func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { + pool := threading.NewAdHocPool() + for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { + c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, "", 0) + require.NoError(t, err, "shardCount=%d", n) + require.NotNil(t, c, "shardCount=%d", n) + } +} diff --git a/sei-db/db_engine/dbcache/cache_metrics.go b/sei-db/db_engine/dbcache/cache_metrics.go new file mode 100644 index 0000000000..a6344bf08f --- /dev/null +++ b/sei-db/db_engine/dbcache/cache_metrics.go @@ -0,0 +1,136 @@ +package dbcache + +import ( + "context" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + + "github.com/sei-protocol/sei-chain/sei-db/common/metrics" +) + +const cacheMeterName = "seidb_pebblecache" + +// CacheMetrics records OTel metrics for a pebblecache instance. +// All report methods are nil-safe: if the receiver is nil, they are no-ops, +// allowing the cache to call them unconditionally regardless of whether metrics +// are enabled. +// +// The cacheName is used as the "cache" attribute on all recorded metrics, +// enabling multiple cache instances to be distinguished in dashboards. +type CacheMetrics struct { + // Pre-computed attribute option reused on every recording to avoid + // per-call allocations on the hot path. 
+ attrs metric.MeasurementOption + + sizeBytes metric.Int64Gauge + sizeEntries metric.Int64Gauge + hits metric.Int64Counter + misses metric.Int64Counter + missLatency metric.Float64Histogram +} + +// newCacheMetrics creates a CacheMetrics that records cache statistics via OTel. +// A background goroutine scrapes cache size every scrapeInterval until ctx is +// cancelled. The cacheName is attached as the "cache" attribute to all recorded +// metrics, enabling multiple cache instances to be distinguished in dashboards. +// +// Multiple instances are safe: OTel instrument registration is idempotent, so each +// call receives references to the same underlying instruments. The "cache" attribute +// distinguishes series (e.g. pebblecache_hits{cache="state"}). +func newCacheMetrics( + ctx context.Context, + cacheName string, + scrapeInterval time.Duration, + getSize func() (bytes uint64, entries uint64), +) *CacheMetrics { + meter := otel.Meter(cacheMeterName) + + sizeBytes, _ := meter.Int64Gauge( + "pebblecache_size_bytes", + metric.WithDescription("Current cache size in bytes"), + metric.WithUnit("By"), + ) + sizeEntries, _ := meter.Int64Gauge( + "pebblecache_size_entries", + metric.WithDescription("Current number of entries in the cache"), + metric.WithUnit("{count}"), + ) + hits, _ := meter.Int64Counter( + "pebblecache_hits", + metric.WithDescription("Total number of cache hits"), + metric.WithUnit("{count}"), + ) + misses, _ := meter.Int64Counter( + "pebblecache_misses", + metric.WithDescription("Total number of cache misses"), + metric.WithUnit("{count}"), + ) + missLatency, _ := meter.Float64Histogram( + "pebblecache_miss_latency", + metric.WithDescription("Time taken to resolve a cache miss from the backing store"), + metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(metrics.LatencyBuckets...), + ) + + cm := &CacheMetrics{ + attrs: metric.WithAttributes(attribute.String("cache", cacheName)), + sizeBytes: sizeBytes, + sizeEntries: sizeEntries, + hits: 
hits, + misses: misses, + missLatency: missLatency, + } + + go cm.collectLoop(ctx, scrapeInterval, getSize) + + return cm +} + +func (cm *CacheMetrics) reportCacheHits(count int64) { + if cm == nil { + return + } + cm.hits.Add(context.Background(), count, cm.attrs) +} + +func (cm *CacheMetrics) reportCacheMisses(count int64) { + if cm == nil { + return + } + cm.misses.Add(context.Background(), count, cm.attrs) +} + +func (cm *CacheMetrics) reportCacheMissLatency(latency time.Duration) { + if cm == nil { + return + } + cm.missLatency.Record(context.Background(), latency.Seconds(), cm.attrs) +} + +// collectLoop periodically scrapes cache size from the provided function +// and records it as gauge values. It exits when ctx is cancelled. +func (cm *CacheMetrics) collectLoop( + ctx context.Context, + interval time.Duration, + getSize func() (bytes uint64, entries uint64), +) { + + if cm == nil { + return + } + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + bytes, entries := getSize() + cm.sizeBytes.Record(ctx, int64(bytes), cm.attrs) //nolint:gosec // G115: safe, cache size fits int64 + cm.sizeEntries.Record(ctx, int64(entries), cm.attrs) //nolint:gosec // G115: safe, entry count fits int64 + } + } +} diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go new file mode 100644 index 0000000000..80f8c18aba --- /dev/null +++ b/sei-db/db_engine/dbcache/shard.go @@ -0,0 +1,399 @@ +package dbcache + +import ( + "bytes" + "context" + "fmt" + "sync" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// A single shard of a Cache. +type shard struct { + ctx context.Context + + // A lock to protect the shard's data. + lock sync.Mutex + + // The data in the shard. + data map[string]*shardEntry + + // Organizes data for garbage collection. 
+ gcQueue *lruQueue + + // A pool for asynchronous reads. + readPool threading.Pool + + // The maximum size of this cache, in bytes. + maxSize uint64 + + // Cache-level metrics. Nil-safe; if nil, no metrics are recorded. + metrics *CacheMetrics +} + +// The result of a read from the underlying database. +type readResult struct { + value []byte + err error +} + +// The status of a value in the cache. +type valueStatus int + +const ( + // The value is not known and we are not currently attempting to find it. + statusUnknown valueStatus = iota + // We've scheduled a read of the value but haven't yet finsihed the read. + statusScheduled + // The data is available. + statusAvailable + // We are aware that the value is deleted (special case of data being available). + statusDeleted +) + +// A single shardEntry in a shard. Records data for a single key. +type shardEntry struct { + // The parent shard that contains this entry. + shard *shard + + // The current status of this entry. + status valueStatus + + // The value, if known. + value []byte + + // If the value is not available when we request it, + // it will be written to this channel when it is available. + valueChan chan readResult +} + +// Creates a new Shard. +func NewShard( + ctx context.Context, + readPool threading.Pool, + maxSize uint64, +) (*shard, error) { + + if maxSize <= 0 { + return nil, fmt.Errorf("maxSize must be greater than 0") + } + + return &shard{ + ctx: ctx, + readPool: readPool, + lock: sync.Mutex{}, + data: make(map[string]*shardEntry), + gcQueue: newLRUQueue(), + maxSize: maxSize, + }, nil +} + +// Get returns the value for the given key, or (nil, false, nil) if not found. 
+func (s *shard) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, error) { + s.lock.Lock() + + entry := s.getEntry(key) + + switch entry.status { + case statusAvailable: + return s.getAvailable(entry, key, updateLru) + case statusDeleted: + return s.getDeleted(key, updateLru) + case statusScheduled: + return s.getScheduled(entry) + case statusUnknown: + return s.getUnknown(read, entry, key) + default: + s.lock.Unlock() + panic(fmt.Sprintf("unexpected status: %#v", entry.status)) + } +} + +// Handles Get for a key whose value is already cached. Lock must be held; releases it. +func (s *shard) getAvailable(entry *shardEntry, key []byte, updateLru bool) ([]byte, bool, error) { + value := bytes.Clone(entry.value) + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return value, true, nil +} + +// Handles Get for a key known to be deleted. Lock must be held; releases it. +func (s *shard) getDeleted(key []byte, updateLru bool) ([]byte, bool, error) { + if updateLru { + s.gcQueue.Touch(key) + } + s.lock.Unlock() + s.metrics.reportCacheHits(1) + return nil, false, nil +} + +// Handles Get for a key with an in-flight read from another goroutine. Lock must be held; releases it. +func (s *shard) getScheduled(entry *shardEntry) ([]byte, bool, error) { + valueChan := entry.valueChan + s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() + result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, fmt.Errorf("failed to read value from database: %w", result.err) + } + return result.value, result.value != nil, nil +} + +// Handles Get for a key not yet read. Schedules the read and waits. Lock must be held; releases it. 
+func (s *shard) getUnknown(read Reader, entry *shardEntry, key []byte) ([]byte, bool, error) { + entry.status = statusScheduled + valueChan := make(chan readResult, 1) + entry.valueChan = valueChan + s.lock.Unlock() + s.metrics.reportCacheMisses(1) + startTime := time.Now() + err := s.readPool.Submit(s.ctx, func() { + value, _, readErr := read(key) + entry.injectValue(key, readResult{value: value, err: readErr}) + }) + if err != nil { + return nil, false, fmt.Errorf("failed to schedule read: %w", err) + } + result, err := threading.InterruptiblePull(s.ctx, valueChan) + s.metrics.reportCacheMissLatency(time.Since(startTime)) + if err != nil { + return nil, false, fmt.Errorf("failed to pull value from channel: %w", err) + } + valueChan <- result // reload the channel in case there are other listeners + if result.err != nil { + return nil, false, result.err + } + return result.value, result.value != nil, nil +} + +// This method is called by the read scheduler when a value becomes available. +func (se *shardEntry) injectValue(key []byte, result readResult) { + se.shard.lock.Lock() + + if se.status == statusScheduled { + if result.err != nil { + // Don't cache errors — reset so the next caller retries. + delete(se.shard.data, string(key)) + } else if result.value == nil { + se.status = statusDeleted + se.value = nil + se.shard.gcQueue.Push(key, uint64(len(key))) + se.shard.evictUnlocked() + } else { + se.status = statusAvailable + se.value = result.value + se.shard.gcQueue.Push(key, uint64(len(key)+len(result.value))) //nolint:gosec // G115: len is non-negative + se.shard.evictUnlocked() + } + } + + se.shard.lock.Unlock() + + se.valueChan <- result +} + +// Get a shard entry for a given key. Caller is responsible for holding the shard's lock +// when this method is called. 
+func (s *shard) getEntry(key []byte) *shardEntry { + if entry, ok := s.data[string(key)]; ok { + return entry + } + entry := &shardEntry{ + shard: s, + status: statusUnknown, + } + keyStr := string(key) + s.data[keyStr] = entry + return entry +} + +// Tracks a key whose value is not yet available and must be waited on. +type pendingRead struct { + key string + entry *shardEntry + valueChan chan readResult + needsSchedule bool + // Populated after the read completes, used by bulkInjectValues. + result readResult +} + +// BatchGet reads a batch of keys from the shard. Results are written into the provided map. +func (s *shard) BatchGet(read Reader, keys map[string]types.BatchGetResult) error { + pending := make([]pendingRead, 0, len(keys)) + var hits int64 + + s.lock.Lock() + for key := range keys { + entry := s.getEntry([]byte(key)) + + switch entry.status { + case statusAvailable, statusDeleted: + keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value)} + hits++ + case statusScheduled: + pending = append(pending, pendingRead{ + key: key, + entry: entry, + valueChan: entry.valueChan, + }) + case statusUnknown: + entry.status = statusScheduled + valueChan := make(chan readResult, 1) + entry.valueChan = valueChan + pending = append(pending, pendingRead{ + key: key, + entry: entry, + valueChan: valueChan, + needsSchedule: true, + }) + default: + s.lock.Unlock() + panic(fmt.Sprintf("unexpected status: %#v", entry.status)) + } + } + s.lock.Unlock() + + if hits > 0 { + s.metrics.reportCacheHits(hits) + } + if len(pending) == 0 { + return nil + } + + s.metrics.reportCacheMisses(int64(len(pending))) + startTime := time.Now() + + for i := range pending { + if pending[i].needsSchedule { + p := &pending[i] + err := s.readPool.Submit(s.ctx, func() { + value, _, readErr := read([]byte(p.key)) + p.entry.valueChan <- readResult{value: value, err: readErr} + }) + if err != nil { + return fmt.Errorf("failed to schedule read: %w", err) + } + } + } + + for i := range pending 
{ + result, err := threading.InterruptiblePull(s.ctx, pending[i].valueChan) + if err != nil { + return fmt.Errorf("failed to pull value from channel: %w", err) + } + pending[i].valueChan <- result + pending[i].result = result + + if result.err != nil { + keys[pending[i].key] = types.BatchGetResult{Error: result.err} + } else { + keys[pending[i].key] = types.BatchGetResult{Value: result.value} + } + } + + s.metrics.reportCacheMissLatency(time.Since(startTime)) + go s.bulkInjectValues(pending) + + return nil +} + +// Applies deferred cache updates for a batch of reads under a single lock acquisition. +func (s *shard) bulkInjectValues(reads []pendingRead) { + s.lock.Lock() + for i := range reads { + entry := reads[i].entry + if entry.status != statusScheduled { + continue + } + result := reads[i].result + if result.err != nil { + // Don't cache errors — reset so the next caller retries. + delete(s.data, reads[i].key) + } else if result.value == nil { + entry.status = statusDeleted + entry.value = nil + s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key))) + } else { + entry.status = statusAvailable + entry.value = result.value + s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key)+len(result.value))) //nolint:gosec // G115 + } + } + s.evictUnlocked() + s.lock.Unlock() +} + +// Evicts least recently used entries until the cache is within its size budget. +// Caller is required to hold the lock. +func (s *shard) evictUnlocked() { + for s.gcQueue.GetTotalSize() > s.maxSize { + next := s.gcQueue.PopLeastRecentlyUsed() + delete(s.data, next) + } +} + +// getSizeInfo returns the current size (bytes) and entry count under the shard lock. +func (s *shard) getSizeInfo() (bytes uint64, entries uint64) { + s.lock.Lock() + defer s.lock.Unlock() + return s.gcQueue.GetTotalSize(), s.gcQueue.GetCount() +} + +// Set sets the value for the given key. 
+func (s *shard) Set(key []byte, value []byte) { + s.lock.Lock() + s.setUnlocked(key, value) + s.lock.Unlock() +} + +// Set a value. Caller is required to hold the lock. +func (s *shard) setUnlocked(key []byte, value []byte) { + entry := s.getEntry(key) + entry.status = statusAvailable + entry.value = value + + s.gcQueue.Push(key, uint64(len(key)+len(value))) //nolint:gosec // G115 + s.evictUnlocked() +} + +// BatchSet sets the values for a batch of keys. +func (s *shard) BatchSet(entries []CacheUpdate) { + s.lock.Lock() + for i := range entries { + if entries[i].IsDelete() { + s.deleteUnlocked(entries[i].Key) + } else { + s.setUnlocked(entries[i].Key, entries[i].Value) + } + } + s.lock.Unlock() +} + +// Delete deletes the value for the given key. +func (s *shard) Delete(key []byte) { + s.lock.Lock() + s.deleteUnlocked(key) + s.lock.Unlock() +} + +// Delete a value. Caller is required to hold the lock. +func (s *shard) deleteUnlocked(key []byte) { + entry := s.getEntry(key) + entry.status = statusDeleted + entry.value = nil + + s.gcQueue.Push(key, uint64(len(key))) + s.evictUnlocked() +} diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go new file mode 100644 index 0000000000..b39414959f --- /dev/null +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -0,0 +1,801 @@ +package dbcache + +import ( + "context" + "errors" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" +) + +// --------------------------------------------------------------------------- +// helpers +// --------------------------------------------------------------------------- + +func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) (*shard, Reader) { + t.Helper() + read := Reader(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + if !ok { + return nil, 
false, nil + } + return v, true, nil + }) + s, err := NewShard(context.Background(), threading.NewAdHocPool(), maxSize) + require.NoError(t, err) + return s, read +} + +// --------------------------------------------------------------------------- +// NewShard +// --------------------------------------------------------------------------- + +func TestNewShardValid(t *testing.T) { + s, err := NewShard(context.Background(), threading.NewAdHocPool(), 1024) + require.NoError(t, err) + require.NotNil(t, s) +} + +func TestNewShardZeroMaxSize(t *testing.T) { + _, err := NewShard(context.Background(), threading.NewAdHocPool(), 0) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — cache miss flows +// --------------------------------------------------------------------------- + +func TestGetCacheMissFoundInDB(t *testing.T) { + store := map[string][]byte{"hello": []byte("world")} + s, read := newTestShard(t, 4096, store) + + val, found, err := s.Get(read, []byte("hello"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "world", string(val)) +} + +func TestGetCacheMissNotFoundInDB(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + val, found, err := s.Get(read, []byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestGetCacheMissDBError(t *testing.T) { + dbErr := errors.New("disk on fire") + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + _, _, err := s.Get(readFunc, []byte("boom"), true) + require.Error(t, err) + require.ErrorIs(t, err, dbErr) +} + +func TestGetDBErrorDoesNotCacheResult(t *testing.T) { + var calls atomic.Int64 + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient") + } + return 
[]byte("recovered"), true, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + _, _, err := s.Get(readFunc, []byte("key"), true) + require.Error(t, err, "first call should fail") + + val, found, err := s.Get(readFunc, []byte("key"), true) + require.NoError(t, err, "second call should succeed") + require.True(t, found) + require.Equal(t, "recovered", string(val)) + require.Equal(t, int64(2), calls.Load(), "error should not be cached") +} + +// --------------------------------------------------------------------------- +// Get — cache hit flows +// --------------------------------------------------------------------------- + +func TestGetCacheHitAvailable(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{"k": []byte("v")}) + + s.Get(read, []byte("k"), true) + + val, found, err := s.Get(read, []byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestGetCacheHitDeleted(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Get(read, []byte("gone"), true) + + val, found, err := s.Get(read, []byte("gone"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestGetAfterSet(t *testing.T) { + var readCalls atomic.Int64 + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + return nil, false, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + s.Set([]byte("k"), []byte("from-set")) + + val, found, err := s.Get(readFunc, []byte("k"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "from-set", string(val)) + require.Equal(t, int64(0), readCalls.Load(), "readFunc should not be called for Set-populated entry") +} + +func TestGetAfterDelete(t *testing.T) { + store := map[string][]byte{"k": []byte("v")} + s, read := newTestShard(t, 4096, store) + + s.Delete([]byte("k")) + + val, found, err := s.Get(read, 
[]byte("k"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +// --------------------------------------------------------------------------- +// Get — concurrent reads on the same key +// --------------------------------------------------------------------------- + +func TestGetConcurrentSameKey(t *testing.T) { + var readCalls atomic.Int64 + gate := make(chan struct{}) + + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + <-gate + return []byte("value"), true, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + const n = 10 + var wg sync.WaitGroup + errs := make([]error, n) + vals := make([]string, n) + founds := make([]bool, n) + + for i := 0; i < n; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + v, f, e := s.Get(readFunc, []byte("shared"), true) + vals[idx] = string(v) + founds[idx] = f + errs[idx] = e + }(i) + } + + time.Sleep(50 * time.Millisecond) + close(gate) + wg.Wait() + + for i := 0; i < n; i++ { + require.NoError(t, errs[i], "goroutine %d", i) + require.True(t, founds[i], "goroutine %d", i) + require.Equal(t, "value", vals[i], "goroutine %d", i) + } + + require.Equal(t, int64(1), readCalls.Load(), "readFunc should be called exactly once") +} + +// --------------------------------------------------------------------------- +// Get — context cancellation +// --------------------------------------------------------------------------- + +func TestGetContextCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + time.Sleep(time.Second) + return []byte("late"), true, nil + }) + s, _ := NewShard(ctx, threading.NewAdHocPool(), 4096) + + cancel() + + _, _, err := s.Get(readFunc, []byte("k"), true) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Get — updateLru flag +// 
--------------------------------------------------------------------------- + +func TestGetUpdateLruTrue(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s, read := newTestShard(t, 4096, store) + + s.Get(read, []byte("a"), true) + s.Get(read, []byte("b"), true) + + s.Get(read, []byte("a"), true) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "b", lru) +} + +func TestGetUpdateLruFalse(t *testing.T) { + store := map[string][]byte{ + "a": []byte("1"), + "b": []byte("2"), + } + s, read := newTestShard(t, 4096, store) + + s.Get(read, []byte("a"), true) + s.Get(read, []byte("b"), true) + + s.Get(read, []byte("a"), false) + + s.lock.Lock() + lru := s.gcQueue.PopLeastRecentlyUsed() + s.lock.Unlock() + + require.Equal(t, "a", lru, "updateLru=false should not move entry") +} + +// --------------------------------------------------------------------------- +// Set +// --------------------------------------------------------------------------- + +func TestSetNewKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) +} + +func TestSetOverwritesExistingKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("old")) + s.Set([]byte("k"), []byte("new")) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "new", string(val)) +} + +func TestSetOverwritesDeletedKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("revived")) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "revived", string(val)) +} + +func TestSetNilValue(t 
*testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), nil) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Nil(t, val) +} + +func TestSetEmptyKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte(""), []byte("empty-key-val")) + + val, found, err := s.Get(read, []byte(""), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "empty-key-val", string(val)) +} + +// --------------------------------------------------------------------------- +// Delete +// --------------------------------------------------------------------------- + +func TestDeleteExistingKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteNonexistentKey(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Delete([]byte("ghost")) + + val, found, err := s.Get(read, []byte("ghost"), false) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) +} + +func TestDeleteThenSetThenGet(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v1")) + s.Delete([]byte("k")) + s.Set([]byte("k"), []byte("v2")) + + val, found, err := s.Get(read, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v2", string(val)) +} + +// --------------------------------------------------------------------------- +// BatchSet +// --------------------------------------------------------------------------- + +func TestBatchSetSetsMultiple(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("a"), Value: []byte("1")}, + {Key: []byte("b"), Value: 
[]byte("2")}, + {Key: []byte("c"), Value: []byte("3")}, + }) + + for _, tc := range []struct { + key, want string + }{{"a", "1"}, {"b", "2"}, {"c", "3"}} { + val, found, err := s.Get(read, []byte(tc.key), false) + require.NoError(t, err, "Get(%q)", tc.key) + require.True(t, found, "Get(%q)", tc.key) + require.Equal(t, tc.want, string(val), "Get(%q)", tc.key) + } +} + +func TestBatchSetMixedSetAndDelete(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("keep"), []byte("v")) + s.Set([]byte("remove"), []byte("v")) + + s.BatchSet([]CacheUpdate{ + {Key: []byte("keep"), Value: []byte("updated")}, + {Key: []byte("remove"), Value: nil}, + {Key: []byte("new"), Value: []byte("fresh")}, + }) + + val, found, _ := s.Get(read, []byte("keep"), false) + require.True(t, found) + require.Equal(t, "updated", string(val)) + + _, found, _ = s.Get(read, []byte("remove"), false) + require.False(t, found, "expected remove to be deleted") + + val, found, _ = s.Get(read, []byte("new"), false) + require.True(t, found) + require.Equal(t, "fresh", string(val)) +} + +func TestBatchSetEmpty(t *testing.T) { + s, _ := newTestShard(t, 4096, map[string][]byte{}) + s.BatchSet(nil) + s.BatchSet([]CacheUpdate{}) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) +} + +// --------------------------------------------------------------------------- +// BatchGet +// --------------------------------------------------------------------------- + +func TestBatchGetAllCached(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1")) + s.Set([]byte("b"), []byte("2")) + + keys := map[string]types.BatchGetResult{ + "a": {}, + "b": {}, + } + require.NoError(t, s.BatchGet(read, keys)) + + for k, want := range map[string]string{"a": "1", "b": "2"} { + r := keys[k] + require.True(t, r.IsFound(), "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} 
+ +func TestBatchGetAllFromDB(t *testing.T) { + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + s, read := newTestShard(t, 4096, store) + + keys := map[string]types.BatchGetResult{ + "x": {}, + "y": {}, + } + require.NoError(t, s.BatchGet(read, keys)) + + for k, want := range map[string]string{"x": "10", "y": "20"} { + r := keys[k] + require.True(t, r.IsFound(), "key=%q", k) + require.Equal(t, want, string(r.Value), "key=%q", k) + } +} + +func TestBatchGetMixedCachedAndDB(t *testing.T) { + store := map[string][]byte{"db-key": []byte("from-db")} + s, read := newTestShard(t, 4096, store) + + s.Set([]byte("cached"), []byte("from-cache")) + + keys := map[string]types.BatchGetResult{ + "cached": {}, + "db-key": {}, + } + require.NoError(t, s.BatchGet(read, keys)) + + require.True(t, keys["cached"].IsFound()) + require.Equal(t, "from-cache", string(keys["cached"].Value)) + require.True(t, keys["db-key"].IsFound()) + require.Equal(t, "from-db", string(keys["db-key"].Value)) +} + +func TestBatchGetNotFoundKeys(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{ + "nope": {}, + } + require.NoError(t, s.BatchGet(read, keys)) + require.False(t, keys["nope"].IsFound()) +} + +func TestBatchGetDeletedKeys(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("del"), []byte("v")) + s.Delete([]byte("del")) + + keys := map[string]types.BatchGetResult{ + "del": {}, + } + require.NoError(t, s.BatchGet(read, keys)) + require.False(t, keys["del"].IsFound()) +} + +func TestBatchGetDBError(t *testing.T) { + dbErr := errors.New("broken") + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + keys := map[string]types.BatchGetResult{ + "fail": {}, + } + require.NoError(t, s.BatchGet(readFunc, keys), "BatchGet itself should not fail") + require.Error(t, 
keys["fail"].Error, "expected per-key error") +} + +func TestBatchGetEmpty(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + keys := map[string]types.BatchGetResult{} + require.NoError(t, s.BatchGet(read, keys)) +} + +func TestBatchGetCachesResults(t *testing.T) { + var readCalls atomic.Int64 + store := map[string][]byte{"k": []byte("v")} + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + readCalls.Add(1) + v, ok := store[string(key)] + return v, ok, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(readFunc, keys) + + time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get(readFunc, []byte("k"), false) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "v", string(val)) + require.Equal(t, int64(1), readCalls.Load(), "result should be cached") +} + +// --------------------------------------------------------------------------- +// Eviction +// --------------------------------------------------------------------------- + +func TestEvictionRespectMaxSize(t *testing.T) { + s, _ := newTestShard(t, 30, map[string][]byte{}) + + s.Set([]byte("a"), []byte("aaaaaaaaaa")) + s.Set([]byte("b"), []byte("bbbbbbbbbb")) + + _, entries := s.getSizeInfo() + require.Equal(t, uint64(2), entries) + + s.Set([]byte("c"), []byte("cccccccccc")) + + bytes, entries := s.getSizeInfo() + require.LessOrEqual(t, bytes, uint64(30), "shard size should not exceed maxSize") + require.Equal(t, uint64(2), entries) +} + +func TestEvictionOrderIsLRU(t *testing.T) { + s, read := newTestShard(t, 15, map[string][]byte{}) + + s.Set([]byte("a"), []byte("1111")) + s.Set([]byte("b"), []byte("2222")) + s.Set([]byte("c"), []byte("3333")) + + s.Get(read, []byte("a"), true) + + s.Set([]byte("d"), []byte("4444")) + + s.lock.Lock() + _, bExists := s.data["b"] + _, aExists := s.data["a"] + s.lock.Unlock() + + require.False(t, bExists, "expected 'b' to be 
evicted (it was LRU)") + require.True(t, aExists, "expected 'a' to survive (it was recently touched)") +} + +func TestEvictionOnDelete(t *testing.T) { + s, _ := newTestShard(t, 10, map[string][]byte{}) + + s.Set([]byte("a"), []byte("val")) + s.Delete([]byte("longkey1")) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, uint64(10), "size should not exceed maxSize") +} + +func TestEvictionOnGetFromDB(t *testing.T) { + store := map[string][]byte{ + "x": []byte("12345678901234567890"), + } + s, read := newTestShard(t, 25, store) + + s.Set([]byte("a"), []byte("small")) + + s.Get(read, []byte("x"), true) + + time.Sleep(50 * time.Millisecond) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, uint64(25), "size should not exceed maxSize after DB read") +} + +// --------------------------------------------------------------------------- +// getSizeInfo +// --------------------------------------------------------------------------- + +func TestGetSizeInfoEmpty(t *testing.T) { + s, _ := newTestShard(t, 4096, map[string][]byte{}) + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(0), bytes) + require.Equal(t, uint64(0), entries) +} + +func TestGetSizeInfoAfterSets(t *testing.T) { + s, _ := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("ab"), []byte("cd")) + s.Set([]byte("efg"), []byte("hi")) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(2), entries) + require.Equal(t, uint64(9), bytes) +} + +// --------------------------------------------------------------------------- +// injectValue — edge cases +// --------------------------------------------------------------------------- + +func TestInjectValueNotFound(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + val, found, err := s.Get(read, []byte("missing"), true) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) + + s.lock.Lock() + entry, ok := s.data["missing"] + s.lock.Unlock() + require.True(t, ok, "entry 
should exist in map") + require.Equal(t, statusDeleted, entry.status) +} + +// --------------------------------------------------------------------------- +// Concurrent Set and Get +// --------------------------------------------------------------------------- + +func TestConcurrentSetAndGet(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + const n = 100 + var wg sync.WaitGroup + + for i := 0; i < n; i++ { + wg.Add(2) + key := []byte(fmt.Sprintf("key-%d", i)) + val := []byte(fmt.Sprintf("val-%d", i)) + + go func() { + defer wg.Done() + s.Set(key, val) + }() + go func() { + defer wg.Done() + s.Get(read, key, true) + }() + } + + wg.Wait() +} + +func TestConcurrentBatchSetAndBatchGet(t *testing.T) { + store := map[string][]byte{} + for i := 0; i < 50; i++ { + store[fmt.Sprintf("db-%d", i)] = []byte(fmt.Sprintf("v-%d", i)) + } + s, read := newTestShard(t, 100_000, store) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + updates := make([]CacheUpdate, 20) + for i := 0; i < 20; i++ { + updates[i] = CacheUpdate{ + Key: []byte(fmt.Sprintf("set-%d", i)), + Value: []byte(fmt.Sprintf("sv-%d", i)), + } + } + s.BatchSet(updates) + }() + + wg.Add(1) + go func() { + defer wg.Done() + keys := make(map[string]types.BatchGetResult) + for i := 0; i < 50; i++ { + keys[fmt.Sprintf("db-%d", i)] = types.BatchGetResult{} + } + s.BatchGet(read, keys) + }() + + wg.Wait() +} + +// --------------------------------------------------------------------------- +// Pool submission failure +// --------------------------------------------------------------------------- + +type failPool struct{} + +func (fp *failPool) Submit(_ context.Context, _ func()) error { + return errors.New("pool exhausted") +} + +func TestGetPoolSubmitFailure(t *testing.T) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) + s, _ := NewShard(context.Background(), &failPool{}, 4096) + + _, _, err := s.Get(readFunc, []byte("k"), 
true) + require.Error(t, err) +} + +func TestBatchGetPoolSubmitFailure(t *testing.T) { + readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) + s, _ := NewShard(context.Background(), &failPool{}, 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + err := s.BatchGet(readFunc, keys) + require.Error(t, err) +} + +// --------------------------------------------------------------------------- +// Large values +// --------------------------------------------------------------------------- + +func TestSetLargeValueExceedingMaxSizeEvictsOldEntries(t *testing.T) { + s, _ := newTestShard(t, 100, map[string][]byte{}) + + s.Set([]byte("a"), []byte("small")) + + bigVal := make([]byte, 95) + for i := range bigVal { + bigVal[i] = 'X' + } + s.Set([]byte("b"), bigVal) + + bytes, _ := s.getSizeInfo() + require.LessOrEqual(t, bytes, uint64(100), "size should not exceed maxSize after large set") +} + +// --------------------------------------------------------------------------- +// bulkInjectValues — error entries are not cached +// --------------------------------------------------------------------------- + +func TestBatchGetDBErrorNotCached(t *testing.T) { + var calls atomic.Int64 + readFunc := Reader(func(key []byte) ([]byte, bool, error) { + n := calls.Add(1) + if n == 1 { + return nil, false, errors.New("transient db error") + } + return []byte("ok"), true, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + + keys := map[string]types.BatchGetResult{"k": {}} + s.BatchGet(readFunc, keys) + + time.Sleep(50 * time.Millisecond) + + val, found, err := s.Get(readFunc, []byte("k"), true) + require.NoError(t, err, "retry should succeed") + require.True(t, found) + require.Equal(t, "ok", string(val)) +} + +// --------------------------------------------------------------------------- +// Edge: Set then Delete then BatchGet +// --------------------------------------------------------------------------- + 
+func TestSetDeleteThenBatchGet(t *testing.T) { + s, read := newTestShard(t, 4096, map[string][]byte{}) + + s.Set([]byte("k"), []byte("v")) + s.Delete([]byte("k")) + + keys := map[string]types.BatchGetResult{"k": {}} + require.NoError(t, s.BatchGet(read, keys)) + require.False(t, keys["k"].IsFound()) +} From 950197c49bc96e8d013a85bb7203e5fee1385284 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 16 Mar 2026 15:47:46 -0500 Subject: [PATCH 065/119] cleanup --- sei-db/common/metrics/buckets.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sei-db/common/metrics/buckets.go b/sei-db/common/metrics/buckets.go index 42977fd032..5bb4004416 100644 --- a/sei-db/common/metrics/buckets.go +++ b/sei-db/common/metrics/buckets.go @@ -1,5 +1,7 @@ package metrics +import "github.com/sei-protocol/sei-chain/sei-db/common/unit" + // Shared histogram bucket boundaries for use across the codebase. // The OTel defaults are too coarse for meaningful percentile queries in Grafana. @@ -13,8 +15,8 @@ var LatencyBuckets = []float64{ // ByteSizeBuckets covers 256B to 1GB for data size histograms. var ByteSizeBuckets = []float64{ - 256, 1024, 4096, 16384, 65536, 262144, // 256B–256KB - 1 << 20, 4 << 20, 16 << 20, 64 << 20, 256 << 20, 1 << 30, // 1MB–1GB + 256, unit.KB, 4 * unit.KB, 16 * unit.KB, 64 * unit.KB, 256 * unit.KB, + unit.MB, 4 * unit.MB, 16 * unit.MB, 64 * unit.MB, 256 * unit.MB, unit.GB, } // CountBuckets covers 1 to 1M for per-operation step/iteration counts. 
From 003fcc965a155b4733de7298a67232ac411f1c30 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 17 Mar 2026 11:36:28 -0500 Subject: [PATCH 066/119] made suggested changes --- sei-db/common/threading/pool.go | 7 + sei-db/db_engine/dbcache/cache.go | 13 ++ sei-db/db_engine/dbcache/cache_impl.go | 7 +- sei-db/db_engine/dbcache/cache_impl_test.go | 72 +++++++--- sei-db/db_engine/dbcache/shard.go | 40 ++++-- sei-db/db_engine/dbcache/shard_test.go | 142 ++++++++++++++++++-- 6 files changed, 239 insertions(+), 42 deletions(-) diff --git a/sei-db/common/threading/pool.go b/sei-db/common/threading/pool.go index b86f85b9c3..4af9cebfa1 100644 --- a/sei-db/common/threading/pool.go +++ b/sei-db/common/threading/pool.go @@ -9,5 +9,12 @@ type Pool interface { // If Submit is called concurrently with or after shutdown (i.e. when ctx is done/cancelled), the task may // be silently dropped. Callers that need a guarantee of execution must // ensure Submit happens-before shutdown. + // + // This method is permitted to return an error only under the following conditions: + // - the pool is shutting down (i.e. its context is done/cancelled) + // - the provided ctx parameter is done/cancelled before this method returns + // - invalid input (e.g. the task is nil) + // + // If this method returns an error, the task may or may not have been executed. 
Submit(ctx context.Context, task func()) error } diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index 6b9687c234..ccbaf6464c 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -27,6 +27,9 @@ type Reader func(key []byte) (value []byte, found bool, err error) // - the Reader method returns an error (for methods that accpet a Reader) // - the cache is shutting down // - the cache's work pools are shutting down +// +// Cache errors are generally not recoverable, and it should be assumed that a cache that has returned an error +// is in a corrupted state, and should be discarded. type Cache interface { // Get returns the value for the given key, or (nil, false, nil) if not found. @@ -69,6 +72,14 @@ type Cache interface { BatchSet(updates []CacheUpdate) error } +// DefaultEstimatedOverheadPerEntry is a rough estimate of the fixed heap overhead per cache entry +// on a 64-bit architecture (amd64/arm64). It accounts for the shardEntry struct (48 B), +// list.Element (48 B), lruQueueEntry (32 B), two map-entry costs (~64 B), string allocation +// rounding (~16 B), and a margin for the duplicate key copy stored in the LRU. Derived from +// static analysis of Go size classes and map bucket layout; validate experimentally for your +// target platform. +const DefaultEstimatedOverheadPerEntry uint64 = 250 + // CacheUpdate describes a single key-value mutation to apply to the cache. type CacheUpdate struct { // The key to update. 
@@ -89,6 +100,7 @@ func BuildCache( maxSize uint64, readPool threading.Pool, miscPool threading.Pool, + estimatedOverheadPerEntry uint64, cacheName string, metricsScrapeInterval time.Duration, ) (Cache, error) { @@ -103,6 +115,7 @@ func BuildCache( maxSize, readPool, miscPool, + estimatedOverheadPerEntry, cacheName, metricsScrapeInterval, ) diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index e7b9caba66..6a3da2352e 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -41,6 +41,9 @@ func NewStandardCache( readPool threading.Pool, // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, + // The estimated overhead per entry, in bytes. This is used to calculate the maximum size of the cache. + // This value should be derived experimentally, and may differ between different builds and architectures. + estimatedOverheadPerEntry uint64, // Name used as the "cache" attribute on metrics. Empty string disables metrics. cacheName string, // How often to scrape cache size for metrics. Ignored if cacheName is empty. 
@@ -64,7 +67,7 @@ func NewStandardCache( shards := make([]*shard, shardCount) for i := uint64(0); i < shardCount; i++ { - shards[i], err = NewShard(ctx, readPool, sizePerShard) + shards[i], err = NewShard(ctx, readPool, sizePerShard, estimatedOverheadPerEntry) if err != nil { return nil, fmt.Errorf("failed to create shard: %w", err) } @@ -109,8 +112,8 @@ func (c *cache) BatchSet(updates []CacheUpdate) error { for shardIndex, shardEntries := range shardMap { wg.Add(1) err := c.miscPool.Submit(c.ctx, func() { + defer wg.Done() c.shards[shardIndex].BatchSet(shardEntries) - wg.Done() }) if err != nil { return fmt.Errorf("failed to submit batch set: %w", err) diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index d11201478a..099fce4097 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -31,7 +31,7 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin return v, true, nil } pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, 16, "", 0) require.NoError(t, err) return c, read } @@ -42,42 +42,42 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin func TestNewStandardCacheValid(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, 16, "", 0) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheSingleShard(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, 16, "", 0) require.NoError(t, err) require.NotNil(t, c) } func 
TestNewStandardCacheShardCountZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, 16, "", 0) require.Error(t, err) } func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{3, 5, 6, 7, 9, 10} { - _, err := NewStandardCache(context.Background(), n, 1024, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), n, 1024, pool, pool, 16, "", 0) require.Error(t, err, "shardCount=%d", n) } } func TestNewStandardCacheMaxSizeZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, 16, "", 0) require.Error(t, err) } func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { pool := threading.NewAdHocPool() // shardCount=4, maxSize=3 → sizePerShard=0 - _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, "", 0) + _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, 16, "", 0) require.Error(t, err) } @@ -85,7 +85,7 @@ func TestNewStandardCacheWithMetrics(t *testing.T) { pool := threading.NewAdHocPool() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c, err := NewStandardCache(ctx, 2, 1024, pool, pool, "test-cache", time.Hour) + c, err := NewStandardCache(ctx, 2, 1024, pool, pool, 0, "test-cache", time.Hour) require.NoError(t, err) require.NotNil(t, c) } @@ -140,7 +140,7 @@ func TestCacheGetDBError(t *testing.T) { dbErr := errors.New("db fail") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) _, _, err := c.Get(readFunc, 
[]byte("k"), true) require.Error(t, err) @@ -154,7 +154,7 @@ func TestCacheGetSameKeyConsistentShard(t *testing.T) { return []byte("val"), true, nil } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, "", 0) + c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, 0, "", 0) val1, _, _ := c.Get(readFunc, []byte("key"), true) val2, _, _ := c.Get(readFunc, []byte("key"), true) @@ -295,7 +295,7 @@ func TestCacheBatchSetEmpty(t *testing.T) { func TestCacheBatchSetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, 0, "", 0) err := c.BatchSet([]CacheUpdate{ {Key: []byte("k"), Value: []byte("v")}, @@ -373,7 +373,7 @@ func TestCacheBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) keys := map[string]types.BatchGetResult{"fail": {}} require.NoError(t, c.BatchGet(readFunc, keys), "BatchGet itself should not fail") @@ -388,7 +388,7 @@ func TestCacheBatchGetEmpty(t *testing.T) { func TestCacheBatchGetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, 0, "", 0) keys := map[string]types.BatchGetResult{"k": {}} err := c.BatchGet(noopRead, keys) @@ -397,7 +397,7 @@ func TestCacheBatchGetPoolFailure(t *testing.T) { func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { miscPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, 
miscPool, "", 0) + c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, miscPool, 0, "", 0) keys := map[string]types.BatchGetResult{"a": {}, "b": {}} require.NoError(t, c.BatchGet(noopRead, keys)) @@ -466,6 +466,48 @@ func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { require.Greater(t, bytes, uint64(0)) } +// --------------------------------------------------------------------------- +// estimatedOverheadPerEntry +// --------------------------------------------------------------------------- + +func TestCacheSizeInfoIncludesOverhead(t *testing.T) { + const overhead = 200 + pool := threading.NewAdHocPool() + c, err := NewStandardCache(context.Background(), 1, 100_000, pool, pool, overhead, "", 0) + require.NoError(t, err) + impl := c.(*cache) + + c.Set([]byte("ab"), []byte("cd")) + c.Set([]byte("efg"), []byte("hi")) + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, uint64(2), entries) + // (2+2+200) + (3+2+200) = 409 + require.Equal(t, uint64(409), bytes) +} + +func TestCacheOverheadCausesEarlierEviction(t *testing.T) { + const overhead = 200 + pool := threading.NewAdHocPool() + // Single shard, maxSize=500. Each 10-byte value entry costs 1+10+200=211 bytes. + // Two entries = 422 < 500. Three entries = 633 > 500, so one must be evicted. 
+ c, err := NewStandardCache(context.Background(), 1, 500, pool, pool, overhead, "", 0) + require.NoError(t, err) + impl := c.(*cache) + + c.Set([]byte("a"), []byte("0123456789")) + c.Set([]byte("b"), []byte("0123456789")) + + _, entries := impl.getCacheSizeInfo() + require.Equal(t, uint64(2), entries, "two entries should fit") + + c.Set([]byte("c"), []byte("0123456789")) + + bytes, entries := impl.getCacheSizeInfo() + require.Equal(t, uint64(2), entries, "third entry should trigger eviction") + require.LessOrEqual(t, bytes, uint64(500)) +} + // --------------------------------------------------------------------------- // Many keys — BatchGet/BatchSet spanning all shards // --------------------------------------------------------------------------- @@ -657,7 +699,7 @@ func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { - c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, "", 0) + c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, 0, "", 0) require.NoError(t, err, "shardCount=%d", n) require.NotNil(t, c, "shardCount=%d", n) } diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go index 80f8c18aba..95db9c60e0 100644 --- a/sei-db/db_engine/dbcache/shard.go +++ b/sei-db/db_engine/dbcache/shard.go @@ -30,6 +30,10 @@ type shard struct { // The maximum size of this cache, in bytes. maxSize uint64 + // The estimated overhead per entry, in bytes. This is used to calculate the maximum size of the cache. + // This value should be derived experimentally, and may differ between different builds and architectures. + estimatedOverheadPerEntry uint64 + // Cache-level metrics. Nil-safe; if nil, no metrics are recorded. metrics *CacheMetrics } @@ -73,8 +77,13 @@ type shardEntry struct { // Creates a new Shard. 
func NewShard( ctx context.Context, + // A work pool for asynchronous reads. readPool threading.Pool, + // The maximum size of this shard, in bytes. maxSize uint64, + // The estimated overhead per entry, in bytes. This is used to calculate the maximum size of the cache. + // This value should be derived experimentally, and may differ between different builds and architectures. + estimatedOverheadPerEntry uint64, ) (*shard, error) { if maxSize <= 0 { @@ -82,12 +91,13 @@ func NewShard( } return &shard{ - ctx: ctx, - readPool: readPool, - lock: sync.Mutex{}, - data: make(map[string]*shardEntry), - gcQueue: newLRUQueue(), - maxSize: maxSize, + ctx: ctx, + readPool: readPool, + lock: sync.Mutex{}, + data: make(map[string]*shardEntry), + gcQueue: newLRUQueue(), + estimatedOverheadPerEntry: estimatedOverheadPerEntry, + maxSize: maxSize, }, nil } @@ -189,12 +199,14 @@ func (se *shardEntry) injectValue(key []byte, result readResult) { } else if result.value == nil { se.status = statusDeleted se.value = nil - se.shard.gcQueue.Push(key, uint64(len(key))) + size := uint64(len(key)) + se.shard.estimatedOverheadPerEntry + se.shard.gcQueue.Push(key, size) se.shard.evictUnlocked() } else { se.status = statusAvailable se.value = result.value - se.shard.gcQueue.Push(key, uint64(len(key)+len(result.value))) //nolint:gosec // G115: len is non-negative + size := uint64(len(key)) + uint64(len(result.value)) + se.shard.estimatedOverheadPerEntry + se.shard.gcQueue.Push(key, size) se.shard.evictUnlocked() } } @@ -324,11 +336,13 @@ func (s *shard) bulkInjectValues(reads []pendingRead) { } else if result.value == nil { entry.status = statusDeleted entry.value = nil - s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key))) + size := uint64(len(reads[i].key)) + s.estimatedOverheadPerEntry + s.gcQueue.Push([]byte(reads[i].key), size) } else { entry.status = statusAvailable entry.value = result.value - s.gcQueue.Push([]byte(reads[i].key), uint64(len(reads[i].key)+len(result.value))) 
//nolint:gosec // G115 + size := uint64(len(reads[i].key)) + uint64(len(result.value)) + s.estimatedOverheadPerEntry + s.gcQueue.Push([]byte(reads[i].key), size) } } s.evictUnlocked() @@ -364,7 +378,8 @@ func (s *shard) setUnlocked(key []byte, value []byte) { entry.status = statusAvailable entry.value = value - s.gcQueue.Push(key, uint64(len(key)+len(value))) //nolint:gosec // G115 + size := uint64(len(key)) + uint64(len(value)) + s.estimatedOverheadPerEntry + s.gcQueue.Push(key, size) s.evictUnlocked() } @@ -394,6 +409,7 @@ func (s *shard) deleteUnlocked(key []byte) { entry.status = statusDeleted entry.value = nil - s.gcQueue.Push(key, uint64(len(key))) + size := uint64(len(key)) + s.estimatedOverheadPerEntry + s.gcQueue.Push(key, size) s.evictUnlocked() } diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go index b39414959f..42bb4dd0fe 100644 --- a/sei-db/db_engine/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -28,7 +28,7 @@ func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) (*shard } return v, true, nil }) - s, err := NewShard(context.Background(), threading.NewAdHocPool(), maxSize) + s, err := NewShard(context.Background(), threading.NewAdHocPool(), maxSize, 0) require.NoError(t, err) return s, read } @@ -38,13 +38,13 @@ func newTestShard(t *testing.T, maxSize uint64, store map[string][]byte) (*shard // --------------------------------------------------------------------------- func TestNewShardValid(t *testing.T) { - s, err := NewShard(context.Background(), threading.NewAdHocPool(), 1024) + s, err := NewShard(context.Background(), threading.NewAdHocPool(), 1024, 0) require.NoError(t, err) require.NotNil(t, s) } func TestNewShardZeroMaxSize(t *testing.T) { - _, err := NewShard(context.Background(), threading.NewAdHocPool(), 0) + _, err := NewShard(context.Background(), threading.NewAdHocPool(), 0, 0) require.Error(t, err) } @@ -74,7 +74,7 @@ func TestGetCacheMissNotFoundInDB(t 
*testing.T) { func TestGetCacheMissDBError(t *testing.T) { dbErr := errors.New("disk on fire") readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) _, _, err := s.Get(readFunc, []byte("boom"), true) require.Error(t, err) @@ -90,7 +90,7 @@ func TestGetDBErrorDoesNotCacheResult(t *testing.T) { } return []byte("recovered"), true, nil }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) _, _, err := s.Get(readFunc, []byte("key"), true) require.Error(t, err, "first call should fail") @@ -134,7 +134,7 @@ func TestGetAfterSet(t *testing.T) { readCalls.Add(1) return nil, false, nil }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) s.Set([]byte("k"), []byte("from-set")) @@ -170,7 +170,7 @@ func TestGetConcurrentSameKey(t *testing.T) { <-gate return []byte("value"), true, nil }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) const n = 10 var wg sync.WaitGroup @@ -213,7 +213,7 @@ func TestGetContextCancelled(t *testing.T) { time.Sleep(time.Second) return []byte("late"), true, nil }) - s, _ := NewShard(ctx, threading.NewAdHocPool(), 4096) + s, _ := NewShard(ctx, threading.NewAdHocPool(), 4096, 0) cancel() @@ -505,7 +505,7 @@ func TestBatchGetDeletedKeys(t *testing.T) { func TestBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") readFunc := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, dbErr }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) keys := 
map[string]types.BatchGetResult{ "fail": {}, @@ -529,7 +529,7 @@ func TestBatchGetCachesResults(t *testing.T) { v, ok := store[string(key)] return v, ok, nil }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) keys := map[string]types.BatchGetResult{"k": {}} s.BatchGet(readFunc, keys) @@ -631,6 +631,122 @@ func TestGetSizeInfoAfterSets(t *testing.T) { require.Equal(t, uint64(9), bytes) } +// --------------------------------------------------------------------------- +// estimatedOverheadPerEntry +// --------------------------------------------------------------------------- + +func TestOverheadIncludedInSizeAfterSet(t *testing.T) { + const overhead = 100 + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + s.Set([]byte("ab"), []byte("cd")) + s.Set([]byte("efg"), []byte("hi")) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(2), entries) + // (2+2+100) + (3+2+100) = 209 + require.Equal(t, uint64(209), bytes) +} + +func TestOverheadIncludedInSizeAfterDelete(t *testing.T) { + const overhead = 100 + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + s.Delete([]byte("abc")) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries) + // 3 + 100 = 103 + require.Equal(t, uint64(103), bytes) +} + +func TestOverheadIncludedInSizeAfterDBRead(t *testing.T) { + const overhead = 100 + store := map[string][]byte{"key": []byte("value")} + read := Reader(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + return v, ok, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + val, found, err := s.Get(read, []byte("key"), true) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, "value", string(val)) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries) + // 3 + 5 + 100 = 108 + 
require.Equal(t, uint64(108), bytes) +} + +func TestOverheadIncludedInSizeAfterDBReadNotFound(t *testing.T) { + const overhead = 100 + read := Reader(func(key []byte) ([]byte, bool, error) { return nil, false, nil }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + _, found, err := s.Get(read, []byte("key"), true) + require.NoError(t, err) + require.False(t, found) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries) + // 3 + 100 = 103 + require.Equal(t, uint64(103), bytes) +} + +func TestOverheadTriggersEarlierEviction(t *testing.T) { + const overhead = 50 + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100, overhead) + + // "a" + "1234" + 50 = 55 bytes + s.Set([]byte("a"), []byte("1234")) + _, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries) + + // "b" + "5678" + 50 = 55 bytes, total = 110 > 100 → evict "a" + s.Set([]byte("b"), []byte("5678")) + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries, "overhead should cause eviction to keep only one entry") + require.LessOrEqual(t, bytes, uint64(100)) +} + +func TestOverheadIncludedInBatchGetFromDB(t *testing.T) { + const overhead = 100 + store := map[string][]byte{"x": []byte("10"), "y": []byte("20")} + read := Reader(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + return v, ok, nil + }) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + keys := map[string]types.BatchGetResult{"x": {}, "y": {}} + require.NoError(t, s.BatchGet(read, keys)) + + time.Sleep(50 * time.Millisecond) + + bytes, entries := s.getSizeInfo() + require.Equal(t, uint64(2), entries) + // (1+2+100) + (1+2+100) = 206 + require.Equal(t, uint64(206), bytes) +} + +func TestOverheadSizeUpdatedOnOverwrite(t *testing.T) { + const overhead = 100 + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + + s.Set([]byte("k"), []byte("short")) + b1, 
_ := s.getSizeInfo() + // 1 + 5 + 100 = 106 + require.Equal(t, uint64(106), b1) + + s.Set([]byte("k"), []byte("a-longer-value")) + b2, entries := s.getSizeInfo() + require.Equal(t, uint64(1), entries) + // 1 + 14 + 100 = 115 + require.Equal(t, uint64(115), b2) +} + // --------------------------------------------------------------------------- // injectValue — edge cases // --------------------------------------------------------------------------- @@ -725,7 +841,7 @@ func (fp *failPool) Submit(_ context.Context, _ func()) error { func TestGetPoolSubmitFailure(t *testing.T) { readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) - s, _ := NewShard(context.Background(), &failPool{}, 4096) + s, _ := NewShard(context.Background(), &failPool{}, 4096, 0) _, _, err := s.Get(readFunc, []byte("k"), true) require.Error(t, err) @@ -733,7 +849,7 @@ func TestGetPoolSubmitFailure(t *testing.T) { func TestBatchGetPoolSubmitFailure(t *testing.T) { readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) - s, _ := NewShard(context.Background(), &failPool{}, 4096) + s, _ := NewShard(context.Background(), &failPool{}, 4096, 0) keys := map[string]types.BatchGetResult{"k": {}} err := s.BatchGet(readFunc, keys) @@ -772,7 +888,7 @@ func TestBatchGetDBErrorNotCached(t *testing.T) { } return []byte("ok"), true, nil }) - s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096) + s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 4096, 0) keys := map[string]types.BatchGetResult{"k": {}} s.BatchGet(readFunc, keys) From a208a1b9c34f7136c5a0f125f8242b680df38421 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 17 Mar 2026 12:03:14 -0500 Subject: [PATCH 067/119] made suggested change --- sei-db/db_engine/dbcache/shard.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go index 
95db9c60e0..0548168d15 100644 --- a/sei-db/db_engine/dbcache/shard.go +++ b/sei-db/db_engine/dbcache/shard.go @@ -1,7 +1,6 @@ package dbcache import ( - "bytes" "context" "fmt" "sync" @@ -124,7 +123,7 @@ func (s *shard) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, erro // Handles Get for a key whose value is already cached. Lock must be held; releases it. func (s *shard) getAvailable(entry *shardEntry, key []byte, updateLru bool) ([]byte, bool, error) { - value := bytes.Clone(entry.value) + value := entry.value if updateLru { s.gcQueue.Touch(key) } @@ -252,7 +251,7 @@ func (s *shard) BatchGet(read Reader, keys map[string]types.BatchGetResult) erro switch entry.status { case statusAvailable, statusDeleted: - keys[key] = types.BatchGetResult{Value: bytes.Clone(entry.value)} + keys[key] = types.BatchGetResult{Value: entry.value} hits++ case statusScheduled: pending = append(pending, pendingRead{ @@ -369,6 +368,7 @@ func (s *shard) getSizeInfo() (bytes uint64, entries uint64) { func (s *shard) Set(key []byte, value []byte) { s.lock.Lock() s.setUnlocked(key, value) + s.evictUnlocked() s.lock.Unlock() } @@ -380,7 +380,6 @@ func (s *shard) setUnlocked(key []byte, value []byte) { size := uint64(len(key)) + uint64(len(value)) + s.estimatedOverheadPerEntry s.gcQueue.Push(key, size) - s.evictUnlocked() } // BatchSet sets the values for a batch of keys. 
@@ -393,6 +392,7 @@ func (s *shard) BatchSet(entries []CacheUpdate) { s.setUnlocked(entries[i].Key, entries[i].Value) } } + s.evictUnlocked() s.lock.Unlock() } @@ -400,6 +400,7 @@ func (s *shard) BatchSet(entries []CacheUpdate) { func (s *shard) Delete(key []byte) { s.lock.Lock() s.deleteUnlocked(key) + s.evictUnlocked() s.lock.Unlock() } @@ -411,5 +412,4 @@ func (s *shard) deleteUnlocked(key []byte) { size := uint64(len(key)) + s.estimatedOverheadPerEntry s.gcQueue.Push(key, size) - s.evictUnlocked() } From 157a6009993eaef321ced96b4aee3387d3220ca9 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 17 Mar 2026 13:30:42 -0500 Subject: [PATCH 068/119] made suggested changes --- sei-db/db_engine/dbcache/cache_impl.go | 7 +++- sei-db/db_engine/dbcache/cache_impl_test.go | 30 ++++++++++++++++- sei-db/db_engine/dbcache/shard.go | 36 +++++++++++++++++---- 3 files changed, 64 insertions(+), 9 deletions(-) diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index 6a3da2352e..1292f74caf 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -185,5 +185,10 @@ func (c *cache) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, erro func (c *cache) Set(key []byte, value []byte) { shardIndex := c.shardManager.Shard(key) shard := c.shards[shardIndex] - shard.Set(key, value) + + if value == nil { + shard.Delete(key) + } else { + shard.Set(key, value) + } } diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 099fce4097..2d206ccc8a 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -197,10 +197,38 @@ func TestCacheSetNilValue(t *testing.T) { val, found, err := c.Get(read, []byte("k"), false) require.NoError(t, err) - require.True(t, found) + require.False(t, found, "Set(key, nil) should be treated as a deletion") require.Nil(t, val) } +func 
TestCacheSetNilConsistentWithBatchSet(t *testing.T) { + store := map[string][]byte{"a": []byte("orig-a"), "b": []byte("orig-b")} + + cSet, readSet := newTestCache(t, store, 1, 4096) + cBatch, readBatch := newTestCache(t, store, 1, 4096) + + // Warm both caches so the backing store value is loaded. + _, _, err := cSet.Get(readSet, []byte("a"), true) + require.NoError(t, err) + _, _, err = cBatch.Get(readBatch, []byte("b"), true) + require.NoError(t, err) + + // Delete via Set(key, nil) in one cache and BatchSet({key, nil}) in the other. + cSet.Set([]byte("a"), nil) + require.NoError(t, cBatch.BatchSet([]CacheUpdate{ + {Key: []byte("b"), Value: nil}, + })) + + valA, foundA, err := cSet.Get(readSet, []byte("a"), false) + require.NoError(t, err) + valB, foundB, err := cBatch.Get(readBatch, []byte("b"), false) + require.NoError(t, err) + + require.Equal(t, foundA, foundB, "Set(key, nil) and BatchSet with nil value should agree on found") + require.Equal(t, valA, valB, "Set(key, nil) and BatchSet with nil value should agree on value") + require.False(t, foundA, "nil value should be treated as a deletion") +} + // --------------------------------------------------------------------------- // Delete // --------------------------------------------------------------------------- diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go index 0548168d15..6a71105add 100644 --- a/sei-db/db_engine/dbcache/shard.go +++ b/sei-db/db_engine/dbcache/shard.go @@ -49,7 +49,7 @@ type valueStatus int const ( // The value is not known and we are not currently attempting to find it. statusUnknown valueStatus = iota - // We've scheduled a read of the value but haven't yet finsihed the read. + // We've scheduled a read of the value but haven't yet finished the read. statusScheduled // The data is available. 
statusAvailable @@ -73,6 +73,21 @@ type shardEntry struct { valueChan chan readResult } +/* +This implementation currently uses a single exclusive lock, as opposed to a RW lock. This is a lot simpler than +using a RW lock, but it comes at higher risk of contention under certain workloads. If this contention ever +becomes a problem, we might consider switching to a RW lock. Below is a potential implementation strategy +for converting to a RW lock: + +- Create a background goroutine that is responsible for garbage collection and updating the LRU. +- The GC goroutine should periodically wake up, grab the lock, and do garbage collection. +- When Get() is called, the calling goroutine should grab a read lock and attempt to read the value. + - If the value is present, send a message to the GC goroutine over a channel (so it can update the LRU) + and return the value. In this way, many readers can read from this shard concurrently. + - If the value is missing, drop the read lock and acquire a write lock. Then, handle the read + like we currently handle in the current implementation. +*/ + // Creates a new Shard. func NewShard( ctx context.Context, @@ -85,7 +100,7 @@ func NewShard( estimatedOverheadPerEntry uint64, ) (*shard, error) { - if maxSize <= 0 { + if maxSize == 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } @@ -104,7 +119,7 @@ func NewShard( func (s *shard) Get(read Reader, key []byte, updateLru bool) ([]byte, bool, error) { s.lock.Lock() - entry := s.getEntry(key) + entry := s.getEntry(key, true) switch entry.status { case statusAvailable: @@ -217,10 +232,13 @@ func (se *shardEntry) injectValue(key []byte, result readResult) { // Get a shard entry for a given key. Caller is responsible for holding the shard's lock // when this method is called. 
-func (s *shard) getEntry(key []byte) *shardEntry { +func (s *shard) getEntry(key []byte, createIfMissing bool) *shardEntry { if entry, ok := s.data[string(key)]; ok { return entry } + if !createIfMissing { + return nil + } entry := &shardEntry{ shard: s, status: statusUnknown, @@ -247,7 +265,7 @@ func (s *shard) BatchGet(read Reader, keys map[string]types.BatchGetResult) erro s.lock.Lock() for key := range keys { - entry := s.getEntry([]byte(key)) + entry := s.getEntry([]byte(key), true) switch entry.status { case statusAvailable, statusDeleted: @@ -374,7 +392,7 @@ func (s *shard) Set(key []byte, value []byte) { // Set a value. Caller is required to hold the lock. func (s *shard) setUnlocked(key []byte, value []byte) { - entry := s.getEntry(key) + entry := s.getEntry(key, true) entry.status = statusAvailable entry.value = value @@ -406,7 +424,11 @@ func (s *shard) Delete(key []byte) { // Delete a value. Caller is required to hold the lock. func (s *shard) deleteUnlocked(key []byte) { - entry := s.getEntry(key) + entry := s.getEntry(key, false) + if entry == nil { + // Key is not in the cache, so nothing to do. + return + } entry.status = statusDeleted entry.value = nil From b41639fd90232c4be6374871cf6f7e494d74657f Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 17 Mar 2026 15:04:12 -0500 Subject: [PATCH 069/119] fix unit test --- sei-db/db_engine/dbcache/cache_impl_test.go | 4 ++++ sei-db/db_engine/dbcache/shard_test.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 2d206ccc8a..5433019c93 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -128,6 +128,10 @@ func TestCacheGetAfterDelete(t *testing.T) { store := map[string][]byte{"k": []byte("v")} c, read := newTestCache(t, store, 4, 4096) + // Warm the cache so the key is present before deleting. 
+ _, _, err := c.Get(read, []byte("k"), true) + require.NoError(t, err) + c.Delete([]byte("k")) val, found, err := c.Get(read, []byte("k"), true) diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go index 42bb4dd0fe..277a4fb803 100644 --- a/sei-db/db_engine/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -149,6 +149,10 @@ func TestGetAfterDelete(t *testing.T) { store := map[string][]byte{"k": []byte("v")} s, read := newTestShard(t, 4096, store) + // Warm the cache so the key is present before deleting. + _, _, err := s.Get(read, []byte("k"), true) + require.NoError(t, err) + s.Delete([]byte("k")) val, found, err := s.Get(read, []byte("k"), true) From fe31475c4e295d695ee7f09fcbfefea9f39e7032 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 17 Mar 2026 15:09:30 -0500 Subject: [PATCH 070/119] fix unit test --- sei-db/db_engine/dbcache/shard_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go index 277a4fb803..5e438dd72b 100644 --- a/sei-db/db_engine/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -654,8 +654,17 @@ func TestOverheadIncludedInSizeAfterSet(t *testing.T) { func TestOverheadIncludedInSizeAfterDelete(t *testing.T) { const overhead = 100 + store := map[string][]byte{"abc": []byte("val")} + read := Reader(func(key []byte) ([]byte, bool, error) { + v, ok := store[string(key)] + return v, ok, nil + }) s, _ := NewShard(context.Background(), threading.NewAdHocPool(), 100_000, overhead) + // Warm the cache so the key is present before deleting. 
+ _, _, err := s.Get(read, []byte("abc"), true) + require.NoError(t, err) + s.Delete([]byte("abc")) bytes, entries := s.getSizeInfo() From 64f853084477b82ec767048a57f85f1724483543 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 09:29:01 -0500 Subject: [PATCH 071/119] fixed merge bugs --- .../bench/cryptosim/config/debug.json | 1 + .../bench/cryptosim/config/reciept-store.json | 1 + .../bench/cryptosim/config/standard-perf.json | 1 + sei-db/state_db/sc/flatkv/store.go | 29 ++++++++++--------- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/sei-db/state_db/bench/cryptosim/config/debug.json b/sei-db/state_db/bench/cryptosim/config/debug.json index 15f556bf57..a8e5666931 100644 --- a/sei-db/state_db/bench/cryptosim/config/debug.json +++ b/sei-db/state_db/bench/cryptosim/config/debug.json @@ -1,6 +1,7 @@ { "Comment": "For locally testing/debugging the benchmark or related code.", "DataDir": "data", + "LogDir": "logs", "DeleteDataDirOnStartup": true, "DeleteDataDirOnShutdown": true } diff --git a/sei-db/state_db/bench/cryptosim/config/reciept-store.json b/sei-db/state_db/bench/cryptosim/config/reciept-store.json index dbb621e8ae..ac2801ec8e 100644 --- a/sei-db/state_db/bench/cryptosim/config/reciept-store.json +++ b/sei-db/state_db/bench/cryptosim/config/reciept-store.json @@ -1,6 +1,7 @@ { "Comment": "For testing with the state store and reciept store both enabled.", "DataDir": "data", + "LogDir": "logs", "MinimumNumberOfColdAccounts": 1000000, "MinimumNumberOfDormantAccounts": 1000000, "GenerateReceipts": true diff --git a/sei-db/state_db/bench/cryptosim/config/standard-perf.json b/sei-db/state_db/bench/cryptosim/config/standard-perf.json index a233241242..4cf205f830 100644 --- a/sei-db/state_db/bench/cryptosim/config/standard-perf.json +++ b/sei-db/state_db/bench/cryptosim/config/standard-perf.json @@ -1,6 +1,7 @@ { "Comment": "The standardized parameters for performance and longevity testing.", "DataDir": "data", + "LogDir": "logs", 
"MinimumNumberOfColdAccounts": 1000000, "MinimumNumberOfDormantAccounts": 100000000, "FlatKVConfig": { diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index bc8308dce3..8e7a4619f5 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -161,20 +161,21 @@ func NewCommitStore( miscPool := threading.NewElasticPool(ctx, "flatkv-misc", miscPoolSize) return &CommitStore{ - ctx: ctx, - cancel: cancel, - config: *cfg, - localMeta: make(map[string]*LocalMeta), - accountWrites: make(map[string]*pendingAccountWrite), - codeWrites: make(map[string]*pendingKVWrite), - storageWrites: make(map[string]*pendingKVWrite), - legacyWrites: make(map[string]*pendingKVWrite), - pendingChangeSets: make([]*proto.NamedChangeSet, 0), - committedLtHash: lthash.New(), - workingLtHash: lthash.New(), - phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), - readPool: readPool, - miscPool: miscPool, + ctx: ctx, + cancel: cancel, + config: *cfg, + localMeta: make(map[string]*LocalMeta), + accountWrites: make(map[string]*pendingAccountWrite), + codeWrites: make(map[string]*pendingKVWrite), + storageWrites: make(map[string]*pendingKVWrite), + legacyWrites: make(map[string]*pendingKVWrite), + pendingChangeSets: make([]*proto.NamedChangeSet, 0), + committedLtHash: lthash.New(), + workingLtHash: lthash.New(), + perDBWorkingLtHash: make(map[string]*lthash.LtHash), + phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), + readPool: readPool, + miscPool: miscPool, }, nil } From d9c5fc112dd81a9a865526f54d6a488153315e00 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 09:36:52 -0500 Subject: [PATCH 072/119] fix teardown race --- sei-db/state_db/bench/cryptosim/block_builder.go | 1 + sei-db/state_db/bench/cryptosim/cryptosim.go | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/sei-db/state_db/bench/cryptosim/block_builder.go b/sei-db/state_db/bench/cryptosim/block_builder.go index 
66d45e66d5..5389b33135 100644 --- a/sei-db/state_db/bench/cryptosim/block_builder.go +++ b/sei-db/state_db/bench/cryptosim/block_builder.go @@ -48,6 +48,7 @@ func (b *blockBuilder) Start() { // Builds blocks and sends them to the blocks channel. func (b *blockBuilder) mainLoop() { + defer b.dataGenerator.Close() for { block := b.buildBlock() select { diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index 241b397344..c686139ed8 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -509,8 +509,6 @@ func (c *CryptoSim) teardown() { } c.cancel() - c.dataGenerator.Close() - c.closeChan <- struct{}{} } From 14593ec856e1201332a649071453b65c43a4ca59 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 10:20:44 -0500 Subject: [PATCH 073/119] Add logging metric, clean up log files before/after run --- .../dashboards/cryptosim-dashboard.json | 457 +++++++++++------- .../cryptosim/cmd/configure-logger/main.go | 15 + .../bench/cryptosim/cmd/cryptosim/main.go | 24 +- .../bench/cryptosim/cryptosim_config.go | 4 +- sei-db/state_db/sc/flatkv/store.go | 2 + 5 files changed, 311 insertions(+), 191 deletions(-) diff --git a/docker/monitornode/dashboards/cryptosim-dashboard.json b/docker/monitornode/dashboards/cryptosim-dashboard.json index d03da69924..08647b78ab 100644 --- a/docker/monitornode/dashboards/cryptosim-dashboard.json +++ b/docker/monitornode/dashboards/cryptosim-dashboard.json @@ -711,7 +711,7 @@ "h": 8, "w": 12, "x": 0, - "y": 42 + "y": 106 }, "id": 280, "options": { @@ -806,7 +806,7 @@ "h": 8, "w": 12, "x": 12, - "y": 42 + "y": 106 }, "id": 281, "options": { @@ -903,7 +903,7 @@ "h": 8, "w": 12, "x": 0, - "y": 50 + "y": 114 }, "id": 282, "options": { @@ -998,7 +998,7 @@ "h": 8, "w": 12, "x": 12, - "y": 50 + "y": 114 }, "id": 285, "options": { @@ -1093,7 +1093,7 @@ "h": 8, "w": 12, "x": 0, - "y": 58 + "y": 122 }, "id": 284, "options": { @@ -1188,7 
+1188,7 @@ "h": 8, "w": 12, "x": 12, - "y": 58 + "y": 122 }, "id": 283, "options": { @@ -1297,7 +1297,7 @@ "h": 8, "w": 12, "x": 0, - "y": 66 + "y": 194 }, "id": 6, "options": { @@ -1427,7 +1427,7 @@ "h": 8, "w": 12, "x": 12, - "y": 66 + "y": 194 }, "id": 7, "options": { @@ -1522,7 +1522,7 @@ "h": 8, "w": 12, "x": 0, - "y": 74 + "y": 202 }, "id": 10, "options": { @@ -1616,7 +1616,7 @@ "h": 8, "w": 12, "x": 12, - "y": 74 + "y": 202 }, "id": 11, "options": { @@ -1685,7 +1685,7 @@ "h": 16, "w": 12, "x": 0, - "y": 6867 + "y": 6995 }, "id": 20, "options": { @@ -1788,7 +1788,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6867 + "y": 6995 }, "id": 19, "options": { @@ -1919,7 +1919,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6883 + "y": 7011 }, "id": 21, "options": { @@ -2014,7 +2014,7 @@ "h": 8, "w": 12, "x": 0, - "y": 6891 + "y": 7019 }, "id": 22, "options": { @@ -2145,7 +2145,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6891 + "y": 7019 }, "id": 23, "options": { @@ -2276,7 +2276,7 @@ "h": 8, "w": 12, "x": 0, - "y": 6899 + "y": 7027 }, "id": 24, "options": { @@ -2407,7 +2407,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6899 + "y": 7027 }, "id": 25, "options": { @@ -2538,7 +2538,7 @@ "h": 8, "w": 12, "x": 0, - "y": 6907 + "y": 7035 }, "id": 26, "options": { @@ -2669,7 +2669,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6907 + "y": 7035 }, "id": 27, "options": { @@ -2800,7 +2800,7 @@ "h": 8, "w": 12, "x": 0, - "y": 6915 + "y": 7043 }, "id": 28, "options": { @@ -2944,7 +2944,7 @@ "h": 8, "w": 12, "x": 0, - "y": 6868 + "y": 6996 }, "id": 1, "options": { @@ -3043,7 +3043,7 @@ "h": 8, "w": 12, "x": 12, - "y": 6868 + "y": 6996 }, "id": 18, "options": { @@ -3187,7 +3187,7 @@ "h": 8, "w": 12, "x": 0, - "y": 17750 + "y": 17878 }, "id": 3, "options": { @@ -3286,7 +3286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 17750 + "y": 17878 }, "id": 4, "options": { @@ -3363,7 +3363,7 @@ "x": 0, "y": 38 }, - "id": 277, + "id": 286, "panels": [ { "datasource": { @@ -3431,9 +3431,9 @@ "h": 8, "w": 12, "x": 0, - "y": 0 + "y": 151 }, - 
"id": 278, + "id": 287, "options": { "legend": { "calcs": [], @@ -3561,9 +3561,9 @@ "h": 8, "w": 12, "x": 12, - "y": 0 + "y": 151 }, - "id": 279, + "id": 288, "options": { "legend": { "calcs": [], @@ -3660,9 +3660,9 @@ "h": 8, "w": 12, "x": 0, - "y": 8 + "y": 159 }, - "id": 280, + "id": 289, "options": { "legend": { "calcs": [], @@ -3754,9 +3754,9 @@ "h": 8, "w": 12, "x": 12, - "y": 8 + "y": 159 }, - "id": 281, + "id": 290, "options": { "legend": { "calcs": [], @@ -3797,7 +3797,7 @@ "h": 1, "w": 24, "x": 0, - "y": 38 + "y": 39 }, "id": 29, "panels": [ @@ -3867,7 +3867,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13654 + "y": 13782 }, "id": 31, "options": { @@ -3962,7 +3962,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13654 + "y": 13782 }, "id": 36, "options": { @@ -4057,7 +4057,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13662 + "y": 13790 }, "id": 38, "options": { @@ -4096,7 +4096,7 @@ "h": 1, "w": 24, "x": 0, - "y": 39 + "y": 40 }, "id": 35, "panels": [ @@ -4165,7 +4165,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18361 + "y": 18489 }, "id": 30, "options": { @@ -4260,7 +4260,7 @@ "h": 8, "w": 12, "x": 12, - "y": 18361 + "y": 18489 }, "id": 33, "options": { @@ -4355,7 +4355,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18401 + "y": 18529 }, "id": 34, "options": { @@ -4394,7 +4394,7 @@ "h": 1, "w": 24, "x": 0, - "y": 40 + "y": 41 }, "id": 37, "panels": [ @@ -4464,7 +4464,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10248 + "y": 42 }, "id": 39, "options": { @@ -4559,7 +4559,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10248 + "y": 42 }, "id": 40, "options": { @@ -4654,7 +4654,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10296 + "y": 50 }, "id": 41, "options": { @@ -4749,7 +4749,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10296 + "y": 50 }, "id": 42, "options": { @@ -4844,7 +4844,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10304 + "y": 58 }, "id": 32, "options": { @@ -4872,6 +4872,101 @@ ], "title": "Open File Descriptors", "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 58 + }, + "id": 291, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "cryptosim_log_dir_size_bytes", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Log Files Size", + "type": "timeseries" } ], "title": "File System", @@ -4883,7 +4978,7 @@ "h": 1, "w": 24, "x": 0, - "y": 41 + "y": 42 }, "id": 44, "panels": [ @@ -4953,7 +5048,7 @@ "h": 8, "w": 12, "x": 0, - "y": 17555 + "y": 17683 }, "id": 43, "options": { @@ -4992,7 +5087,7 @@ "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 43 }, "id": 117, "panels": [ @@ -5252,7 +5347,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10490 + "y": 10618 }, "id": 261, "options": { @@ -5347,7 +5442,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10490 + "y": 10618 }, "id": 263, "options": { @@ -5442,7 +5537,7 @@ "h": 8, "w": 12, 
"x": 0, - "y": 10498 + "y": 10626 }, "id": 262, "options": { @@ -5537,7 +5632,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10498 + "y": 10626 }, "id": 264, "options": { @@ -5576,7 +5671,7 @@ "h": 1, "w": 24, "x": 0, - "y": 43 + "y": 44 }, "id": 191, "panels": [ @@ -5645,7 +5740,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13675 + "y": 13803 }, "id": 155, "options": { @@ -5740,7 +5835,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13675 + "y": 13803 }, "id": 111, "options": { @@ -5835,7 +5930,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13739 + "y": 13867 }, "id": 175, "options": { @@ -5930,7 +6025,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13739 + "y": 13867 }, "id": 173, "options": { @@ -6024,7 +6119,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13747 + "y": 13875 }, "id": 138, "options": { @@ -6119,7 +6214,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13747 + "y": 13875 }, "id": 172, "options": { @@ -6214,7 +6309,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13755 + "y": 13883 }, "id": 236, "options": { @@ -6253,7 +6348,7 @@ "h": 1, "w": 24, "x": 0, - "y": 44 + "y": 45 }, "id": 118, "panels": [ @@ -6322,7 +6417,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11828 + "y": 11956 }, "id": 127, "options": { @@ -6417,7 +6512,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11828 + "y": 11956 }, "id": 120, "options": { @@ -6511,7 +6606,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11836 + "y": 11964 }, "id": 128, "options": { @@ -6606,7 +6701,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11836 + "y": 11964 }, "id": 121, "options": { @@ -6700,7 +6795,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11844 + "y": 11972 }, "id": 129, "options": { @@ -6795,7 +6890,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11844 + "y": 11972 }, "id": 122, "options": { @@ -6889,7 +6984,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11852 + "y": 11980 }, "id": 130, "options": { @@ -6984,7 +7079,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11852 + "y": 11980 }, "id": 123, "options": { @@ -7078,7 +7173,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11860 + "y": 11988 }, "id": 131, "options": { @@ -7173,7 +7268,7 @@ "h": 8, "w": 12, "x": 
12, - "y": 11860 + "y": 11988 }, "id": 124, "options": { @@ -7267,7 +7362,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11868 + "y": 11996 }, "id": 132, "options": { @@ -7362,7 +7457,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11868 + "y": 11996 }, "id": 125, "options": { @@ -7456,7 +7551,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11876 + "y": 12004 }, "id": 119, "options": { @@ -7551,7 +7646,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11876 + "y": 12004 }, "id": 126, "options": { @@ -7590,7 +7685,7 @@ "h": 1, "w": 24, "x": 0, - "y": 45 + "y": 46 }, "id": 115, "panels": [ @@ -7659,7 +7754,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10253 + "y": 10381 }, "id": 101, "options": { @@ -7753,7 +7848,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10253 + "y": 10381 }, "id": 187, "options": { @@ -7848,7 +7943,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10261 + "y": 10389 }, "id": 113, "options": { @@ -7942,7 +8037,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10261 + "y": 10389 }, "id": 103, "options": { @@ -8037,7 +8132,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10269 + "y": 10397 }, "id": 102, "options": { @@ -8132,7 +8227,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10269 + "y": 10397 }, "id": 116, "options": { @@ -8226,7 +8321,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10277 + "y": 10405 }, "id": 135, "options": { @@ -8321,7 +8416,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10277 + "y": 10405 }, "id": 134, "options": { @@ -8415,7 +8510,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10285 + "y": 10413 }, "id": 136, "options": { @@ -8510,7 +8605,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10285 + "y": 10413 }, "id": 159, "options": { @@ -8549,7 +8644,7 @@ "h": 1, "w": 24, "x": 0, - "y": 46 + "y": 47 }, "id": 193, "panels": [ @@ -8618,7 +8713,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15402 + "y": 15530 }, "id": 141, "options": { @@ -8712,7 +8807,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15402 + "y": 15530 }, "id": 148, "options": { @@ -8806,7 +8901,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15410 + "y": 15538 }, "id": 142, "options": { @@ -8900,7 +8995,7 @@ "h": 8, "w": 12, "x": 12, 
- "y": 15410 + "y": 15538 }, "id": 149, "options": { @@ -8994,7 +9089,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15418 + "y": 15546 }, "id": 143, "options": { @@ -9088,7 +9183,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15418 + "y": 15546 }, "id": 150, "options": { @@ -9182,7 +9277,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15426 + "y": 15554 }, "id": 144, "options": { @@ -9276,7 +9371,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15426 + "y": 15554 }, "id": 151, "options": { @@ -9370,7 +9465,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15434 + "y": 15562 }, "id": 145, "options": { @@ -9464,7 +9559,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15434 + "y": 15562 }, "id": 152, "options": { @@ -9558,7 +9653,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15442 + "y": 15570 }, "id": 146, "options": { @@ -9652,7 +9747,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15442 + "y": 15570 }, "id": 153, "options": { @@ -9746,7 +9841,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15450 + "y": 15578 }, "id": 147, "options": { @@ -9840,7 +9935,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15450 + "y": 15578 }, "id": 154, "options": { @@ -9879,7 +9974,7 @@ "h": 1, "w": 24, "x": 0, - "y": 47 + "y": 48 }, "id": 192, "panels": [ @@ -9948,7 +10043,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15178 + "y": 15306 }, "id": 190, "options": { @@ -10043,7 +10138,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15178 + "y": 15306 }, "id": 184, "options": { @@ -10137,7 +10232,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15186 + "y": 15314 }, "id": 188, "options": { @@ -10232,7 +10327,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15186 + "y": 15314 }, "id": 186, "options": { @@ -10327,7 +10422,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15194 + "y": 15322 }, "id": 185, "options": { @@ -10421,7 +10516,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15194 + "y": 15322 }, "id": 189, "options": { @@ -10515,7 +10610,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15202 + "y": 15330 }, "id": 181, "options": { @@ -10610,7 +10705,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15202 + "y": 15330 }, "id": 182, "options": { @@ -10649,7 +10744,7 @@ "h": 
1, "w": 24, "x": 0, - "y": 48 + "y": 49 }, "id": 194, "panels": [ @@ -10718,7 +10813,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11128 + "y": 11256 }, "id": 170, "options": { @@ -10813,7 +10908,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11128 + "y": 11256 }, "id": 171, "options": { @@ -10907,7 +11002,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11528 + "y": 11656 }, "id": 162, "options": { @@ -11002,7 +11097,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11528 + "y": 11656 }, "id": 108, "options": { @@ -11097,7 +11192,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11536 + "y": 11664 }, "id": 169, "options": { @@ -11191,7 +11286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11536 + "y": 11664 }, "id": 166, "options": { @@ -11285,7 +11380,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11544 + "y": 11672 }, "id": 157, "options": { @@ -11380,7 +11475,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11544 + "y": 11672 }, "id": 158, "options": { @@ -11474,7 +11569,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11552 + "y": 11680 }, "id": 167, "options": { @@ -11569,7 +11664,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11552 + "y": 11680 }, "id": 168, "options": { @@ -11663,7 +11758,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11560 + "y": 11688 }, "id": 137, "options": { @@ -11758,7 +11853,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11560 + "y": 11688 }, "id": 183, "options": { @@ -11853,7 +11948,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11568 + "y": 11696 }, "id": 241, "options": { @@ -11948,7 +12043,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11568 + "y": 11696 }, "id": 242, "options": { @@ -12043,7 +12138,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11576 + "y": 11704 }, "id": 243, "options": { @@ -12138,7 +12233,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11576 + "y": 11704 }, "id": 244, "options": { @@ -12233,7 +12328,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11584 + "y": 11712 }, "id": 245, "options": { @@ -12328,7 +12423,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11584 + "y": 11712 }, "id": 246, "options": { @@ -12423,7 +12518,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11592 + "y": 11720 }, "id": 247, 
"options": { @@ -12518,7 +12613,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11592 + "y": 11720 }, "id": 248, "options": { @@ -12557,7 +12652,7 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 50 }, "id": 195, "panels": [ @@ -12626,7 +12721,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10353 + "y": 10481 }, "id": 161, "options": { @@ -12720,7 +12815,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10353 + "y": 10481 }, "id": 104, "options": { @@ -12815,7 +12910,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10361 + "y": 10489 }, "id": 105, "options": { @@ -12909,7 +13004,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10361 + "y": 10489 }, "id": 164, "options": { @@ -13003,7 +13098,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10369 + "y": 10497 }, "id": 163, "options": { @@ -13098,7 +13193,7 @@ "h": 8, "w": 12, "x": 12, - "y": 10369 + "y": 10497 }, "id": 165, "options": { @@ -13193,7 +13288,7 @@ "h": 8, "w": 12, "x": 0, - "y": 10377 + "y": 10505 }, "id": 223, "options": { @@ -13232,7 +13327,7 @@ "h": 1, "w": 24, "x": 0, - "y": 50 + "y": 51 }, "id": 210, "panels": [ @@ -13302,7 +13397,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11130 + "y": 11258 }, "id": 211, "options": { @@ -13397,7 +13492,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11130 + "y": 11258 }, "id": 212, "options": { @@ -13492,7 +13587,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11210 + "y": 11338 }, "id": 213, "options": { @@ -13587,7 +13682,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11210 + "y": 11338 }, "id": 214, "options": { @@ -13682,7 +13777,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11218 + "y": 11346 }, "id": 215, "options": { @@ -13777,7 +13872,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11218 + "y": 11346 }, "id": 216, "options": { @@ -13872,7 +13967,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11226 + "y": 11354 }, "id": 217, "options": { @@ -13967,7 +14062,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11226 + "y": 11354 }, "id": 218, "options": { @@ -14006,7 +14101,7 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 52 }, "id": 230, "panels": [ @@ -14076,7 +14171,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13972 
+ "y": 14100 }, "id": 231, "options": { @@ -14170,7 +14265,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13972 + "y": 14100 }, "id": 178, "options": { @@ -14265,7 +14360,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13980 + "y": 14108 }, "id": 179, "options": { @@ -14359,7 +14454,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13980 + "y": 14108 }, "id": 156, "options": { @@ -14398,7 +14493,7 @@ "h": 1, "w": 24, "x": 0, - "y": 52 + "y": 53 }, "id": 250, "panels": [ @@ -14468,7 +14563,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13764 + "y": 13892 }, "id": 251, "options": { @@ -14563,7 +14658,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13764 + "y": 13892 }, "id": 252, "options": { @@ -14658,7 +14753,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13876 + "y": 14004 }, "id": 253, "options": { @@ -14753,7 +14848,7 @@ "h": 8, "w": 12, "x": 12, - "y": 13876 + "y": 14004 }, "id": 254, "options": { @@ -14848,7 +14943,7 @@ "h": 8, "w": 12, "x": 0, - "y": 13884 + "y": 14012 }, "id": 273, "options": { @@ -14887,7 +14982,7 @@ "h": 1, "w": 24, "x": 0, - "y": 53 + "y": 54 }, "id": 100, "panels": [ @@ -14956,7 +15051,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11133 + "y": 11261 }, "id": 107, "options": { @@ -15051,7 +15146,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11133 + "y": 11261 }, "id": 110, "options": { @@ -15146,7 +15241,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11141 + "y": 11269 }, "id": 180, "options": { @@ -15240,7 +15335,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11141 + "y": 11269 }, "id": 160, "options": { @@ -15334,7 +15429,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11149 + "y": 11277 }, "id": 139, "options": { @@ -15428,7 +15523,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11149 + "y": 11277 }, "id": 176, "options": { @@ -15522,7 +15617,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11157 + "y": 11285 }, "id": 133, "options": { @@ -15617,7 +15712,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11157 + "y": 11285 }, "id": 221, "options": { @@ -15711,7 +15806,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11165 + "y": 11293 }, "id": 177, "options": { @@ -15806,7 +15901,7 @@ 
"h": 8, "w": 12, "x": 12, - "y": 11165 + "y": 11293 }, "id": 271, "options": { @@ -15901,7 +15996,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11173 + "y": 11301 }, "id": 274, "options": { @@ -15996,7 +16091,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11173 + "y": 11301 }, "id": 272, "options": { @@ -16091,7 +16186,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11181 + "y": 11309 }, "id": 232, "options": { @@ -16186,7 +16281,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11181 + "y": 11309 }, "id": 233, "options": { @@ -16281,7 +16376,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11189 + "y": 11317 }, "id": 234, "options": { @@ -16376,7 +16471,7 @@ "h": 8, "w": 12, "x": 12, - "y": 11189 + "y": 11317 }, "id": 235, "options": { @@ -16471,7 +16566,7 @@ "h": 8, "w": 12, "x": 0, - "y": 11197 + "y": 11325 }, "id": 222, "options": { @@ -16520,6 +16615,6 @@ "timezone": "browser", "title": "CryptoSim", "uid": "adnqfm4", - "version": 31, + "version": 11, "weekStart": "" -} +} \ No newline at end of file diff --git a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go index 0282e0e251..9a48afc410 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go +++ b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go @@ -38,6 +38,21 @@ func run() error { return fmt.Errorf("load config: %w", err) } + if cfg.LogDir == "" { + return fmt.Errorf("LogDir is empty, refusing to proceed") + } + + if cfg.DeleteDataDirOnStartup { + resolved, err := filepath.Abs(cfg.LogDir) + if err != nil { + return fmt.Errorf("failed to resolve log directory: %w", err) + } + fmt.Fprintf(os.Stderr, "Deleting log directory: %s\n", resolved) + if err := os.RemoveAll(resolved); err != nil { + return fmt.Errorf("failed to delete log directory %s: %w", resolved, err) + } + } + logDir, err := cryptosim.ResolveAndCreateDir(cfg.LogDir) if err != nil { return fmt.Errorf("resolve log dir: %w", err) diff --git a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go 
b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go index d2bd3e00ee..7847630a07 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go +++ b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go @@ -91,6 +91,9 @@ func run() error { fmt.Printf("%s\n", configString) if config.DeleteDataDirOnStartup { + if config.DataDir == "" { + return fmt.Errorf("DataDir is empty, refusing to delete") + } resolved, err := filepath.Abs(config.DataDir) if err != nil { return fmt.Errorf("failed to resolve data directory: %w", err) @@ -148,14 +151,19 @@ func run() error { cs.BlockUntilHalted() if config.DeleteDataDirOnShutdown { - resolved, err := filepath.Abs(config.DataDir) - if err != nil { - return fmt.Errorf("failed to resolve data directory: %w", err) - } - fmt.Printf("Deleting data directory: %s\n", resolved) - err = os.RemoveAll(resolved) - if err != nil { - return fmt.Errorf("failed to delete data directory: %w", err) + for _, dir := range []string{config.DataDir, config.LogDir} { + if dir == "" { + return fmt.Errorf("directory path is empty, refusing to delete") + } + resolved, err := filepath.Abs(dir) + if err != nil { + return fmt.Errorf("failed to resolve directory: %w", err) + } + fmt.Printf("Deleting directory: %s\n", resolved) + err = os.RemoveAll(resolved) + if err != nil { + return fmt.Errorf("failed to delete directory %s: %w", resolved, err) + } } } diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index 6330157527..9eb7ebb037 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -138,10 +138,10 @@ type CryptoSimConfig struct { // If false, Enter has no effect. EnableSuspension bool - // If true, the data directory will be deleted on startup if it exists. + // If true, the data directory and log directory will be deleted on startup if they exist. 
DeleteDataDirOnStartup bool - // If true, the data directory will be deleted on a clean shutdown. + // If true, the data directory and log directory will be deleted on a clean shutdown. DeleteDataDirOnShutdown bool // Configures the FlatKV database. Ignored if Backend is not "FlatKV". diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 8e7a4619f5..3491c7f363 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -142,6 +142,8 @@ func NewCommitStore( cfg *Config, ) (*CommitStore, error) { + logger.Info("Creating new FlatKV commit store", "config", cfg) + cfg.InitializeDataDirectories() if err := cfg.Validate(); err != nil { From 2d880762994b3ad8a7480480761499aeab8fffcb Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 10:35:59 -0500 Subject: [PATCH 074/119] fix unit test --- sei-db/db_engine/pebbledb/db_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index c583224d87..468b7d3cd8 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -98,19 +98,19 @@ func TestErrNotFoundConsistency(t *testing.T) { } func TestGetReturnsCopy(t *testing.T) { - forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + cfg := DefaultTestConfig(t) + cfg.CacheSize = 0 + db := openDB(t, &cfg) - require.NoError(t, db.Set([]byte("k"), []byte("v"), types.WriteOptions{Sync: false})) + require.NoError(t, db.Set([]byte("k"), []byte("v"), types.WriteOptions{Sync: false})) - got, err := db.Get([]byte("k")) - require.NoError(t, err) - got[0] = 'X' + got, err := db.Get([]byte("k")) + require.NoError(t, err) + got[0] = 'X' - got2, err := db.Get([]byte("k")) - require.NoError(t, err) - require.Equal(t, "v", string(got2), "stored value should remain unchanged") - }) + got2, err := db.Get([]byte("k")) + require.NoError(t, 
err) + require.Equal(t, "v", string(got2), "stored value should remain unchanged") } func TestBatchLenResetDelete(t *testing.T) { From ccad07433b4e7dc653a3b50640cf49576ba9427d Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 10:47:11 -0500 Subject: [PATCH 075/119] fix unit tests --- sei-db/state_db/sc/flatkv/snapshot_test.go | 19 ++++++------------- sei-db/state_db/sc/flatkv/store.go | 2 -- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 4b21f426af..6415293f66 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -1391,12 +1391,9 @@ func TestReopenAfterDeletes(t *testing.T) { // ============================================================================= func TestWALTruncationThenRollback(t *testing.T) { - dir := t.TempDir() - cfg := &Config{ - DataDir: filepath.Join(dir, flatkvRootDir), - SnapshotInterval: 5, - SnapshotKeepRecent: 1, - } + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 1 s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) _, err = s.LoadVersion(0, false) @@ -1432,13 +1429,9 @@ func TestWALTruncationThenRollback(t *testing.T) { // ============================================================================= func TestReopenAfterSnapshotAndTruncation(t *testing.T) { - dir := t.TempDir() - dbDir := filepath.Join(dir, flatkvRootDir) - cfg := &Config{ - DataDir: dbDir, - SnapshotInterval: 5, - SnapshotKeepRecent: 1, - } + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 1 s, err := NewCommitStore(t.Context(), cfg) require.NoError(t, err) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 3491c7f363..8e7a4619f5 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -142,8 +142,6 @@ func NewCommitStore( cfg *Config, ) 
(*CommitStore, error) { - logger.Info("Creating new FlatKV commit store", "config", cfg) - cfg.InitializeDataDirectories() if err := cfg.Validate(); err != nil { From 38ffd3527a38b50b13467bc4a77ef7144980809a Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 20 Mar 2026 11:08:15 -0500 Subject: [PATCH 076/119] fix unit test --- sei-db/state_db/sc/flatkv/store.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 8e7a4619f5..2cc94d27ce 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -179,6 +179,19 @@ func NewCommitStore( }, nil } +// resetPools recreates the context and thread pools after a full Close(). +func (s *CommitStore) resetPools() { + coreCount := runtime.NumCPU() + + s.ctx, s.cancel = context.WithCancel(context.Background()) + + readPoolSize := int(s.config.ReaderThreadsPerCore*float64(coreCount) + float64(s.config.ReaderConstantThreadCount)) + s.readPool = threading.NewFixedPool(s.ctx, "flatkv-read", readPoolSize, s.config.ReaderPoolQueueSize) + + miscPoolSize := int(s.config.MiscPoolThreadsPerCore*float64(coreCount) + float64(s.config.MiscConstantThreadCount)) + s.miscPool = threading.NewElasticPool(s.ctx, "flatkv-misc", miscPoolSize) +} + func (s *CommitStore) flatkvDir() string { return s.config.DataDir } @@ -615,8 +628,12 @@ func (s *CommitStore) Importer(version int64) (types.Importer, error) { return nil, errReadOnly } // rootmulti.Restore closes the store before creating an importer. - // Reopen the DBs so the importer can write data. + // Close() cancels the context (killing pools), so recreate them + // before reopening the DBs. 
if s.isClosed() { + if s.ctx.Err() != nil { + s.resetPools() + } if err := s.open(); err != nil { return nil, fmt.Errorf("reopen store for import: %w", err) } From f143d3028aa32a8c12075a1a56fdde0d71cf869c Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 25 Mar 2026 08:21:54 -0400 Subject: [PATCH 077/119] made suggested changes --- sei-cosmos/storev2/rootmulti/store.go | 8 +-- sei-db/config/sc_config.go | 4 +- .../bench/wrappers/db_implementations.go | 5 +- sei-db/state_db/sc/composite/store.go | 8 +-- sei-db/state_db/sc/composite/store_test.go | 56 ++++++++----------- sei-db/state_db/sc/flatkv/store_write.go | 2 - 6 files changed, 30 insertions(+), 53 deletions(-) diff --git a/sei-cosmos/storev2/rootmulti/store.go b/sei-cosmos/storev2/rootmulti/store.go index 0ef5e7c7fc..baa0c14efc 100644 --- a/sei-cosmos/storev2/rootmulti/store.go +++ b/sei-cosmos/storev2/rootmulti/store.go @@ -92,16 +92,10 @@ func NewStore( limiter = rate.NewLimiter(rate.Limit(scConfig.HistoricalProofRateLimit), burst) } ctx := context.Background() - scStore, err := composite.NewCompositeCommitStore(ctx, scDir, scConfig) - if err != nil { - panic(err) - } + scStore := composite.NewCompositeCommitStore(ctx, scDir, scConfig) if err := scStore.CleanupCrashArtifacts(); err != nil { panic(err) } - if err := scStore.CleanupCrashArtifacts(); err != nil { - panic(fmt.Errorf("failed to cleanup crash artifacts: %w", err)) - } store := &Store{ scStore: scStore, storesParams: make(map[types.StoreKey]storeParams), diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 92d16a5abc..fc1de69c65 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -46,7 +46,7 @@ type StateCommitConfig struct { MemIAVLConfig memiavl.Config // FlatKVConfig is the configuration for the FlatKV (EVM) backend - FlatKVConfig *flatkv.Config + FlatKVConfig flatkv.Config // Max concurrent historical proof queries (RPC /store path). 
HistoricalProofMaxInFlight int `mapstructure:"historical-proof-max-inflight"` @@ -67,7 +67,7 @@ func DefaultStateCommitConfig() StateCommitConfig { ReadMode: CosmosOnlyRead, EnableLatticeHash: false, MemIAVLConfig: memiavl.DefaultConfig(), - FlatKVConfig: flatkv.DefaultConfig(), + FlatKVConfig: *flatkv.DefaultConfig(), HistoricalProofMaxInFlight: DefaultSCHistoricalProofMaxInFlight, HistoricalProofRateLimit: DefaultSCHistoricalProofRateLimit, diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index 9b4f48ccdc..bba15e8799 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -73,10 +73,7 @@ func newCompositeCommitStore(ctx context.Context, dbDir string, writeMode config cfg.MemIAVLConfig.AsyncCommitBuffer = 10 cfg.MemIAVLConfig.SnapshotInterval = 100 - cs, err := composite.NewCompositeCommitStore(ctx, dbDir, cfg) - if err != nil { - return nil, fmt.Errorf("failed to create Composite commit store: %w", err) - } + cs := composite.NewCompositeCommitStore(ctx, dbDir, cfg) if err := cs.CleanupCrashArtifacts(); err != nil { return nil, fmt.Errorf("failed to cleanup crash artifacts: %w", err) } diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index 8cab6287b5..1d49e4121f 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -55,7 +55,7 @@ func NewCompositeCommitStore( ctx context.Context, homeDir string, cfg config.StateCommitConfig, -) (*CompositeCommitStore, error) { +) *CompositeCommitStore { // Always initialize the Cosmos backend (creates struct only, not opened) cosmosCommitter := memiavl.NewCommitStore(homeDir, cfg.MemIAVLConfig) @@ -70,13 +70,13 @@ func NewCompositeCommitStore( if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { cfg.FlatKVConfig.DataDir = filepath.Join(homeDir, "data", "flatkv") var err error - 
store.evmCommitter, err = flatkv.NewCommitStore(ctx, cfg.FlatKVConfig) + store.evmCommitter, err = flatkv.NewCommitStore(ctx, &cfg.FlatKVConfig) if err != nil { - return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) + panic(fmt.Errorf("failed to create FlatKV commit store: %w", err)) } } - return store, nil + return store } // Initialize initializes the store with the given store names diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index cbee41ad10..6ed8710190 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -45,11 +45,10 @@ func TestCompositeStoreBasicOperations(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test", EVMStoreName}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, cs.Close()) @@ -95,11 +94,10 @@ func TestEmptyChangesets(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, cs.Close()) @@ -117,11 +115,10 @@ func TestLoadVersionCopyExisting(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) err = cs.ApplyChangeSets([]*proto.NamedChangeSet{ @@ 
-155,11 +152,10 @@ func TestWorkingAndLastCommitInfo(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) defer func() { require.NoError(t, cs.Close()) @@ -234,10 +230,9 @@ func TestLatticeHashCommitInfo(t *testing.T) { cfg.WriteMode = tt.writeMode cfg.EnableLatticeHash = tt.enableLattice - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test", EVMStoreName}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) defer cs.Close() @@ -335,11 +330,10 @@ func TestRollback(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) // Commit a few versions @@ -372,11 +366,10 @@ func TestGetVersions(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -396,8 +389,7 @@ func TestGetVersions(t *testing.T) { } require.NoError(t, cs.Close()) - cs2, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs2 := NewCompositeCommitStore(t.Context(), dir, cfg) cs2.Initialize([]string{"test"}) latestVersion, err := 
cs2.GetLatestVersion() @@ -410,11 +402,10 @@ func TestReadOnlyLoadVersionSoftFailsWhenFlatKVUnavailable(t *testing.T) { cfg := config.DefaultStateCommitConfig() cfg.MemIAVLConfig.AsyncCommitBuffer = 0 - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"test"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) err = cs.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -510,10 +501,9 @@ func TestExportImportSplitWrite(t *testing.T) { // --- Source store: write cosmos + EVM data --- srcDir := t.TempDir() - src, err := NewCompositeCommitStore(t.Context(), srcDir, cfg) - require.NoError(t, err) + src := NewCompositeCommitStore(t.Context(), srcDir, cfg) src.Initialize([]string{"bank", EVMStoreName}) - _, err = src.LoadVersion(0, false) + _, err := src.LoadVersion(0, false) require.NoError(t, err) addr := flatkv.Address{0xAA} @@ -559,8 +549,7 @@ func TestExportImportSplitWrite(t *testing.T) { // --- Destination store: import --- dstDir := t.TempDir() - dst, err := NewCompositeCommitStore(t.Context(), dstDir, cfg) - require.NoError(t, err) + dst := NewCompositeCommitStore(t.Context(), dstDir, cfg) dst.Initialize([]string{"bank", EVMStoreName}) _, err = dst.LoadVersion(0, false) require.NoError(t, err) @@ -599,10 +588,9 @@ func TestExportCosmosOnlyHasNoFlatKVModule(t *testing.T) { cfg.MemIAVLConfig.AsyncCommitBuffer = 0 dir := t.TempDir() - cs, err := NewCompositeCommitStore(t.Context(), dir, cfg) - require.NoError(t, err) + cs := NewCompositeCommitStore(t.Context(), dir, cfg) cs.Initialize([]string{"bank"}) - _, err = cs.LoadVersion(0, false) + _, err := cs.LoadVersion(0, false) require.NoError(t, err) err = cs.ApplyChangeSets([]*proto.NamedChangeSet{ diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index a46f168f17..b6ecdfdac9 100644 --- 
a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -32,8 +32,6 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return fmt.Errorf("failed to batch read old values: %w", err) } - s.phaseTimer.SetPhase("apply_change_sets_prepare") - s.phaseTimer.SetPhase("apply_change_sets_prepare") s.pendingChangeSets = append(s.pendingChangeSets, cs...) From a18fd93a2a6080e7fd99bb442904ec04b477c1b4 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 25 Mar 2026 08:49:44 -0400 Subject: [PATCH 078/119] config changes --- sei-db/db_engine/dbcache/cache.go | 23 ++------ sei-db/db_engine/dbcache/cache_config.go | 43 +++++++++++++++ sei-db/db_engine/dbcache/cache_impl.go | 37 +++++-------- sei-db/db_engine/dbcache/cache_impl_test.go | 54 +++++++++++++------ sei-db/db_engine/pebbledb/db.go | 22 ++------ sei-db/db_engine/pebbledb/db_test.go | 52 ++++++++++-------- sei-db/db_engine/pebbledb/pebbledb_config.go | 23 ++------ .../pebbledb/pebbledb_test_config.go | 16 ++++-- .../bench/cryptosim/config/standard-perf.json | 12 ++--- sei-db/state_db/sc/flatkv/config.go | 30 ++++++++--- .../state_db/sc/flatkv/flatkv_test_config.go | 19 +++++-- sei-db/state_db/sc/flatkv/store.go | 15 +++--- sei-db/state_db/sc/flatkv/store_test.go | 3 +- 13 files changed, 197 insertions(+), 152 deletions(-) create mode 100644 sei-db/db_engine/dbcache/cache_config.go diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index ccbaf6464c..604cd4d7d7 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -3,7 +3,6 @@ package dbcache import ( "context" "fmt" - "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -93,32 +92,18 @@ func (u *CacheUpdate) IsDelete() bool { return u.Value == nil } -// BuildCache creates a new Cache. +// BuildCache creates a new Cache. 
When cfg.MaxSize is 0 a no-op (passthrough) cache is returned. func BuildCache( ctx context.Context, - shardCount uint64, - maxSize uint64, + cfg *CacheConfig, readPool threading.Pool, miscPool threading.Pool, - estimatedOverheadPerEntry uint64, - cacheName string, - metricsScrapeInterval time.Duration, ) (Cache, error) { - - if maxSize == 0 { + if cfg.MaxSize == 0 { return NewNoOpCache(), nil } - cache, err := NewStandardCache( - ctx, - shardCount, - maxSize, - readPool, - miscPool, - estimatedOverheadPerEntry, - cacheName, - metricsScrapeInterval, - ) + cache, err := NewStandardCache(ctx, cfg, readPool, miscPool) if err != nil { return nil, fmt.Errorf("failed to create cache: %w", err) } diff --git a/sei-db/db_engine/dbcache/cache_config.go b/sei-db/db_engine/dbcache/cache_config.go new file mode 100644 index 0000000000..a1f9f3c3a7 --- /dev/null +++ b/sei-db/db_engine/dbcache/cache_config.go @@ -0,0 +1,43 @@ +package dbcache + +import ( + "fmt" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +// CacheConfig defines configuration for a sharded LRU read-through cache. +type CacheConfig struct { + // The number of shards in the cache. Must be a power of two and greater than 0. + ShardCount uint64 + // The maximum size of the cache, in bytes. 0 disables the cache. + MaxSize uint64 + // The estimated overhead per entry, in bytes. Used to calculate effective cache + // capacity. Derive experimentally; may differ between builds and architectures. + EstimatedOverheadPerEntry uint64 + // Name used as the "cache" attribute on OTel metrics. Empty string disables metrics. + MetricsName string + // How often to scrape cache size for metrics. Ignored if MetricsName is empty. + MetricsScrapeInterval time.Duration +} + +// DefaultCacheConfig returns a CacheConfig with sensible defaults. 
+func DefaultCacheConfig() CacheConfig { + return CacheConfig{ + ShardCount: 8, + MaxSize: 512 * unit.MB, + EstimatedOverheadPerEntry: DefaultEstimatedOverheadPerEntry, + } +} + +// Validate checks that the configuration is sane and returns an error if it is not. +func (c *CacheConfig) Validate() error { + if c.MaxSize > 0 && (c.ShardCount&(c.ShardCount-1)) != 0 { + return fmt.Errorf("shard count must be a power of two") + } + if c.MetricsName != "" && c.MetricsScrapeInterval <= 0 { + return fmt.Errorf("metrics scrape interval must be positive when metrics name is set") + } + return nil +} diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index 1292f74caf..8dc5704f50 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "sync" - "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -29,45 +28,33 @@ type cache struct { miscPool threading.Pool } -// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the -// background size scrape runs every metricsScrapeInterval. +// Creates a new Cache. If cfg.MetricsName is non-empty, OTel metrics are enabled and the +// background size scrape runs every cfg.MetricsScrapeInterval. func NewStandardCache( ctx context.Context, - // The number of shards in the cache. Must be a power of two and greater than 0. - shardCount uint64, - // The maximum size of the cache, in bytes. - maxSize uint64, - // A work pool for reading from the DB. + cfg *CacheConfig, readPool threading.Pool, - // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, - // The estimated overhead per entry, in bytes. This is used to calculate the maximum size of the cache. 
- // This value should be derived experimentally, and may differ between different builds and architectures. - estimatedOverheadPerEntry uint64, - // Name used as the "cache" attribute on metrics. Empty string disables metrics. - cacheName string, - // How often to scrape cache size for metrics. Ignored if cacheName is empty. - metricsScrapeInterval time.Duration, ) (Cache, error) { - if shardCount == 0 || (shardCount&(shardCount-1)) != 0 { + if cfg.ShardCount == 0 || (cfg.ShardCount&(cfg.ShardCount-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo } - if maxSize == 0 { + if cfg.MaxSize == 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } - shardManager, err := newShardManager(shardCount) + shardManager, err := newShardManager(cfg.ShardCount) if err != nil { return nil, fmt.Errorf("failed to create shard manager: %w", err) } - sizePerShard := maxSize / shardCount + sizePerShard := cfg.MaxSize / cfg.ShardCount if sizePerShard == 0 { return nil, fmt.Errorf("maxSize must be greater than shardCount") } - shards := make([]*shard, shardCount) - for i := uint64(0); i < shardCount; i++ { - shards[i], err = NewShard(ctx, readPool, sizePerShard, estimatedOverheadPerEntry) + shards := make([]*shard, cfg.ShardCount) + for i := uint64(0); i < cfg.ShardCount; i++ { + shards[i], err = NewShard(ctx, readPool, sizePerShard, cfg.EstimatedOverheadPerEntry) if err != nil { return nil, fmt.Errorf("failed to create shard: %w", err) } @@ -81,8 +68,8 @@ func NewStandardCache( miscPool: miscPool, } - if cacheName != "" { - metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) + if cfg.MetricsName != "" { + metrics := newCacheMetrics(ctx, cfg.MetricsName, cfg.MetricsScrapeInterval, c.getCacheSizeInfo) for _, s := range c.shards { s.metrics = metrics } diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 5433019c93..4c44d5283a 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ 
b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -31,7 +31,9 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin return v, true, nil } pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: shardCount, MaxSize: maxSize, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) return c, read } @@ -42,42 +44,54 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin func TestNewStandardCacheValid(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheSingleShard(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheShardCountZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 0, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{3, 5, 6, 7, 9, 10} { - _, err := NewStandardCache(context.Background(), n, 1024, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: n, MaxSize: 1024, 
EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err, "shardCount=%d", n) } } func TestNewStandardCacheMaxSizeZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 0, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { pool := threading.NewAdHocPool() // shardCount=4, maxSize=3 → sizePerShard=0 - _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 3, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } @@ -85,7 +99,9 @@ func TestNewStandardCacheWithMetrics(t *testing.T) { pool := threading.NewAdHocPool() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c, err := NewStandardCache(ctx, 2, 1024, pool, pool, 0, "test-cache", time.Hour) + c, err := NewStandardCache(ctx, &CacheConfig{ + ShardCount: 2, MaxSize: 1024, MetricsName: "test-cache", MetricsScrapeInterval: time.Hour, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } @@ -144,7 +160,7 @@ func TestCacheGetDBError(t *testing.T) { dbErr := errors.New("db fail") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, pool, pool) _, _, err := c.Get(readFunc, []byte("k"), true) require.Error(t, err) @@ -158,7 +174,7 @@ func TestCacheGetSameKeyConsistentShard(t *testing.T) { return []byte("val"), true, nil } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, 0, "", 0) + c, _ := 
NewStandardCache(context.Background(), &CacheConfig{ShardCount: 4, MaxSize: 4096}, pool, pool) val1, _, _ := c.Get(readFunc, []byte("key"), true) val2, _, _ := c.Get(readFunc, []byte("key"), true) @@ -327,7 +343,7 @@ func TestCacheBatchSetEmpty(t *testing.T) { func TestCacheBatchSetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) err := c.BatchSet([]CacheUpdate{ {Key: []byte("k"), Value: []byte("v")}, @@ -405,7 +421,7 @@ func TestCacheBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, pool, pool) keys := map[string]types.BatchGetResult{"fail": {}} require.NoError(t, c.BatchGet(readFunc, keys), "BatchGet itself should not fail") @@ -420,7 +436,7 @@ func TestCacheBatchGetEmpty(t *testing.T) { func TestCacheBatchGetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) keys := map[string]types.BatchGetResult{"k": {}} err := c.BatchGet(noopRead, keys) @@ -429,7 +445,7 @@ func TestCacheBatchGetPoolFailure(t *testing.T) { func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { miscPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, miscPool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, &failPool{}, miscPool) keys := 
map[string]types.BatchGetResult{"a": {}, "b": {}} require.NoError(t, c.BatchGet(noopRead, keys)) @@ -505,7 +521,9 @@ func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { func TestCacheSizeInfoIncludesOverhead(t *testing.T) { const overhead = 200 pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 1, 100_000, pool, pool, overhead, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 100_000, EstimatedOverheadPerEntry: overhead, + }, pool, pool) require.NoError(t, err) impl := c.(*cache) @@ -523,7 +541,9 @@ func TestCacheOverheadCausesEarlierEviction(t *testing.T) { pool := threading.NewAdHocPool() // Single shard, maxSize=500. Each 10-byte value entry costs 1+10+200=211 bytes. // Two entries = 422 < 500. Three entries = 633 > 500, so one must be evicted. - c, err := NewStandardCache(context.Background(), 1, 500, pool, pool, overhead, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 500, EstimatedOverheadPerEntry: overhead, + }, pool, pool) require.NoError(t, err) impl := c.(*cache) @@ -731,7 +751,7 @@ func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { - c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, 0, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ShardCount: n, MaxSize: n * 100}, pool, pool) require.NoError(t, err, "shardCount=%d", n) require.NotNil(t, c, "shardCount=%d", n) } diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index be924bad12..1cda457a57 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -95,12 +95,12 @@ func Open( }, nil } -// OpenCached opens a Pebble-backed DB and wraps it with a read-through cache. 
-// Cache behaviour is controlled by config: when CacheSize is 0 a no-op cache -// is used, otherwise a sharded LRU cache is created. +// OpenWithCache opens a Pebble-backed DB and wraps it with a read-through cache. +// When cacheConfig.MaxSize is 0 a no-op (passthrough) cache is used. func OpenWithCache( ctx context.Context, config *PebbleDBConfig, + cacheConfig *dbcache.CacheConfig, comparer *pebble.Comparer, readPool threading.Pool, miscPool threading.Pool, @@ -110,21 +110,7 @@ func OpenWithCache( return nil, fmt.Errorf("failed to open database: %w", err) } - var cacheName string - if config.EnableMetrics { - cacheName = filepath.Base(config.DataDir) - } - - cache, err := dbcache.BuildCache( - ctx, - config.CacheShardCount, - config.CacheSize, - readPool, - miscPool, - config.EstimatedOverheadPerEntry, - cacheName, - config.MetricsScrapeInterval, - ) + cache, err := dbcache.BuildCache(ctx, cacheConfig, readPool, miscPool) if err != nil { _ = db.Close() return nil, fmt.Errorf("failed to create cache: %w", err) diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 468b7d3cd8..8b7ac68439 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -10,12 +10,13 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) // forEachCacheMode runs fn once with a warm cache and once with caching disabled, // so cache-sensitive tests exercise both the cache and the raw storage layer. 
-func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig)) { +func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig)) { for _, mode := range []struct { name string cacheSize uint64 @@ -25,15 +26,16 @@ func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig)) { } { t.Run(mode.name, func(t *testing.T) { cfg := DefaultTestConfig(t) - cfg.CacheSize = mode.cacheSize - fn(t, cfg) + cacheCfg := DefaultTestCacheConfig() + cacheCfg.MaxSize = mode.cacheSize + fn(t, cfg, cacheCfg) }) } } -func openDB(t *testing.T, cfg *PebbleDBConfig) types.KeyValueDB { +func openDB(t *testing.T, cfg *PebbleDBConfig, cacheCfg *dbcache.CacheConfig) types.KeyValueDB { t.Helper() - db, err := OpenWithCache(t.Context(), cfg, pebble.DefaultComparer, + db, err := OpenWithCache(t.Context(), cfg, cacheCfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -45,8 +47,8 @@ func openDB(t *testing.T, cfg *PebbleDBConfig) types.KeyValueDB { // --------------------------------------------------------------------------- func TestDBGetSetDelete(t *testing.T) { - forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) key := []byte("k1") val := []byte("v1") @@ -68,8 +70,8 @@ func TestDBGetSetDelete(t *testing.T) { } func TestBatchAtomicWrite(t *testing.T) { - forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) b := db.NewBatch() t.Cleanup(func() { require.NoError(t, b.Close()) }) @@ -87,8 +89,8 @@ func TestBatchAtomicWrite(t *testing.T) { } func TestErrNotFoundConsistency(t *testing.T) { - 
forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) _, err := db.Get([]byte("missing-key")) require.Error(t, err) @@ -99,8 +101,9 @@ func TestErrNotFoundConsistency(t *testing.T) { func TestGetReturnsCopy(t *testing.T) { cfg := DefaultTestConfig(t) - cfg.CacheSize = 0 - db := openDB(t, &cfg) + cacheCfg := DefaultTestCacheConfig() + cacheCfg.MaxSize = 0 + db := openDB(t, &cfg, &cacheCfg) require.NoError(t, db.Set([]byte("k"), []byte("v"), types.WriteOptions{Sync: false})) @@ -114,8 +117,8 @@ func TestGetReturnsCopy(t *testing.T) { } func TestBatchLenResetDelete(t *testing.T) { - forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) require.NoError(t, db.Set([]byte("to-delete"), []byte("val"), types.WriteOptions{Sync: false})) @@ -141,8 +144,8 @@ func TestBatchLenResetDelete(t *testing.T) { } func TestFlush(t *testing.T) { - forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig) { - db := openDB(t, &cfg) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) require.NoError(t, db.Set([]byte("flush-test"), []byte("val"), types.WriteOptions{Sync: false})) require.NoError(t, db.Flush()) @@ -159,7 +162,8 @@ func TestFlush(t *testing.T) { func TestIteratorBounds(t *testing.T) { cfg := DefaultTestConfig(t) - db := openDB(t, &cfg) + cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) for _, k := range []string{"a", "b", "c"} { require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) @@ -179,7 +183,8 @@ func TestIteratorBounds(t *testing.T) { func TestIteratorPrev(t *testing.T) { cfg := DefaultTestConfig(t) - db := openDB(t, &cfg) + 
cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) for _, k := range []string{"a", "b", "c"} { require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) @@ -221,7 +226,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { } cfg := DefaultTestConfig(t) - db, err := OpenWithCache(t.Context(), &cfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) + cacheCfg := DefaultTestCacheConfig() + db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -244,7 +250,8 @@ func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { func TestIteratorSeekLTAndValue(t *testing.T) { cfg := DefaultTestConfig(t) - db := openDB(t, &cfg) + cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) for _, kv := range []struct{ k, v string }{ {"a", "val-a"}, @@ -266,7 +273,8 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { cfg := DefaultTestConfig(t) - db, err := OpenWithCache(t.Context(), &cfg, pebble.DefaultComparer, + cacheCfg := DefaultTestCacheConfig() + db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index 982eaa257d..3e41c6f462 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -11,30 +11,20 @@ import ( type PebbleDBConfig struct { // The directory to store the database files. This has no default value and must be provided. DataDir string - // The size of key-value cache, in bytes. - CacheSize uint64 - // The number of shards in the key-value cache. Must be a power of two and greater than 0. 
- CacheShardCount uint64 // The size of pebbleDB's internal block cache, in bytes. BlockCacheSize int - // Whether to enable metrics. + // Whether to enable pebble-internal metrics. EnableMetrics bool - // How often to scrape metrics (pebble internals + cache size). + // How often to scrape pebble-internal metrics. MetricsScrapeInterval time.Duration - // The estimated overhead per entry in the cache, in bytes. - // This should be derived experimentally, and may differ between different builds and architectures. - EstimatedOverheadPerEntry uint64 } // Default configuration for the PebbleDB database. func DefaultConfig() PebbleDBConfig { return PebbleDBConfig{ - CacheSize: 512 * unit.MB, - CacheShardCount: 8, - BlockCacheSize: 512 * unit.MB, - EnableMetrics: true, - MetricsScrapeInterval: 10 * time.Second, - EstimatedOverheadPerEntry: 256, + BlockCacheSize: 512 * unit.MB, + EnableMetrics: true, + MetricsScrapeInterval: 10 * time.Second, } } @@ -43,9 +33,6 @@ func (c *PebbleDBConfig) Validate() error { if c.DataDir == "" { return fmt.Errorf("data dir is required") } - if c.CacheSize > 0 && (c.CacheShardCount&(c.CacheShardCount-1)) != 0 { - return fmt.Errorf("cache shard count must be a power of two or 0") - } if c.BlockCacheSize <= 0 { return fmt.Errorf("block cache size must be greater than 0") } diff --git a/sei-db/db_engine/pebbledb/pebbledb_test_config.go b/sei-db/db_engine/pebbledb/pebbledb_test_config.go index ef39ed299c..bb494cbae2 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_test_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_test_config.go @@ -4,17 +4,23 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" ) -// Default configuration suitable for testing. Allocates much smaller cache sizes and disables metrics. -// DataDir defaults to t.TempDir(); callers that need a specific path can override it after calling. 
+// DefaultTestConfig returns a PebbleDBConfig suitable for testing. +// Allocates a smaller block cache and disables metrics. func DefaultTestConfig(t *testing.T) PebbleDBConfig { cfg := DefaultConfig() - cfg.DataDir = t.TempDir() - cfg.CacheSize = 16 * unit.MB cfg.BlockCacheSize = 16 * unit.MB cfg.EnableMetrics = false - return cfg } + +// DefaultTestCacheConfig returns a CacheConfig suitable for testing. +func DefaultTestCacheConfig() dbcache.CacheConfig { + return dbcache.CacheConfig{ + ShardCount: 8, + MaxSize: 16 * unit.MB, + } +} diff --git a/sei-db/state_db/bench/cryptosim/config/standard-perf.json b/sei-db/state_db/bench/cryptosim/config/standard-perf.json index 4cf205f830..ca267f6ef0 100644 --- a/sei-db/state_db/bench/cryptosim/config/standard-perf.json +++ b/sei-db/state_db/bench/cryptosim/config/standard-perf.json @@ -5,15 +5,9 @@ "MinimumNumberOfColdAccounts": 1000000, "MinimumNumberOfDormantAccounts": 100000000, "FlatKVConfig": { - "AccountDBConfig": { - "CacheSize": 1073741824 - }, - "CodeDBConfig": { - "CacheSize": 1073741824 - }, - "StorageDBConfig": { - "CacheSize": 4294967296 - } + "AccountCacheConfig": { "MaxSize": 1073741824 }, + "CodeCacheConfig": { "MaxSize": 1073741824 }, + "StorageCacheConfig": { "MaxSize": 4294967296 } } } diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index dbe8eb40bd..109244aa6c 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" ) @@ -46,20 +47,30 @@ type Config struct { // Default: true EnablePebbleMetrics bool `mapstructure:"enable-pebble-metrics"` - // AccountDBConfig defines the configuration for the account database. + // AccountDBConfig defines the PebbleDB configuration for the account database. 
AccountDBConfig pebbledb.PebbleDBConfig + // AccountCacheConfig defines the cache configuration for the account database. + AccountCacheConfig dbcache.CacheConfig - // CodeDBConfig defines the configuration for the code database. + // CodeDBConfig defines the PebbleDB configuration for the code database. CodeDBConfig pebbledb.PebbleDBConfig + // CodeCacheConfig defines the cache configuration for the code database. + CodeCacheConfig dbcache.CacheConfig - // StorageDBConfig defines the configuration for the storage database. + // StorageDBConfig defines the PebbleDB configuration for the storage database. StorageDBConfig pebbledb.PebbleDBConfig + // StorageCacheConfig defines the cache configuration for the storage database. + StorageCacheConfig dbcache.CacheConfig - // LegacyDBConfig defines the configuration for the legacy database. + // LegacyDBConfig defines the PebbleDB configuration for the legacy database. LegacyDBConfig pebbledb.PebbleDBConfig + // LegacyCacheConfig defines the cache configuration for the legacy database. + LegacyCacheConfig dbcache.CacheConfig - // MetadataDBConfig defines the configuration for the metadata database. + // MetadataDBConfig defines the PebbleDB configuration for the metadata database. MetadataDBConfig pebbledb.PebbleDBConfig + // MetadataCacheConfig defines the cache configuration for the metadata database. + MetadataCacheConfig dbcache.CacheConfig // Controls the number of goroutines in the DB read pool. The number of threads in this pool is equal to // ReaderThreadsPerCore * runtime.NumCPU() + ReaderConstantThreadCount. 
@@ -90,10 +101,15 @@ func DefaultConfig() *Config { SnapshotKeepRecent: DefaultSnapshotKeepRecent, EnablePebbleMetrics: true, AccountDBConfig: pebbledb.DefaultConfig(), + AccountCacheConfig: dbcache.DefaultCacheConfig(), CodeDBConfig: pebbledb.DefaultConfig(), + CodeCacheConfig: dbcache.DefaultCacheConfig(), StorageDBConfig: pebbledb.DefaultConfig(), + StorageCacheConfig: dbcache.DefaultCacheConfig(), LegacyDBConfig: pebbledb.DefaultConfig(), + LegacyCacheConfig: dbcache.DefaultCacheConfig(), MetadataDBConfig: pebbledb.DefaultConfig(), + MetadataCacheConfig: dbcache.DefaultCacheConfig(), ReaderThreadsPerCore: 2.0, ReaderConstantThreadCount: 0, ReaderPoolQueueSize: 1024, @@ -101,8 +117,8 @@ func DefaultConfig() *Config { MiscConstantThreadCount: 0, } - cfg.AccountDBConfig.CacheSize = unit.GB - cfg.StorageDBConfig.CacheSize = unit.GB * 4 + cfg.AccountCacheConfig.MaxSize = unit.GB + cfg.StorageCacheConfig.MaxSize = unit.GB * 4 return cfg } diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go index 05328cac81..e544ecfe5a 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -5,15 +5,21 @@ import ( "testing" "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" ) func smallTestPebbleConfig() pebbledb.PebbleDBConfig { return pebbledb.PebbleDBConfig{ - CacheSize: 16 * unit.MB, - CacheShardCount: 8, - BlockCacheSize: 16 * unit.MB, - EnableMetrics: false, + BlockCacheSize: 16 * unit.MB, + EnableMetrics: false, + } +} + +func smallTestCacheConfig() dbcache.CacheConfig { + return dbcache.CacheConfig{ + ShardCount: 8, + MaxSize: 16 * unit.MB, } } @@ -26,10 +32,15 @@ func DefaultTestConfig(t *testing.T) *Config { SnapshotInterval: DefaultSnapshotInterval, SnapshotKeepRecent: DefaultSnapshotKeepRecent, AccountDBConfig: 
smallTestPebbleConfig(), + AccountCacheConfig: smallTestCacheConfig(), CodeDBConfig: smallTestPebbleConfig(), + CodeCacheConfig: smallTestCacheConfig(), StorageDBConfig: smallTestPebbleConfig(), + StorageCacheConfig: smallTestCacheConfig(), LegacyDBConfig: smallTestPebbleConfig(), + LegacyCacheConfig: smallTestCacheConfig(), MetadataDBConfig: smallTestPebbleConfig(), + MetadataCacheConfig: smallTestCacheConfig(), ReaderThreadsPerCore: 2.0, ReaderPoolQueueSize: 1024, MiscPoolThreadsPerCore: 4.0, diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 2cc94d27ce..0ba90bdde5 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -14,6 +14,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -451,11 +452,11 @@ func (s *CommitStore) acquireFileLock(dir string) error { } // openPebbleDB creates the directory at cfg.DataDir and opens a PebbleDB instance. 
-func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig) (seidbtypes.KeyValueDB, error) { +func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig, cacheCfg *dbcache.CacheConfig) (seidbtypes.KeyValueDB, error) { if err := os.MkdirAll(cfg.DataDir, 0750); err != nil { return nil, fmt.Errorf("create directory %s: %w", cfg.DataDir, err) } - db, err := pebbledb.OpenWithCache(s.ctx, cfg, pebble.DefaultComparer, s.readPool, s.miscPool) + db, err := pebbledb.OpenWithCache(s.ctx, cfg, cacheCfg, pebble.DefaultComparer, s.readPool, s.miscPool) if err != nil { return nil, fmt.Errorf("open %s: %w", cfg.DataDir, err) } @@ -483,31 +484,31 @@ func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { }() var err error - s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig) + s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig, &s.config.AccountCacheConfig) if err != nil { return fmt.Errorf("failed to open account DB: %w", err) } toClose = append(toClose, s.accountDB) - s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig) + s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig, &s.config.CodeCacheConfig) if err != nil { return fmt.Errorf("failed to open code DB: %w", err) } toClose = append(toClose, s.codeDB) - s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig) + s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig, &s.config.StorageCacheConfig) if err != nil { return fmt.Errorf("failed to open storage DB: %w", err) } toClose = append(toClose, s.storageDB) - s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig) + s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig, &s.config.LegacyCacheConfig) if err != nil { return fmt.Errorf("failed to open legacy DB: %w", err) } toClose = append(toClose, s.legacyDB) - s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig) + s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig, &s.config.MetadataCacheConfig) if err != nil { return fmt.Errorf("failed to 
open metadata DB: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 9ea52a5741..4ec860d8cb 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -60,7 +60,8 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() cfg := pebbledb.DefaultTestConfig(t) - db, err := pebbledb.OpenWithCache(t.Context(), &cfg, pebble.DefaultComparer, + cacheCfg := pebbledb.DefaultTestCacheConfig() + db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, pebble.DefaultComparer, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) return db From 34e711de38bf759453a20ea514f17e7569f9fab6 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 25 Mar 2026 09:13:59 -0400 Subject: [PATCH 079/119] made suggested changes --- sei-db/common/threading/fixed_pool.go | 4 ++ sei-db/db_engine/dbcache/cache_config.go | 4 +- sei-db/db_engine/pebbledb/db.go | 9 ++-- sei-db/db_engine/pebbledb/db_test.go | 51 +------------------ sei-db/db_engine/pebbledb/pebbledb_config.go | 8 --- .../pebbledb/pebbledb_test_config.go | 1 - sei-db/state_db/sc/flatkv/config.go | 20 ++++---- .../state_db/sc/flatkv/flatkv_test_config.go | 3 +- .../state_db/sc/flatkv/perdb_lthash_test.go | 3 +- sei-db/state_db/sc/flatkv/snapshot.go | 4 +- sei-db/state_db/sc/flatkv/snapshot_test.go | 15 +++--- sei-db/state_db/sc/flatkv/store.go | 4 +- sei-db/state_db/sc/flatkv/store_test.go | 3 +- 13 files changed, 34 insertions(+), 95 deletions(-) diff --git a/sei-db/common/threading/fixed_pool.go b/sei-db/common/threading/fixed_pool.go index 2702741ce4..2e393ef5c3 100644 --- a/sei-db/common/threading/fixed_pool.go +++ b/sei-db/common/threading/fixed_pool.go @@ -26,6 +26,10 @@ func NewFixedPool( queueSize int, ) Pool { + if workers <= 0 { + workers = 1 + } + workQueue := make(chan func(), queueSize) fp := &fixedPool{ 
workQueue: workQueue, diff --git a/sei-db/db_engine/dbcache/cache_config.go b/sei-db/db_engine/dbcache/cache_config.go index a1f9f3c3a7..703653fab7 100644 --- a/sei-db/db_engine/dbcache/cache_config.go +++ b/sei-db/db_engine/dbcache/cache_config.go @@ -33,8 +33,8 @@ func DefaultCacheConfig() CacheConfig { // Validate checks that the configuration is sane and returns an error if it is not. func (c *CacheConfig) Validate() error { - if c.MaxSize > 0 && (c.ShardCount&(c.ShardCount-1)) != 0 { - return fmt.Errorf("shard count must be a power of two") + if c.MaxSize > 0 && (c.ShardCount == 0 || (c.ShardCount&(c.ShardCount-1)) != 0) { + return fmt.Errorf("shard count must be a non-zero power of two") } if c.MetricsName != "" && c.MetricsScrapeInterval <= 0 { return fmt.Errorf("metrics scrape interval must be positive when metrics name is set") diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index 1cda457a57..623abcceb6 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -13,6 +13,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -29,19 +30,18 @@ var _ types.KeyValueDB = (*pebbleDB)(nil) func Open( ctx context.Context, config *PebbleDBConfig, - comparer *pebble.Comparer, ) (_ types.KeyValueDB, err error) { if err := config.Validate(); err != nil { return nil, fmt.Errorf("failed to validate config: %w", err) } - pebbleCache := pebble.NewCache(int64(config.BlockCacheSize)) + pebbleCache := pebble.NewCache(int64(512 * unit.MB)) defer pebbleCache.Unref() popts := &pebble.Options{ Cache: pebbleCache, - Comparer: comparer, + Comparer: pebble.DefaultComparer, // FormatMajorVersion is pinned to a specific version to prevent accidental // breaking 
changes when updating the pebble dependency. Using FormatNewest // would cause the on-disk format to silently upgrade when pebble is updated, @@ -101,11 +101,10 @@ func OpenWithCache( ctx context.Context, config *PebbleDBConfig, cacheConfig *dbcache.CacheConfig, - comparer *pebble.Comparer, readPool threading.Pool, miscPool threading.Pool, ) (types.KeyValueDB, error) { - db, err := Open(ctx, config, comparer) + db, err := Open(ctx, config) if err != nil { return nil, fmt.Errorf("failed to open database: %w", err) } diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 8b7ac68439..17c13899b5 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -1,12 +1,10 @@ package pebbledb import ( - "bytes" "testing" "github.com/stretchr/testify/require" - "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/common/unit" @@ -35,7 +33,7 @@ func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig, ca func openDB(t *testing.T, cfg *PebbleDBConfig, cacheCfg *dbcache.CacheConfig) types.KeyValueDB { t.Helper() - db, err := OpenWithCache(t.Context(), cfg, cacheCfg, pebble.DefaultComparer, + db, err := OpenWithCache(t.Context(), cfg, cacheCfg, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -203,51 +201,6 @@ func TestIteratorPrev(t *testing.T) { require.Equal(t, "b", string(itr.Key())) } -func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { - cmp := *pebble.DefaultComparer - cmp.Name = "sei-db/test-split-on-slash" - cmp.Split = func(k []byte) int { - for i, b := range k { - if b == '/' { - return i + 1 - } - } - return len(k) - } - cmp.ImmediateSuccessor = func(dst, a []byte) []byte { - for i := len(a) - 1; i >= 0; i-- { - if a[i] != 0xff { - dst 
= append(dst, a[:i+1]...) - dst[len(dst)-1]++ - return dst - } - } - return append(dst, a...) - } - - cfg := DefaultTestConfig(t) - cacheCfg := DefaultTestCacheConfig() - db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, &cmp, threading.NewAdHocPool(), threading.NewAdHocPool()) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - for _, k := range []string{"a/1", "a/2", "a/3", "b/1"} { - require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) - } - - itr, err := db.NewIter(nil) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, itr.Close()) }) - - require.True(t, itr.SeekGE([]byte("a/"))) - require.True(t, itr.Valid()) - require.True(t, bytes.HasPrefix(itr.Key(), []byte("a/"))) - - require.True(t, itr.NextPrefix()) - require.True(t, itr.Valid()) - require.Equal(t, "b/1", string(itr.Key())) -} - func TestIteratorSeekLTAndValue(t *testing.T) { cfg := DefaultTestConfig(t) cacheCfg := DefaultTestCacheConfig() @@ -274,7 +227,7 @@ func TestIteratorSeekLTAndValue(t *testing.T) { func TestCloseIsIdempotent(t *testing.T) { cfg := DefaultTestConfig(t) cacheCfg := DefaultTestCacheConfig() - db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, pebble.DefaultComparer, + db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go index 3e41c6f462..383710e90a 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -3,16 +3,12 @@ package pebbledb import ( "fmt" "time" - - "github.com/sei-protocol/sei-chain/sei-db/common/unit" ) // Configuration for the PebbleDB database. type PebbleDBConfig struct { // The directory to store the database files. This has no default value and must be provided. DataDir string - // The size of pebbleDB's internal block cache, in bytes. 
- BlockCacheSize int // Whether to enable pebble-internal metrics. EnableMetrics bool // How often to scrape pebble-internal metrics. @@ -22,7 +18,6 @@ type PebbleDBConfig struct { // Default configuration for the PebbleDB database. func DefaultConfig() PebbleDBConfig { return PebbleDBConfig{ - BlockCacheSize: 512 * unit.MB, EnableMetrics: true, MetricsScrapeInterval: 10 * time.Second, } @@ -33,9 +28,6 @@ func (c *PebbleDBConfig) Validate() error { if c.DataDir == "" { return fmt.Errorf("data dir is required") } - if c.BlockCacheSize <= 0 { - return fmt.Errorf("block cache size must be greater than 0") - } if c.EnableMetrics && c.MetricsScrapeInterval <= 0 { return fmt.Errorf("metrics scrape interval must be positive when metrics are enabled") } diff --git a/sei-db/db_engine/pebbledb/pebbledb_test_config.go b/sei-db/db_engine/pebbledb/pebbledb_test_config.go index bb494cbae2..897e73c0c2 100644 --- a/sei-db/db_engine/pebbledb/pebbledb_test_config.go +++ b/sei-db/db_engine/pebbledb/pebbledb_test_config.go @@ -12,7 +12,6 @@ import ( func DefaultTestConfig(t *testing.T) PebbleDBConfig { cfg := DefaultConfig() cfg.DataDir = t.TempDir() - cfg.BlockCacheSize = 16 * unit.MB cfg.EnableMetrics = false return cfg } diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 109244aa6c..9959dcbde4 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -157,20 +157,20 @@ func (c *Config) Validate() error { if c.DataDir == "" { return fmt.Errorf("data dir is required") } - if c.AccountDBConfig.Validate() != nil { - return fmt.Errorf("account db config is invalid: %w", c.AccountDBConfig.Validate()) + if err := c.AccountDBConfig.Validate(); err != nil { + return fmt.Errorf("account db config is invalid: %w", err) } - if c.CodeDBConfig.Validate() != nil { - return fmt.Errorf("code db config is invalid: %w", c.CodeDBConfig.Validate()) + if err := c.CodeDBConfig.Validate(); err != nil { + return fmt.Errorf("code 
db config is invalid: %w", err) } - if c.StorageDBConfig.Validate() != nil { - return fmt.Errorf("storage db config is invalid: %w", c.StorageDBConfig.Validate()) + if err := c.StorageDBConfig.Validate(); err != nil { + return fmt.Errorf("storage db config is invalid: %w", err) } - if c.LegacyDBConfig.Validate() != nil { - return fmt.Errorf("legacy db config is invalid: %w", c.LegacyDBConfig.Validate()) + if err := c.LegacyDBConfig.Validate(); err != nil { + return fmt.Errorf("legacy db config is invalid: %w", err) } - if c.MetadataDBConfig.Validate() != nil { - return fmt.Errorf("metadata db config is invalid: %w", c.MetadataDBConfig.Validate()) + if err := c.MetadataDBConfig.Validate(); err != nil { + return fmt.Errorf("metadata db config is invalid: %w", err) } if c.ReaderThreadsPerCore < 0 { diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go index e544ecfe5a..4ab1b71bfa 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -11,8 +11,7 @@ import ( func smallTestPebbleConfig() pebbledb.PebbleDBConfig { return pebbledb.PebbleDBConfig{ - BlockCacheSize: 16 * unit.MB, - EnableMetrics: false, + EnableMetrics: false, } } diff --git a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go index f89455ff75..60fe3b3af1 100644 --- a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go +++ b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go @@ -5,7 +5,6 @@ import ( "path/filepath" "testing" - "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -118,7 +117,7 @@ func TestPerDBLtHashSkewRecovery(t *testing.T) { metaCfg := pebbledb.DefaultConfig() metaCfg.DataDir = metaDBPath metaCfg.EnableMetrics = false - db, err := pebbledb.Open(t.Context(), &metaCfg, 
pebble.DefaultComparer) + db, err := pebbledb.Open(t.Context(), &metaCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index 04c14d3569..05f79c2e55 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -11,7 +11,6 @@ import ( "strings" "time" - "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -381,8 +380,7 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { var version int64 metaCfg := s.config.MetadataDBConfig metaCfg.DataDir = filepath.Join(flatkvDir, metadataDir) - tmpMeta, err := pebbledb.Open( - s.ctx, &metaCfg, pebble.DefaultComparer) + tmpMeta, err := pebbledb.Open(s.ctx, &metaCfg) if err == nil { verData, verErr := tmpMeta.Get(metaVersionKey) _ = tmpMeta.Close() diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 6415293f66..d903c89653 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -7,7 +7,6 @@ import ( "strings" "testing" - "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -279,7 +278,7 @@ func TestMigrationFromFlatLayout(t *testing.T) { // Create an actual PebbleDB so Open works cfg := pebbledb.DefaultTestConfig(t) cfg.DataDir = dbPath - db, err := pebbledb.Open(t.Context(), &cfg, pebble.DefaultComparer) + db, err := pebbledb.Open(t.Context(), &cfg) require.NoError(t, err) require.NoError(t, db.Close()) } @@ -345,7 +344,7 @@ func TestOpenVersionValidation(t 
*testing.T) { acctCfg := pebbledb.DefaultConfig() acctCfg.DataDir = accountDBPath acctCfg.EnableMetrics = false - db, err := pebbledb.Open(t.Context(), &acctCfg, pebble.DefaultComparer) + db, err := pebbledb.Open(t.Context(), &acctCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) @@ -1523,7 +1522,7 @@ func TestGlobalMetadataCorruption(t *testing.T) { metaCfg := pebbledb.DefaultConfig() metaCfg.DataDir = workingMeta metaCfg.EnableMetrics = false - db, err := pebbledb.Open(context.Background(), &metaCfg, pebble.DefaultComparer) + db, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) @@ -1532,7 +1531,7 @@ func TestGlobalMetadataCorruption(t *testing.T) { metaCfg2 := pebbledb.DefaultConfig() metaCfg2.DataDir = snapMeta metaCfg2.EnableMetrics = false - db2, err := pebbledb.Open(context.Background(), &metaCfg2, pebble.DefaultComparer) + db2, err := pebbledb.Open(context.Background(), &metaCfg2) require.NoError(t, err) require.NoError(t, db2.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) @@ -1607,7 +1606,7 @@ func TestLocalMetaCorruption(t *testing.T) { acctCfg := pebbledb.DefaultConfig() acctCfg.DataDir = workingAccount acctCfg.EnableMetrics = false - db, err := pebbledb.Open(context.Background(), &acctCfg, pebble.DefaultComparer) + db, err := pebbledb.Open(context.Background(), &acctCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) @@ -1617,7 +1616,7 @@ func TestLocalMetaCorruption(t *testing.T) { acctCfg2 := pebbledb.DefaultConfig() acctCfg2.DataDir = snapAccount acctCfg2.EnableMetrics = false - db2, err := 
pebbledb.Open(context.Background(), &acctCfg2, pebble.DefaultComparer) + db2, err := pebbledb.Open(context.Background(), &acctCfg2) require.NoError(t, err) require.NoError(t, db2.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) @@ -1659,7 +1658,7 @@ func TestWALSegmentCorruption(t *testing.T) { metaCfg := pebbledb.DefaultConfig() metaCfg.DataDir = workingMeta metaCfg.EnableMetrics = false - mdb, err := pebbledb.Open(context.Background(), &metaCfg, pebble.DefaultComparer) + mdb, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) require.NoError(t, mdb.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, mdb.Close()) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 0ba90bdde5..664300e1d2 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -10,8 +10,6 @@ import ( "runtime" "time" - "github.com/cockroachdb/pebble/v2" - "github.com/sei-protocol/sei-chain/sei-db/common/metrics" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" @@ -456,7 +454,7 @@ func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig, cacheCfg *dbcac if err := os.MkdirAll(cfg.DataDir, 0750); err != nil { return nil, fmt.Errorf("create directory %s: %w", cfg.DataDir, err) } - db, err := pebbledb.OpenWithCache(s.ctx, cfg, cacheCfg, pebble.DefaultComparer, s.readPool, s.miscPool) + db, err := pebbledb.OpenWithCache(s.ctx, cfg, cacheCfg, s.readPool, s.miscPool) if err != nil { return nil, fmt.Errorf("open %s: %w", cfg.DataDir, err) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 4ec860d8cb..a5e1f5599e 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/require" - 
"github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" @@ -61,7 +60,7 @@ func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() cfg := pebbledb.DefaultTestConfig(t) cacheCfg := pebbledb.DefaultTestCacheConfig() - db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, pebble.DefaultComparer, + db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) return db From 61d1f4dc4ab619579066947b50f780b1e7dd717a Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 26 Mar 2026 12:48:01 -0400 Subject: [PATCH 080/119] rearranged files --- .../rand}/canned_random.go | 14 +++++-- sei-db/common/utils/hash64.go | 39 +++++++++++++++++++ sei-db/state_db/bench/blocksim/block.go | 1 + sei-db/state_db/bench/blocksim/transaction.go | 25 ++++++++++++ sei-db/state_db/bench/cryptosim/cryptosim.go | 3 +- .../bench/cryptosim/data_generator.go | 7 ++-- sei-db/state_db/bench/cryptosim/receipt.go | 5 ++- .../state_db/bench/cryptosim/receipt_test.go | 19 ++++----- sei-db/state_db/bench/cryptosim/util.go | 36 ----------------- 9 files changed, 94 insertions(+), 55 deletions(-) rename sei-db/{state_db/bench/cryptosim => common/rand}/canned_random.go (95%) create mode 100644 sei-db/common/utils/hash64.go create mode 100644 sei-db/state_db/bench/blocksim/block.go create mode 100644 sei-db/state_db/bench/blocksim/transaction.go diff --git a/sei-db/state_db/bench/cryptosim/canned_random.go b/sei-db/common/rand/canned_random.go similarity index 95% rename from sei-db/state_db/bench/cryptosim/canned_random.go rename to sei-db/common/rand/canned_random.go index 6e95db15e4..e5636a16f3 100644 --- a/sei-db/state_db/bench/cryptosim/canned_random.go +++ b/sei-db/common/rand/canned_random.go @@ -1,10 +1,16 @@ -package cryptosim +package rand import ( 
"encoding/binary" "fmt" "math" "math/rand" + + "github.com/sei-protocol/sei-chain/sei-db/common/utils" +) + +const ( + AddressLen = 20 // EVM address length ) // CannedRandom provides pre-generated randomness for benchmarking. @@ -69,7 +75,7 @@ func NewCannedRandom( func (cr *CannedRandom) Clone(randomizeOffset bool) *CannedRandom { index := cr.index if randomizeOffset { - index = PositiveHash64(cr.Int64()) % int64(len(cr.buffer)) + index = utils.PositiveHash64(cr.Int64()) % int64(len(cr.buffer)) } return &CannedRandom{ buffer: cr.buffer, @@ -102,7 +108,7 @@ func (cr *CannedRandom) SeededBytes(count int, seed int64) []byte { return cr.buffer } - startIndex := PositiveHash64(seed) % int64(len(cr.buffer)-count) + startIndex := utils.PositiveHash64(seed) % int64(len(cr.buffer)-count) return cr.buffer[startIndex : startIndex+int64(count)] } @@ -115,7 +121,7 @@ func (cr *CannedRandom) Int64() int64 { } base := binary.BigEndian.Uint64(buf[:]) //nolint:gosec // G115 - benchmark uses deterministic non-crypto randomness, overflow acceptable - result := Hash64(int64(base) + cr.index) + result := utils.Hash64(int64(base) + cr.index) // Add 8 to the index to skip the 8 bytes we just read. cr.index = (cr.index + 8) % bufLen diff --git a/sei-db/common/utils/hash64.go b/sei-db/common/utils/hash64.go new file mode 100644 index 0000000000..04526131a7 --- /dev/null +++ b/sei-db/common/utils/hash64.go @@ -0,0 +1,39 @@ +package utils + +import "math" + +// Hash64 returns a well-distributed 64-bit hash of x. +// It implements the SplitMix64 finalizer, a fast non-cryptographic mixing +// function with excellent avalanche properties. It is suitable for hash tables, +// sharding, randomized iteration, and benchmarks, but it is NOT +// cryptographically secure. +// +// The function is a bijection over uint64 (no collisions as a mapping). +// +// References: +// - Steele, Lea, Flood. 
"Fast Splittable Pseudorandom Number Generators" +// (OOPSLA 2014): https://doi.org/10.1145/2660193.2660195 +// - Public domain reference implementation: +// http://xorshift.di.unimi.it/splitmix64.c +func Hash64(x int64) int64 { + z := uint64(x) //nolint:gosec // G115 - hash function, int64->uint64 conversion intentional + z += 0x9e3779b97f4a7c15 + z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9 + z = (z ^ (z >> 27)) * 0x94d049bb133111eb + z = z ^ (z >> 31) + //nolint:gosec // G115 - hash function converts uint64 to int64, overflow intentional + return int64(z) +} + +// PositiveHash64 returns the absolute value of Hash64(x). It never returns a negative value. +// When Hash64(x) is math.MinInt64, returns math.MaxInt64 since the true absolute value does not fit in int64. +func PositiveHash64(x int64) int64 { + result := Hash64(x) + if result == math.MinInt64 { + return math.MaxInt64 + } + if result < 0 { + return -result + } + return result +} diff --git a/sei-db/state_db/bench/blocksim/block.go b/sei-db/state_db/bench/blocksim/block.go new file mode 100644 index 0000000000..cb1b03cca3 --- /dev/null +++ b/sei-db/state_db/bench/blocksim/block.go @@ -0,0 +1 @@ +package blocksim diff --git a/sei-db/state_db/bench/blocksim/transaction.go b/sei-db/state_db/bench/blocksim/transaction.go new file mode 100644 index 0000000000..852d02aaa8 --- /dev/null +++ b/sei-db/state_db/bench/blocksim/transaction.go @@ -0,0 +1,25 @@ +package blocksim + +import "github.com/sei-protocol/sei-chain/sei-db/common/rand" + +// A simulated transaction for the blocksim benchmark. +type transaction struct { + // The unique ID of the transaction. Used to deterministically generate the transaction hash. + id uint64 + + // The (simulated) hash of the transaction. + hash []byte + + // Data contained by the transaction. These bytes are randomly generated. + payload []byte +} + +// Creates a new transaction with the given ID. 
+func NewTransaction( + id uint64, + crand *rand.CannedRandom, +) *transaction { + return &transaction{ + id: id, + } +} diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index 2b1b593250..069a46294f 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -6,6 +6,7 @@ import ( "runtime" "time" + "github.com/sei-protocol/sei-chain/sei-db/common/rand" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" "golang.org/x/time/rate" ) @@ -139,7 +140,7 @@ func NewCryptoSim( // avoiding rate() spikes when restarting with a preserved DB. fmt.Printf("Initializing random number generator.\n") - rand := NewCannedRandom(config.CannedRandomSize, config.Seed) + rand := rand.NewCannedRandom(config.CannedRandomSize, config.Seed) consoleUpdatePeriod := time.Duration(config.ConsoleUpdateIntervalSeconds * float64(time.Second)) diff --git a/sei-db/state_db/bench/cryptosim/data_generator.go b/sei-db/state_db/bench/cryptosim/data_generator.go index 09ead76273..c050042665 100644 --- a/sei-db/state_db/bench/cryptosim/data_generator.go +++ b/sei-db/state_db/bench/cryptosim/data_generator.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/rand" ) const ( @@ -36,7 +37,7 @@ type DataGenerator struct { initialNextBlockNumber uint64 // The random number generator. - rand *CannedRandom + rand *rand.CannedRandom // The address of the fee account (i.e. the account that collects gas fees). This is a special account // and has account ID 0. Since we reuse this account very often, it is cached for performance. 
@@ -64,7 +65,7 @@ type DataGenerator struct { func NewDataGenerator( config *CryptoSimConfig, database *Database, - rand *CannedRandom, + rand *rand.CannedRandom, metrics *CryptosimMetrics, ) (*DataGenerator, error) { @@ -323,6 +324,6 @@ func (d *DataGenerator) ReportEndOfBlock() { // Get the random number generator. Note that the random number generator is not thread safe, and // so the caller is responsible for ensuring that it is not used concurrently with other calls to the data generator. -func (d *DataGenerator) Rand() *CannedRandom { +func (d *DataGenerator) Rand() *rand.CannedRandom { return d.rand } diff --git a/sei-db/state_db/bench/cryptosim/receipt.go b/sei-db/state_db/bench/cryptosim/receipt.go index 4ab348aa2c..730e27c3e9 100644 --- a/sei-db/state_db/bench/cryptosim/receipt.go +++ b/sei-db/state_db/bench/cryptosim/receipt.go @@ -7,6 +7,7 @@ import ( "hash" ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/sei-protocol/sei-chain/sei-db/common/rand" evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" "golang.org/x/crypto/sha3" ) @@ -43,7 +44,7 @@ var erc20TransferEventSignatureBytes = [hashLen]byte{ // BuildERC20TransferReceiptFromTxn produces a plausible successful ERC20 transfer receipt from a transaction. func BuildERC20TransferReceiptFromTxn( - crand *CannedRandom, + crand *rand.CannedRandom, feeCollectionAccount []byte, blockNumber uint64, txIndex uint32, @@ -67,7 +68,7 @@ func BuildERC20TransferReceiptFromTxn( // ERC20 balances as storage slots rather than separate account references. The caller supplies the block number and tx // index so the resulting receipt can line up with the simulated block being benchmarked. 
func BuildERC20TransferReceipt( - crand *CannedRandom, + crand *rand.CannedRandom, feeCollectionAccount []byte, srcAccount []byte, dstAccount []byte, diff --git a/sei-db/state_db/bench/cryptosim/receipt_test.go b/sei-db/state_db/bench/cryptosim/receipt_test.go index f1e61109fb..a23d4b0fbf 100644 --- a/sei-db/state_db/bench/cryptosim/receipt_test.go +++ b/sei-db/state_db/bench/cryptosim/receipt_test.go @@ -5,11 +5,12 @@ import ( ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/rand" ) func makeTestKeys(t *testing.T) (feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract []byte) { t.Helper() - keyRand := NewCannedRandom(4096, 1) + keyRand := rand.NewCannedRandom(4096, 1) feeAccount = evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) @@ -32,7 +33,7 @@ func makeTestKeys(t *testing.T) (feeAccount, srcAccount, dstAccount, senderSlot, } func TestBuildERC20TransferReceipt(t *testing.T) { - crand := NewCannedRandom(1<<20, 42) + crand := rand.NewCannedRandom(1<<20, 42) feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract := makeTestKeys(t) receipt, err := BuildERC20TransferReceipt( @@ -68,7 +69,7 @@ func TestBuildERC20TransferReceipt(t *testing.T) { } func TestBuildERC20TransferReceipt_InvalidInputs(t *testing.T) { - crand := NewCannedRandom(1<<20, 42) + crand := rand.NewCannedRandom(1<<20, 42) feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract := makeTestKeys(t) if _, err := BuildERC20TransferReceipt(nil, feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract, 1_000_000, 0); err == nil { @@ -84,8 +85,8 @@ func TestBuildERC20TransferReceipt_InvalidInputs(t *testing.T) { // Regression test: account keys with EVMKeyCode prefix must still be accepted. 
func TestBuildERC20TransferReceipt_EVMKeyCodeAccounts(t *testing.T) { - crand := NewCannedRandom(1<<20, 42) - keyRand := NewCannedRandom(4096, 1) + keyRand := rand.NewCannedRandom(4096, 1) + crand := rand.NewCannedRandom(1<<20, 42) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) @@ -113,8 +114,8 @@ func TestBuildERC20TransferReceipt_EVMKeyCodeAccounts(t *testing.T) { // Regression test: uses the exact key formats produced by data_generator.go // (EVMKeyCodeHash for accounts, EVMKeyStorage with full StorageKeyLen payload). func TestBuildERC20TransferReceipt_DataGeneratorKeyFormats(t *testing.T) { - crand := NewCannedRandom(1<<20, 42) - keyRand := NewCannedRandom(4096, 1) + keyRand := rand.NewCannedRandom(4096, 1) + crand := rand.NewCannedRandom(1<<20, 42) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 1, AddressLen)) @@ -134,8 +135,8 @@ func TestBuildERC20TransferReceipt_DataGeneratorKeyFormats(t *testing.T) { } func BenchmarkBuildERC20TransferReceipt(b *testing.B) { - keyRand := NewCannedRandom(4096, 1) - receiptRand := NewCannedRandom(1<<20, 2) + keyRand := rand.NewCannedRandom(4096, 1) + receiptRand := rand.NewCannedRandom(1<<20, 2) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) diff --git a/sei-db/state_db/bench/cryptosim/util.go b/sei-db/state_db/bench/cryptosim/util.go index 508408d61e..cc46e24c7f 100644 --- a/sei-db/state_db/bench/cryptosim/util.go +++ b/sei-db/state_db/bench/cryptosim/util.go @@ -42,42 +42,6 @@ func paddedCounterKey(s string) []byte { return b } -// Hash64 returns a well-distributed 64-bit hash of x. 
-// It implements the SplitMix64 finalizer, a fast non-cryptographic mixing -// function with excellent avalanche properties. It is suitable for hash tables, -// sharding, randomized iteration, and benchmarks, but it is NOT -// cryptographically secure. -// -// The function is a bijection over uint64 (no collisions as a mapping). -// -// References: -// - Steele, Lea, Flood. "Fast Splittable Pseudorandom Number Generators" -// (OOPSLA 2014): https://doi.org/10.1145/2660193.2660195 -// - Public domain reference implementation: -// http://xorshift.di.unimi.it/splitmix64.c -func Hash64(x int64) int64 { - z := uint64(x) //nolint:gosec // G115 - hash function, int64->uint64 conversion intentional - z += 0x9e3779b97f4a7c15 - z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9 - z = (z ^ (z >> 27)) * 0x94d049bb133111eb - z = z ^ (z >> 31) - //nolint:gosec // G115 - hash function converts uint64 to int64, overflow intentional - return int64(z) -} - -// PositiveHash64 returns the absolute value of Hash64(x). It never returns a negative value. -// When Hash64(x) is math.MinInt64, returns math.MaxInt64 since the true absolute value does not fit in int64. -func PositiveHash64(x int64) int64 { - result := Hash64(x) - if result == math.MinInt64 { - return math.MaxInt64 - } - if result < 0 { - return -result - } - return result -} - // ResolveAndCreateDir expands ~ to the home directory, resolves the path to // an absolute path, and creates the directory if it doesn't exist. 
func ResolveAndCreateDir(dataDir string) (string, error) { From fa351b371245655ade3146ec8187a93db5a451be Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 27 Mar 2026 08:05:20 -0400 Subject: [PATCH 081/119] basic data structures --- sei-db/state_db/bench/blocksim/block.go | 166 ++++++++++++++ .../state_db/bench/blocksim/blocksim_test.go | 205 ++++++++++++++++++ sei-db/state_db/bench/blocksim/transaction.go | 71 +++++- 3 files changed, 438 insertions(+), 4 deletions(-) create mode 100644 sei-db/state_db/bench/blocksim/blocksim_test.go diff --git a/sei-db/state_db/bench/blocksim/block.go b/sei-db/state_db/bench/blocksim/block.go index cb1b03cca3..af58f0ebb2 100644 --- a/sei-db/state_db/bench/blocksim/block.go +++ b/sei-db/state_db/bench/blocksim/block.go @@ -1 +1,167 @@ package blocksim + +import ( + "encoding/binary" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/rand" +) + +type block struct { + // The height of the block. + height uint64 + + // The (simulated) hash of the block. + hash []byte + + // The transactions in the block. + transactions []*transaction + + // Metadata for the block. Randomly generated. + metadata []byte +} + +// Creates a randomized block with the given height, first transaction ID, last transaction ID, +// transaction size, and metadata size. +func RandomBlock( + height uint64, + crand *rand.CannedRandom, + firstTransactionID uint64, + lastTransactionID uint64, + transactionSize int, + metadataSize int, +) *block { + transactions := make([]*transaction, 0, lastTransactionID-firstTransactionID+1) + for id := firstTransactionID; id <= lastTransactionID; id++ { + transactions = append(transactions, RandomTransaction(id, crand, transactionSize)) + } + metadata := crand.Bytes(metadataSize) + hash := crand.Address(0, int64(height), 32) + return &block{ + height: height, + hash: hash, + transactions: transactions, + metadata: metadata, + } +} + +// Returns the hash of the block. 
+// +// Data is not safe to modify in place, make a copy before modifying. +func (b *block) Hash() []byte { + return b.hash +} + +// Returns the transactions in the block. +// +// Data is not safe to modify in place, make a copy before modifying. +func (b *block) Transactions() []*transaction { + return b.transactions +} + +// Returns the metadata of the block. +// +// Data is not safe to modify in place, make a copy before modifying. +func (b *block) Metadata() []byte { + return b.metadata +} + +// Returns the height of the block. +func (b *block) Height() uint64 { + return b.height +} + +// Serialized block layout: +// +// [8 bytes: height] +// [4 bytes: metadata size (M)] +// [M bytes: metadata] +// [4 bytes: number of transactions (N)] +// For each transaction: +// [4 bytes: serialized transaction size (S)] +// [S bytes: serialized transaction data] +func (b *block) Serialize() []byte { + serializedTransactions := make([][]byte, 0, len(b.transactions)) + serializedTransactionsSize := 0 + for _, txn := range b.transactions { + serializedTransaction := txn.Serialize() + serializedTransactions = append(serializedTransactions, serializedTransaction) + serializedTransactionsSize += 4 /* size prefix */ + len(serializedTransaction) + } + + dataLen := 8 /* height */ + 4 /* metadata size */ + len(b.metadata) + + 4 /* number of transactions */ + serializedTransactionsSize + + data := make([]byte, dataLen) + off := 0 + + binary.BigEndian.PutUint64(data[off:], b.height) + off += 8 + + binary.BigEndian.PutUint32(data[off:], uint32(len(b.metadata))) + off += 4 + + copy(data[off:], b.metadata) + off += len(b.metadata) + + binary.BigEndian.PutUint32(data[off:], uint32(len(b.transactions))) + off += 4 + + for _, serializedTransaction := range serializedTransactions { + binary.BigEndian.PutUint32(data[off:], uint32(len(serializedTransaction))) + off += 4 + copy(data[off:], serializedTransaction) + off += len(serializedTransaction) + } + return data +} + +func 
DeserializeBlock(crand *rand.CannedRandom, data []byte) (*block, error) { + if len(data) < 16 { + return nil, fmt.Errorf("data too short to contain a block") + } + + off := 0 + + height := binary.BigEndian.Uint64(data[off:]) + off += 8 + + metadataSize := int(binary.BigEndian.Uint32(data[off:])) + off += 4 + + if len(data) < off+metadataSize+4 { + return nil, fmt.Errorf("data too short to contain block metadata") + } + metadata := make([]byte, metadataSize) + copy(metadata, data[off:off+metadataSize]) + off += metadataSize + + numberOfTransactions := int(binary.BigEndian.Uint32(data[off:])) + off += 4 + + transactions := make([]*transaction, 0, numberOfTransactions) + for i := 0; i < numberOfTransactions; i++ { + if len(data) < off+4 { + return nil, fmt.Errorf("data too short to contain transaction size") + } + transactionSize := int(binary.BigEndian.Uint32(data[off:])) + off += 4 + if len(data) < off+transactionSize { + return nil, fmt.Errorf("data too short to contain transaction") + } + txn, err := DeserializeTransaction(crand, data[off:off+transactionSize]) + if err != nil { + return nil, fmt.Errorf("failed to deserialize transaction: %w", err) + } + off += transactionSize + transactions = append(transactions, txn) + } + + hash := crand.Address(0, int64(height), 32) + return &block{ + height: height, + hash: hash, + metadata: metadata, + transactions: transactions, + }, nil +} diff --git a/sei-db/state_db/bench/blocksim/blocksim_test.go b/sei-db/state_db/bench/blocksim/blocksim_test.go new file mode 100644 index 0000000000..0e8f03081c --- /dev/null +++ b/sei-db/state_db/bench/blocksim/blocksim_test.go @@ -0,0 +1,205 @@ +package blocksim + +import ( + "bytes" + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/rand" + "github.com/stretchr/testify/require" +) + +const testBufferSize = 1024 + +func newTestCrand() *rand.CannedRandom { + return rand.NewCannedRandom(testBufferSize, 42) +} + +func TestTransactionRoundTrip(t *testing.T) { + crand := 
newTestCrand() + txn := RandomTransaction(7, crand, 64) + + serialized := txn.Serialize() + crand.Reset() + deserialized, err := DeserializeTransaction(crand, serialized) + require.NoError(t, err) + + require.Equal(t, txn.ID(), deserialized.ID()) + require.True(t, bytes.Equal(txn.Hash(), deserialized.Hash())) + require.True(t, bytes.Equal(txn.Payload(), deserialized.Payload())) +} + +func TestTransactionRoundTripEmptyPayload(t *testing.T) { + crand := newTestCrand() + txn := RandomTransaction(1, crand, 0) + + serialized := txn.Serialize() + crand.Reset() + deserialized, err := DeserializeTransaction(crand, serialized) + require.NoError(t, err) + + require.Equal(t, txn.ID(), deserialized.ID()) + require.True(t, bytes.Equal(txn.Hash(), deserialized.Hash())) + require.Empty(t, deserialized.Payload()) +} + +func TestTransactionDeserializePayloadIsolation(t *testing.T) { + crand := newTestCrand() + txn := RandomTransaction(1, crand, 32) + serialized := txn.Serialize() + + crand.Reset() + deserialized, err := DeserializeTransaction(crand, serialized) + require.NoError(t, err) + + // Mutating the serialized buffer should not affect the deserialized transaction. + for i := 12; i < len(serialized); i++ { + serialized[i] = 0xFF + } + require.True(t, bytes.Equal(txn.Payload(), deserialized.Payload())) +} + +func TestTransactionDeserializeTooShort(t *testing.T) { + crand := newTestCrand() + _, err := DeserializeTransaction(crand, make([]byte, 11)) + require.Error(t, err) + + _, err = DeserializeTransaction(crand, make([]byte, 0)) + require.Error(t, err) +} + +func TestTransactionDeserializeTruncatedPayload(t *testing.T) { + crand := newTestCrand() + txn := RandomTransaction(1, crand, 64) + serialized := txn.Serialize() + + // Truncate the serialized data to cut off part of the payload. 
+ crand.Reset() + _, err := DeserializeTransaction(crand, serialized[:20]) + require.Error(t, err) +} + +func TestBlockRoundTrip(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(100, crand, 1, 5, 48, 32) + + serialized := blk.Serialize() + crand.Reset() + deserialized, err := DeserializeBlock(crand, serialized) + require.NoError(t, err) + + require.Equal(t, blk.Height(), deserialized.Height()) + require.True(t, bytes.Equal(blk.Hash(), deserialized.Hash())) + require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) + require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) + + for i, txn := range blk.Transactions() { + dtxn := deserialized.Transactions()[i] + require.Equal(t, txn.ID(), dtxn.ID()) + require.True(t, bytes.Equal(txn.Hash(), dtxn.Hash())) + require.True(t, bytes.Equal(txn.Payload(), dtxn.Payload())) + } +} + +func TestBlockRoundTripNoTransactions(t *testing.T) { + crand := newTestCrand() + hash := crand.Address(0, int64(5), 32) + metadata := crand.Bytes(16) + + blk := &block{ + height: 5, + hash: hash, + transactions: []*transaction{}, + metadata: metadata, + } + + serialized := blk.Serialize() + crand.Reset() + deserialized, err := DeserializeBlock(crand, serialized) + require.NoError(t, err) + + require.Equal(t, blk.Height(), deserialized.Height()) + require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) + require.Empty(t, deserialized.Transactions()) +} + +func TestBlockRoundTripEmptyMetadata(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(1, crand, 1, 3, 32, 0) + + serialized := blk.Serialize() + crand.Reset() + deserialized, err := DeserializeBlock(crand, serialized) + require.NoError(t, err) + + require.Equal(t, blk.Height(), deserialized.Height()) + require.Empty(t, deserialized.Metadata()) + require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) + + for i, txn := range blk.Transactions() { + dtxn := deserialized.Transactions()[i] + require.Equal(t, 
txn.ID(), dtxn.ID()) + require.True(t, bytes.Equal(txn.Payload(), dtxn.Payload())) + } +} + +func TestBlockRoundTripLargeMetadata(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(42, crand, 1, 2, 32, 256) + + serialized := blk.Serialize() + crand.Reset() + deserialized, err := DeserializeBlock(crand, serialized) + require.NoError(t, err) + + require.Equal(t, blk.Height(), deserialized.Height()) + require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) + require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) +} + +func TestBlockDeserializeMetadataIsolation(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(1, crand, 1, 2, 32, 16) + serialized := blk.Serialize() + + crand.Reset() + deserialized, err := DeserializeBlock(crand, serialized) + require.NoError(t, err) + + // Mutating the serialized buffer should not affect deserialized metadata. + for i := 12; i < 12+16; i++ { + serialized[i] = 0xFF + } + require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) +} + +func TestBlockDeserializeTooShort(t *testing.T) { + crand := newTestCrand() + _, err := DeserializeBlock(crand, make([]byte, 15)) + require.Error(t, err) + + _, err = DeserializeBlock(crand, make([]byte, 0)) + require.Error(t, err) +} + +func TestBlockDeserializeTruncatedMetadata(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(1, crand, 1, 2, 32, 64) + serialized := blk.Serialize() + + // Truncate so metadata is incomplete. + crand.Reset() + _, err := DeserializeBlock(crand, serialized[:14]) + require.Error(t, err) +} + +func TestBlockDeserializeTruncatedTransaction(t *testing.T) { + crand := newTestCrand() + blk := RandomBlock(1, crand, 1, 3, 48, 8) + serialized := blk.Serialize() + + // Keep header + metadata + num txns + part of first transaction. 
+ truncateAt := 8 + 4 + 8 + 4 + 10 + crand.Reset() + _, err := DeserializeBlock(crand, serialized[:truncateAt]) + require.Error(t, err) +} diff --git a/sei-db/state_db/bench/blocksim/transaction.go b/sei-db/state_db/bench/blocksim/transaction.go index 852d02aaa8..adc57bad78 100644 --- a/sei-db/state_db/bench/blocksim/transaction.go +++ b/sei-db/state_db/bench/blocksim/transaction.go @@ -1,6 +1,11 @@ package blocksim -import "github.com/sei-protocol/sei-chain/sei-db/common/rand" +import ( + "encoding/binary" + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/rand" +) // A simulated transaction for the blocksim benchmark. type transaction struct { @@ -14,12 +19,70 @@ type transaction struct { payload []byte } -// Creates a new transaction with the given ID. -func NewTransaction( +// Creates a randomized transaction with the given ID. +func RandomTransaction( id uint64, crand *rand.CannedRandom, + size int, ) *transaction { + + hash := crand.Address(0, int64(id), 32) + payload := crand.Bytes(size) + return &transaction{ - id: id, + id: id, + hash: hash, + payload: payload, } } + +// Returns the hash of the transaction. +// +// Data is not safe to modify in place, make a copy before modifying. +func (t *transaction) Hash() []byte { + return t.hash +} + +// Returns the payload of the transaction. +// +// Data is not safe to modify in place, make a copy before modifying. +func (t *transaction) Payload() []byte { + return t.payload +} + +// Returns the ID of the transaction. +func (t *transaction) ID() uint64 { + return t.id +} + +// Returns the serialized transaction. +func (t *transaction) Serialize() []byte { + data := make([]byte, len(t.payload)+8 /* id */ +4 /* payload size */) + binary.BigEndian.PutUint64(data[:8], t.id) + binary.BigEndian.PutUint32(data[8:12], uint32(len(t.payload))) + copy(data[12:], t.payload) + return data +} + +// Deserializes a transaction from the given data. 
+func DeserializeTransaction(crand *rand.CannedRandom, data []byte) (*transaction, error) { + if len(data) < 12 { + return nil, fmt.Errorf("data too short to contain a transaction") + } + + id := binary.BigEndian.Uint64(data[:8]) + payloadSize := binary.BigEndian.Uint32(data[8:12]) + if len(data) < 12+int(payloadSize) { + return nil, fmt.Errorf("data too short to contain a transaction") + } + + payload := make([]byte, payloadSize) + copy(payload, data[12:12+payloadSize]) + hash := crand.Address(0, int64(id), 32) + + return &transaction{ + id: id, + hash: hash, + payload: payload, + }, nil +} From 33378ce553a82035cf63909b2173f9d1a9d8334c Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 27 Mar 2026 09:04:06 -0400 Subject: [PATCH 082/119] bugfix --- sei-db/state_db/sc/flatkv/store_write.go | 2 ++ sei-db/state_db/sc/flatkv/store_write_test.go | 32 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 2e09d4cd46..c201391f37 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -104,6 +104,8 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } else { oldAccountRawValues[addrStr] = paw.value.Encode() } + } else if result, ok := accountOld[addrKey]; ok { + oldAccountRawValues[addrStr] = result.Value } else { oldAccountRawValues[addrStr] = nil } diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 96f41ad290..0b9da7700f 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -1438,6 +1438,38 @@ func TestAccountRowGCWriteZeroOrderIndependent(t *testing.T) { // Write Test Helpers // ============================================================================= +// TestLtHashExistingAccountNonceUpdate is a focused regression test for the +// oldAccountRawValues bug: when an 
account already exists in the DB and a new +// block updates its nonce (the most common case — every tx increments sender +// nonce), the LtHash delta must MixOut the old encoded AccountValue before +// MixIn'ing the new one. The bug sets oldAccountRawValues[addr] = nil instead +// of the DB value when s.accountWrites has no pending entry, causing the +// MixOut to be skipped and the LtHash to diverge from ground truth. +func TestLtHashExistingAccountNonceUpdate(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := addrN(0xE1) + + // Block 1: create account with nonce=1 (new account — oldAccountRawValues + // correctly nil here since nothing exists in DB). + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ + namedCS(noncePair(addr, 1)), + })) + commitAndCheck(t, s) + verifyLtHashAtHeight(t, s, 1) // should pass: new account, nil old is correct + + // Block 2: update nonce to 2. The account now EXISTS in accountDB with + // encoded(nonce=1). The buggy code sets oldAccountRawValues[addr] = nil + // because s.accountWrites is empty after the block-1 commit cleared it. + // The correct old value is the DB's encoded(nonce=1). 
+ require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ + namedCS(noncePair(addr, 2)), + })) + commitAndCheck(t, s) + verifyLtHashAtHeight(t, s, 2) // FAILS: incremental skipped MixOut of old value +} + func countLiveEntries(t *testing.T, db types.KeyValueDB) int { t.Helper() iter, err := db.NewIter(&types.IterOptions{}) From f8d01136827a68f35449bfe10ae273b2d5ef09bd Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 27 Mar 2026 10:32:52 -0400 Subject: [PATCH 083/119] Basic impl and unit tests --- sei-db/block_db/block_db.go | 65 +++ sei-db/block_db/block_db_test.go | 434 ++++++++++++++++++ .../bench => block_db}/blocksim/block.go | 0 .../blocksim/blocksim_test.go | 0 .../blocksim/transaction.go | 0 sei-db/block_db/mem_block_db.go | 123 +++++ 6 files changed, 622 insertions(+) create mode 100644 sei-db/block_db/block_db.go create mode 100644 sei-db/block_db/block_db_test.go rename sei-db/{state_db/bench => block_db}/blocksim/block.go (100%) rename sei-db/{state_db/bench => block_db}/blocksim/blocksim_test.go (100%) rename sei-db/{state_db/bench => block_db}/blocksim/transaction.go (100%) create mode 100644 sei-db/block_db/mem_block_db.go diff --git a/sei-db/block_db/block_db.go b/sei-db/block_db/block_db.go new file mode 100644 index 0000000000..2d2b925dfe --- /dev/null +++ b/sei-db/block_db/block_db.go @@ -0,0 +1,65 @@ +package blockdb + +import "context" + +// A binary transaction with its hash. +type BinaryTransaction struct { + // The hash of the transaction. + Hash []byte + // The binary transaction data. + Transaction []byte +} + +// A binary block with its transactions and hash. +type BinaryBlock struct { + // The height of the block. Must be unique. + Height uint64 + // The hash of the block. Must be unique. + Hash []byte + // The binary block data, not including transaction data (unless you are ok with wasting space) + BlockData []byte + // The transactions in the block and their hashes. 
+ Transactions []*BinaryTransaction +} + +// A database for storing binary block and transaction data. +// +// This store is fully threadsafe. All writes are atomic (that is, after a crash you will either see the write or +// you will not see it at all, i.e. partial writes are not possible). Multiple writes are not atomic with respect +// to each other, meaning if you write A then B and crash, you may observe B but not A (only possible when sharding +// is enabled). Within a single session, read-your-writes consistency is provided. +type BlockDB interface { + + // Write a block to the database. + // + // This method may return immediately and does not necessarily wait for the block to be written to disk. + // Call Flush() if you need to wait until the block is written to disk. + WriteBlock(ctx context.Context, block *BinaryBlock) error + + // Blocks until all pending writes are flushed to disk. Any call to WriteBlock issued before calling Flush() + // will be crash-durable after Flush() returns. Calls to WriteBlock() made concurrently with Flush() may or + // may not be crash-durable after Flush() returns (but are otherwise eventually durable). + // + // It is not required to call Flush() in order to ensure data is written to disk. The database asyncronously + // pushes data down to disk even if Flush() is never called. Flush() just allows you to syncronize an external + // goroutine with the database's internal write loop. + Flush(ctx context.Context) error + + // Retrieves a block by its hash. + GetBlockByHash(ctx context.Context, hash []byte) (block *BinaryBlock, ok bool, err error) + + // Retrieves a block by its height. + GetBlockByHeight(ctx context.Context, height uint64) (block *BinaryBlock, ok bool, err error) + + // Retrieves a transaction by its hash. + GetTransactionByHash(ctx context.Context, hash []byte) (transaction *BinaryTransaction, ok bool, err error) + + // Schedules pruning for all blocks with a height less than the given height. 
Pruning is asyncronous, + // and so this method does not provide any guarantees about when the pruning will complete. It is possible + // that some data will not be pruned if the database is closed before the pruning is scheduled. + Prune(ctx context.Context, lowestHeightToKeep uint64) error + + // Closes the database and releases any resources. Any in-flight writes are fully flushed to disk before this + // method returns. + Close(ctx context.Context) error +} diff --git a/sei-db/block_db/block_db_test.go b/sei-db/block_db/block_db_test.go new file mode 100644 index 0000000000..19f07dcfd2 --- /dev/null +++ b/sei-db/block_db/block_db_test.go @@ -0,0 +1,434 @@ +package blockdb + +import ( + "bytes" + "context" + "fmt" + "testing" + + crand "github.com/sei-protocol/sei-chain/sei-db/common/rand" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +var testRng = crand.NewCannedRandom(4*unit.MB, 42) + +type blockDBBuilder struct { + name string + builder func(path string) (BlockDB, error) +} + +func buildBuilders() []blockDBBuilder { + return []blockDBBuilder{ + newMemBlockDBBuilder(), + } +} + +func newMemBlockDBBuilder() blockDBBuilder { + store := make(map[string]*memBlockDBData) + return blockDBBuilder{ + name: "mem", + builder: func(path string) (BlockDB, error) { + data, ok := store[path] + if !ok { + data = &memBlockDBData{ + blocksByHash: make(map[string]*BinaryBlock), + blocksByHeight: make(map[uint64]*BinaryBlock), + txByHash: make(map[string]*BinaryTransaction), + } + store[path] = data + } + return &memBlockDB{data: data}, nil + }, + } +} + +func makeBlock(height uint64, numTxs int) *BinaryBlock { + txs := make([]*BinaryTransaction, numTxs) + for i := 0; i < numTxs; i++ { + txs[i] = &BinaryTransaction{ + Hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), + Transaction: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), + } + } + return &BinaryBlock{ + Height: height, + Hash: []byte(fmt.Sprintf("block-%d", height)), + BlockData: 
[]byte(fmt.Sprintf("block-data-%d", height)), + Transactions: txs, + } +} + +func forEachBuilder(t *testing.T, fn func(t *testing.T, builder func(path string) (BlockDB, error))) { + for _, b := range buildBuilders() { + t.Run(b.name, func(t *testing.T) { + fn(t, b.builder) + }) + } +} + +func TestWriteAndGetBlockByHeight(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + block := makeBlock(1, 2) + requireNoError(t, db.WriteBlock(ctx, block)) + + got, ok, err := db.GetBlockByHeight(ctx, 1) + requireNoError(t, err) + requireTrue(t, ok, "expected block at height 1") + requireBlockEqual(t, block, got) + }) +} + +func TestWriteAndGetBlockByHash(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + block := makeBlock(5, 3) + requireNoError(t, db.WriteBlock(ctx, block)) + + got, ok, err := db.GetBlockByHash(ctx, block.Hash) + requireNoError(t, err) + requireTrue(t, ok, "expected block with matching hash") + requireBlockEqual(t, block, got) + }) +} + +func TestGetTransactionByHash(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + block := makeBlock(1, 4) + requireNoError(t, db.WriteBlock(ctx, block)) + + for _, tx := range block.Transactions { + got, ok, err := db.GetTransactionByHash(ctx, tx.Hash) + requireNoError(t, err) + requireTrue(t, ok, "expected transaction with hash %s", tx.Hash) + requireBytesEqual(t, tx.Hash, got.Hash, "transaction hash") + requireBytesEqual(t, tx.Transaction, got.Transaction, "transaction data") + } + }) +} + +func TestGetBlockNotFound(t *testing.T) { + forEachBuilder(t, 
func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + _, ok, err := db.GetBlockByHeight(ctx, 999) + requireNoError(t, err) + requireTrue(t, !ok, "expected no block at height 999") + + _, ok, err = db.GetBlockByHash(ctx, []byte("nonexistent")) + requireNoError(t, err) + requireTrue(t, !ok, "expected no block with nonexistent hash") + }) +} + +func TestGetTransactionNotFound(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + _, ok, err := db.GetTransactionByHash(ctx, []byte("nonexistent")) + requireNoError(t, err) + requireTrue(t, !ok, "expected no transaction with nonexistent hash") + }) +} + +func TestMultipleBlocks(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blocks := make([]*BinaryBlock, 10) + for i := range blocks { + blocks[i] = makeBlock(uint64(i+1), 2) + requireNoError(t, db.WriteBlock(ctx, blocks[i])) + } + + for _, block := range blocks { + got, ok, err := db.GetBlockByHeight(ctx, block.Height) + requireNoError(t, err) + requireTrue(t, ok, "expected block at height %d", block.Height) + requireBlockEqual(t, block, got) + } + }) +} + +func TestPrunePreservesUnprunedBlocks(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + for i := uint64(1); i <= 10; i++ { + requireNoError(t, db.WriteBlock(ctx, makeBlock(i, 1))) + } + + requireNoError(t, db.Flush(ctx)) + requireNoError(t, db.Prune(ctx, 6)) + + for i := uint64(6); i <= 10; i++ { + _, ok, err := 
db.GetBlockByHeight(ctx, i) + requireNoError(t, err) + requireTrue(t, ok, "expected block at height %d to survive pruning", i) + } + }) +} + +func TestPrunePreservesUnprunedTransactions(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + survivingBlock := makeBlock(2, 3) + requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) + requireNoError(t, db.WriteBlock(ctx, survivingBlock)) + + requireNoError(t, db.Flush(ctx)) + requireNoError(t, db.Prune(ctx, 2)) + + for _, tx := range survivingBlock.Transactions { + _, ok, err := db.GetTransactionByHash(ctx, tx.Hash) + requireNoError(t, err) + requireTrue(t, ok, "expected transaction %s to survive pruning", tx.Hash) + } + }) +} + +func TestPruneDoesNotError(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + requireNoError(t, db.Prune(ctx, 100)) + + for i := uint64(1); i <= 5; i++ { + requireNoError(t, db.WriteBlock(ctx, makeBlock(i, 1))) + } + + requireNoError(t, db.Prune(ctx, 3)) + requireNoError(t, db.Prune(ctx, 100)) + }) +} + +func TestCloseAndReopen(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + path := t.TempDir() + + db, err := builder(path) + requireNoError(t, err) + + block := makeBlock(1, 2) + requireNoError(t, db.WriteBlock(ctx, block)) + requireNoError(t, db.Flush(ctx)) + requireNoError(t, db.Close(ctx)) + + db2, err := builder(path) + requireNoError(t, err) + defer db2.Close(ctx) + + got, ok, err := db2.GetBlockByHeight(ctx, 1) + requireNoError(t, err) + requireTrue(t, ok, "expected block to survive close/reopen") + requireBlockEqual(t, block, got) + + for _, tx := range block.Transactions { + gotTx, ok, err := 
db2.GetTransactionByHash(ctx, tx.Hash) + requireNoError(t, err) + requireTrue(t, ok, "expected tx to survive close/reopen") + requireBytesEqual(t, tx.Transaction, gotTx.Transaction, "transaction data") + } + }) +} + +func TestCloseAndReopenThenWrite(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + path := t.TempDir() + + db, err := builder(path) + requireNoError(t, err) + requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) + requireNoError(t, db.Flush(ctx)) + requireNoError(t, db.Close(ctx)) + + db2, err := builder(path) + requireNoError(t, err) + defer db2.Close(ctx) + + requireNoError(t, db2.WriteBlock(ctx, makeBlock(2, 1))) + + for _, h := range []uint64{1, 2} { + _, ok, err := db2.GetBlockByHeight(ctx, h) + requireNoError(t, err) + requireTrue(t, ok, "expected block at height %d after reopen+write", h) + } + }) +} + +func TestFlush(t *testing.T) { + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + requireNoError(t, db.Flush(ctx)) + + requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) + requireNoError(t, db.Flush(ctx)) + }) +} + +func TestBulkWriteAndQuery(t *testing.T) { + const numBlocks = 1000 + const txsPerBlock = 50 + + forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { + ctx := context.Background() + db, err := builder(t.TempDir()) + requireNoError(t, err) + defer db.Close(ctx) + + blocks := make([]*BinaryBlock, numBlocks) + for i := range blocks { + blocks[i] = makeRandomBlock(testRng, uint64(i+1), txsPerBlock) + requireNoError(t, db.WriteBlock(ctx, blocks[i])) + } + + requireNoError(t, db.Flush(ctx)) + + for _, expected := range blocks { + byHeight, ok, err := db.GetBlockByHeight(ctx, expected.Height) + requireNoError(t, err) + requireTrue(t, ok, "block not found by height %d", expected.Height) + 
requireBlockBytesEqual(t, expected, byHeight) + + byHash, ok, err := db.GetBlockByHash(ctx, expected.Hash) + requireNoError(t, err) + requireTrue(t, ok, "block not found by hash at height %d", expected.Height) + requireBlockBytesEqual(t, expected, byHash) + + for _, expectedTx := range expected.Transactions { + gotTx, ok, err := db.GetTransactionByHash(ctx, expectedTx.Hash) + requireNoError(t, err) + requireTrue(t, ok, "tx not found by hash %x (block height %d)", expectedTx.Hash, expected.Height) + requireBytesEqual(t, expectedTx.Hash, gotTx.Hash, "tx hash") + requireBytesEqual(t, expectedTx.Transaction, gotTx.Transaction, "tx data") + } + } + }) +} + +// makeRandomBlock builds a block with deterministic random binary payloads. +// Returned slices are owned copies safe for storage and later comparison. +func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *BinaryBlock { + txs := make([]*BinaryTransaction, numTxs) + for i := range txs { + txHash := rng.Address('t', int64(height)*1000+int64(i), 32) + txDataLen := 64 + int(rng.Int64Range(0, 512)) + txData := copyBytes(rng.Bytes(txDataLen)) + txs[i] = &BinaryTransaction{Hash: txHash, Transaction: txData} + } + + blockHash := rng.Address('b', int64(height), 32) + blockDataLen := 128 + int(rng.Int64Range(0, 1024)) + blockData := copyBytes(rng.Bytes(blockDataLen)) + + return &BinaryBlock{ + Height: height, + Hash: blockHash, + BlockData: blockData, + Transactions: txs, + } +} + +func copyBytes(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +// requireBlockBytesEqual does a deep byte-level comparison, suitable for verifying +// round-trip fidelity through serialization. 
+func requireBlockBytesEqual(t *testing.T, expected, actual *BinaryBlock) { + t.Helper() + if expected.Height != actual.Height { + t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) + } + requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") + requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") + if len(expected.Transactions) != len(actual.Transactions) { + t.Fatalf("transaction count mismatch at height %d: expected %d, got %d", + expected.Height, len(expected.Transactions), len(actual.Transactions)) + } + for i, tx := range expected.Transactions { + label := fmt.Sprintf("height %d tx[%d]", expected.Height, i) + requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, label+" hash") + requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, label+" data") + } +} + +// --- test helpers --- + +func requireNoError(t *testing.T, err error) { + t.Helper() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func requireTrue(t *testing.T, cond bool, format string, args ...any) { + t.Helper() + if !cond { + t.Fatalf(format, args...) 
+ } +} + +func requireBytesEqual(t *testing.T, expected, actual []byte, label string) { + t.Helper() + if !bytes.Equal(expected, actual) { + t.Fatalf("%s mismatch: expected %q, got %q", label, expected, actual) + } +} + +func requireBlockEqual(t *testing.T, expected, actual *BinaryBlock) { + t.Helper() + if expected.Height != actual.Height { + t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) + } + requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") + requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") + if len(expected.Transactions) != len(actual.Transactions) { + t.Fatalf("transaction count mismatch: expected %d, got %d", + len(expected.Transactions), len(actual.Transactions)) + } + for i, tx := range expected.Transactions { + requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, fmt.Sprintf("tx[%d] hash", i)) + requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, fmt.Sprintf("tx[%d] data", i)) + } +} diff --git a/sei-db/state_db/bench/blocksim/block.go b/sei-db/block_db/blocksim/block.go similarity index 100% rename from sei-db/state_db/bench/blocksim/block.go rename to sei-db/block_db/blocksim/block.go diff --git a/sei-db/state_db/bench/blocksim/blocksim_test.go b/sei-db/block_db/blocksim/blocksim_test.go similarity index 100% rename from sei-db/state_db/bench/blocksim/blocksim_test.go rename to sei-db/block_db/blocksim/blocksim_test.go diff --git a/sei-db/state_db/bench/blocksim/transaction.go b/sei-db/block_db/blocksim/transaction.go similarity index 100% rename from sei-db/state_db/bench/blocksim/transaction.go rename to sei-db/block_db/blocksim/transaction.go diff --git a/sei-db/block_db/mem_block_db.go b/sei-db/block_db/mem_block_db.go new file mode 100644 index 0000000000..68eef13c5f --- /dev/null +++ b/sei-db/block_db/mem_block_db.go @@ -0,0 +1,123 @@ +package blockdb + +import ( + "context" + "sync" +) + +// Shared backing store, keyed by path in test builders to 
simulate restarts. +type memBlockDBData struct { + mu sync.RWMutex + blocksByHash map[string]*BinaryBlock + blocksByHeight map[uint64]*BinaryBlock + txByHash map[string]*BinaryTransaction + lowestHeight uint64 + highestHeight uint64 + hasBlocks bool +} + +// An in-memory implementation of the BlockDB interface. Useful as a test fixture to sanity check +// test flows. +type memBlockDB struct { + data *memBlockDBData +} + +func newMemBlockDB() BlockDB { + return &memBlockDB{ + data: &memBlockDBData{ + blocksByHash: make(map[string]*BinaryBlock), + blocksByHeight: make(map[uint64]*BinaryBlock), + txByHash: make(map[string]*BinaryTransaction), + }, + } +} + +func (m *memBlockDB) WriteBlock(_ context.Context, block *BinaryBlock) error { + d := m.data + d.mu.Lock() + defer d.mu.Unlock() + + d.blocksByHash[string(block.Hash)] = block + d.blocksByHeight[block.Height] = block + for _, tx := range block.Transactions { + d.txByHash[string(tx.Hash)] = tx + } + + if !d.hasBlocks { + d.lowestHeight = block.Height + d.highestHeight = block.Height + d.hasBlocks = true + } else { + if block.Height < d.lowestHeight { + d.lowestHeight = block.Height + } + if block.Height > d.highestHeight { + d.highestHeight = block.Height + } + } + return nil +} + +func (m *memBlockDB) Flush(_ context.Context) error { + return nil +} + +func (m *memBlockDB) GetBlockByHash(_ context.Context, hash []byte) (*BinaryBlock, bool, error) { + d := m.data + d.mu.RLock() + defer d.mu.RUnlock() + + block, ok := d.blocksByHash[string(hash)] + return block, ok, nil +} + +func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (*BinaryBlock, bool, error) { + d := m.data + d.mu.RLock() + defer d.mu.RUnlock() + + block, ok := d.blocksByHeight[height] + return block, ok, nil +} + +func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (*BinaryTransaction, bool, error) { + d := m.data + d.mu.RLock() + defer d.mu.RUnlock() + + tx, ok := d.txByHash[string(hash)] + return tx, ok, nil 
+} + +func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { + d := m.data + d.mu.Lock() + defer d.mu.Unlock() + + if !d.hasBlocks || lowestHeightToKeep <= d.lowestHeight { + return nil + } + + for h := d.lowestHeight; h < lowestHeightToKeep && h <= d.highestHeight; h++ { + block, ok := d.blocksByHeight[h] + if !ok { + continue + } + delete(d.blocksByHeight, h) + delete(d.blocksByHash, string(block.Hash)) + for _, tx := range block.Transactions { + delete(d.txByHash, string(tx.Hash)) + } + } + + if lowestHeightToKeep > d.highestHeight { + d.hasBlocks = false + } else { + d.lowestHeight = lowestHeightToKeep + } + return nil +} + +func (m *memBlockDB) Close(_ context.Context) error { + return nil +} From d9c27bcf48cc56ea039e634f5b62af45be01743e Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 27 Mar 2026 11:30:47 -0400 Subject: [PATCH 084/119] config --- sei-db/block_db/blocksim/block.go | 167 ---------------- sei-db/block_db/blocksim/block_generator.go | 1 + sei-db/block_db/blocksim/blocksim_config.go | 124 ++++++++++++ sei-db/block_db/blocksim/blocksim_test.go | 205 -------------------- sei-db/block_db/blocksim/transaction.go | 88 --------- 5 files changed, 125 insertions(+), 460 deletions(-) delete mode 100644 sei-db/block_db/blocksim/block.go create mode 100644 sei-db/block_db/blocksim/block_generator.go create mode 100644 sei-db/block_db/blocksim/blocksim_config.go delete mode 100644 sei-db/block_db/blocksim/blocksim_test.go delete mode 100644 sei-db/block_db/blocksim/transaction.go diff --git a/sei-db/block_db/blocksim/block.go b/sei-db/block_db/blocksim/block.go deleted file mode 100644 index af58f0ebb2..0000000000 --- a/sei-db/block_db/blocksim/block.go +++ /dev/null @@ -1,167 +0,0 @@ -package blocksim - -import ( - "encoding/binary" - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/common/rand" -) - -type block struct { - // The height of the block. - height uint64 - - // The (simulated) hash of the block. 
- hash []byte - - // The transactions in the block. - transactions []*transaction - - // Metadata for the block. Randomly generated. - metadata []byte -} - -// Creates a randomized block with the given height, first transaction ID, last transaction ID, -// transaction size, and metadata size. -func RandomBlock( - height uint64, - crand *rand.CannedRandom, - firstTransactionID uint64, - lastTransactionID uint64, - transactionSize int, - metadataSize int, -) *block { - transactions := make([]*transaction, 0, lastTransactionID-firstTransactionID+1) - for id := firstTransactionID; id <= lastTransactionID; id++ { - transactions = append(transactions, RandomTransaction(id, crand, transactionSize)) - } - metadata := crand.Bytes(metadataSize) - hash := crand.Address(0, int64(height), 32) - return &block{ - height: height, - hash: hash, - transactions: transactions, - metadata: metadata, - } -} - -// Returns the hash of the block. -// -// Data is not safe to modify in place, make a copy before modifying. -func (b *block) Hash() []byte { - return b.hash -} - -// Returns the transactions in the block. -// -// Data is not safe to modify in place, make a copy before modifying. -func (b *block) Transactions() []*transaction { - return b.transactions -} - -// Returns the metadata of the block. -// -// Data is not safe to modify in place, make a copy before modifying. -func (b *block) Metadata() []byte { - return b.metadata -} - -// Returns the height of the block. 
-func (b *block) Height() uint64 { - return b.height -} - -// Serialized block layout: -// -// [8 bytes: height] -// [4 bytes: metadata size (M)] -// [M bytes: metadata] -// [4 bytes: number of transactions (N)] -// For each transaction: -// [4 bytes: serialized transaction size (S)] -// [S bytes: serialized transaction data] -func (b *block) Serialize() []byte { - serializedTransactions := make([][]byte, 0, len(b.transactions)) - serializedTransactionsSize := 0 - for _, txn := range b.transactions { - serializedTransaction := txn.Serialize() - serializedTransactions = append(serializedTransactions, serializedTransaction) - serializedTransactionsSize += 4 /* size prefix */ + len(serializedTransaction) - } - - dataLen := 8 /* height */ + 4 /* metadata size */ + len(b.metadata) + - 4 /* number of transactions */ + serializedTransactionsSize - - data := make([]byte, dataLen) - off := 0 - - binary.BigEndian.PutUint64(data[off:], b.height) - off += 8 - - binary.BigEndian.PutUint32(data[off:], uint32(len(b.metadata))) - off += 4 - - copy(data[off:], b.metadata) - off += len(b.metadata) - - binary.BigEndian.PutUint32(data[off:], uint32(len(b.transactions))) - off += 4 - - for _, serializedTransaction := range serializedTransactions { - binary.BigEndian.PutUint32(data[off:], uint32(len(serializedTransaction))) - off += 4 - copy(data[off:], serializedTransaction) - off += len(serializedTransaction) - } - return data -} - -func DeserializeBlock(crand *rand.CannedRandom, data []byte) (*block, error) { - if len(data) < 16 { - return nil, fmt.Errorf("data too short to contain a block") - } - - off := 0 - - height := binary.BigEndian.Uint64(data[off:]) - off += 8 - - metadataSize := int(binary.BigEndian.Uint32(data[off:])) - off += 4 - - if len(data) < off+metadataSize+4 { - return nil, fmt.Errorf("data too short to contain block metadata") - } - metadata := make([]byte, metadataSize) - copy(metadata, data[off:off+metadataSize]) - off += metadataSize - - numberOfTransactions := 
int(binary.BigEndian.Uint32(data[off:])) - off += 4 - - transactions := make([]*transaction, 0, numberOfTransactions) - for i := 0; i < numberOfTransactions; i++ { - if len(data) < off+4 { - return nil, fmt.Errorf("data too short to contain transaction size") - } - transactionSize := int(binary.BigEndian.Uint32(data[off:])) - off += 4 - if len(data) < off+transactionSize { - return nil, fmt.Errorf("data too short to contain transaction") - } - txn, err := DeserializeTransaction(crand, data[off:off+transactionSize]) - if err != nil { - return nil, fmt.Errorf("failed to deserialize transaction: %w", err) - } - off += transactionSize - transactions = append(transactions, txn) - } - - hash := crand.Address(0, int64(height), 32) - return &block{ - height: height, - hash: hash, - metadata: metadata, - transactions: transactions, - }, nil -} diff --git a/sei-db/block_db/blocksim/block_generator.go b/sei-db/block_db/blocksim/block_generator.go new file mode 100644 index 0000000000..cb1b03cca3 --- /dev/null +++ b/sei-db/block_db/blocksim/block_generator.go @@ -0,0 +1 @@ +package blocksim diff --git a/sei-db/block_db/blocksim/blocksim_config.go b/sei-db/block_db/blocksim/blocksim_config.go new file mode 100644 index 0000000000..36e46d7ec2 --- /dev/null +++ b/sei-db/block_db/blocksim/blocksim_config.go @@ -0,0 +1,124 @@ +package blocksim + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +const ( + minCannedRandomSize = unit.MB +) + +// Configuration for the blocksim benchmark. +type BlocksimConfig struct { + + // The size of each simulated transaction, in bytes. Each transaction in a block will contain + // this many bytes of random data. + BytesPerTransaction uint64 + + // The number of transactions included in each generated block. + TransactionsPerBlock uint64 + + // Additional bytes of random data added to the block itself, beyond the transaction data. 
This + // simulates block-level metadata or other non-transaction payload. + ExtraBytesPerBlock uint64 + + // The size of each block hash, in bytes. + BlockHashSize uint64 + + // The size of each transaction hash, in bytes. + TransactionHashSize uint64 + + // The capacity of the queue that holds generated blocks before they are consumed by the + // benchmark. A larger queue allows the block generator to run further ahead of the consumer. + StagedBlockQueueSize uint64 + + // The size of the CannedRandom buffer, in bytes. Altering this value for a pre-existing run + // will change the random data generated, don't change it unless you are starting a new run + // from scratch. + CannedRandomSize uint64 + + // The number of blocks to keep in the database after pruning. + UnprunedBlocks uint64 +} + +// Returns the default configuration for the blocksim benchmark. +func DefaultBlocksimConfig() *BlocksimConfig { + return &BlocksimConfig{ + BytesPerTransaction: 512, + TransactionsPerBlock: 1024, + ExtraBytesPerBlock: 256, + BlockHashSize: 32, + TransactionHashSize: 32, + StagedBlockQueueSize: 8, + CannedRandomSize: unit.GB, + UnprunedBlocks: 100_000, + } +} + +// StringifiedConfig returns the config as human-readable, multi-line JSON. +func (c *BlocksimConfig) StringifiedConfig() (string, error) { + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return "", err + } + return string(b), nil +} + +// Validate checks that the configuration is sane and returns an error if not. 
+func (c *BlocksimConfig) Validate() error { + if c.BytesPerTransaction < 1 { + return fmt.Errorf("BytesPerTransaction must be at least 1 (got %d)", c.BytesPerTransaction) + } + if c.TransactionsPerBlock < 1 { + return fmt.Errorf("TransactionsPerBlock must be at least 1 (got %d)", c.TransactionsPerBlock) + } + if c.BlockHashSize < 1 { + return fmt.Errorf("BlockHashSize must be at least 1 (got %d)", c.BlockHashSize) + } + if c.TransactionHashSize < 1 { + return fmt.Errorf("TransactionHashSize must be at least 1 (got %d)", c.TransactionHashSize) + } + if c.StagedBlockQueueSize < 1 { + return fmt.Errorf("StagedBlockQueueSize must be at least 1 (got %d)", c.StagedBlockQueueSize) + } + if c.CannedRandomSize < minCannedRandomSize { + return fmt.Errorf("CannedRandomSize must be at least %d (got %d)", + minCannedRandomSize, c.CannedRandomSize) + } + if c.UnprunedBlocks < 1 { + return fmt.Errorf("UnprunedBlocks must be at least 1 (got %d)", c.UnprunedBlocks) + } + return nil +} + +// LoadConfigFromFile parses a JSON config file at the given path. +// Returns defaults with file values overlaid. Fails if the file contains +// unrecognized configuration keys. 
+func LoadConfigFromFile(path string) (*BlocksimConfig, error) { + cfg := DefaultBlocksimConfig() + //nolint:gosec // G304 - path comes from CLI arg, filepath.Clean used to mitigate traversal + f, err := os.Open(filepath.Clean(path)) + if err != nil { + return nil, fmt.Errorf("open config file: %w", err) + } + defer func() { + if err := f.Close(); err != nil { + fmt.Printf("failed to close config file: %v\n", err) + } + }() + + dec := json.NewDecoder(f) + dec.DisallowUnknownFields() + if err := dec.Decode(cfg); err != nil { + return nil, fmt.Errorf("decode config: %w", err) + } + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid config: %w", err) + } + return cfg, nil +} diff --git a/sei-db/block_db/blocksim/blocksim_test.go b/sei-db/block_db/blocksim/blocksim_test.go deleted file mode 100644 index 0e8f03081c..0000000000 --- a/sei-db/block_db/blocksim/blocksim_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package blocksim - -import ( - "bytes" - "testing" - - "github.com/sei-protocol/sei-chain/sei-db/common/rand" - "github.com/stretchr/testify/require" -) - -const testBufferSize = 1024 - -func newTestCrand() *rand.CannedRandom { - return rand.NewCannedRandom(testBufferSize, 42) -} - -func TestTransactionRoundTrip(t *testing.T) { - crand := newTestCrand() - txn := RandomTransaction(7, crand, 64) - - serialized := txn.Serialize() - crand.Reset() - deserialized, err := DeserializeTransaction(crand, serialized) - require.NoError(t, err) - - require.Equal(t, txn.ID(), deserialized.ID()) - require.True(t, bytes.Equal(txn.Hash(), deserialized.Hash())) - require.True(t, bytes.Equal(txn.Payload(), deserialized.Payload())) -} - -func TestTransactionRoundTripEmptyPayload(t *testing.T) { - crand := newTestCrand() - txn := RandomTransaction(1, crand, 0) - - serialized := txn.Serialize() - crand.Reset() - deserialized, err := DeserializeTransaction(crand, serialized) - require.NoError(t, err) - - require.Equal(t, txn.ID(), deserialized.ID()) - require.True(t, 
bytes.Equal(txn.Hash(), deserialized.Hash())) - require.Empty(t, deserialized.Payload()) -} - -func TestTransactionDeserializePayloadIsolation(t *testing.T) { - crand := newTestCrand() - txn := RandomTransaction(1, crand, 32) - serialized := txn.Serialize() - - crand.Reset() - deserialized, err := DeserializeTransaction(crand, serialized) - require.NoError(t, err) - - // Mutating the serialized buffer should not affect the deserialized transaction. - for i := 12; i < len(serialized); i++ { - serialized[i] = 0xFF - } - require.True(t, bytes.Equal(txn.Payload(), deserialized.Payload())) -} - -func TestTransactionDeserializeTooShort(t *testing.T) { - crand := newTestCrand() - _, err := DeserializeTransaction(crand, make([]byte, 11)) - require.Error(t, err) - - _, err = DeserializeTransaction(crand, make([]byte, 0)) - require.Error(t, err) -} - -func TestTransactionDeserializeTruncatedPayload(t *testing.T) { - crand := newTestCrand() - txn := RandomTransaction(1, crand, 64) - serialized := txn.Serialize() - - // Truncate the serialized data to cut off part of the payload. 
- crand.Reset() - _, err := DeserializeTransaction(crand, serialized[:20]) - require.Error(t, err) -} - -func TestBlockRoundTrip(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(100, crand, 1, 5, 48, 32) - - serialized := blk.Serialize() - crand.Reset() - deserialized, err := DeserializeBlock(crand, serialized) - require.NoError(t, err) - - require.Equal(t, blk.Height(), deserialized.Height()) - require.True(t, bytes.Equal(blk.Hash(), deserialized.Hash())) - require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) - require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) - - for i, txn := range blk.Transactions() { - dtxn := deserialized.Transactions()[i] - require.Equal(t, txn.ID(), dtxn.ID()) - require.True(t, bytes.Equal(txn.Hash(), dtxn.Hash())) - require.True(t, bytes.Equal(txn.Payload(), dtxn.Payload())) - } -} - -func TestBlockRoundTripNoTransactions(t *testing.T) { - crand := newTestCrand() - hash := crand.Address(0, int64(5), 32) - metadata := crand.Bytes(16) - - blk := &block{ - height: 5, - hash: hash, - transactions: []*transaction{}, - metadata: metadata, - } - - serialized := blk.Serialize() - crand.Reset() - deserialized, err := DeserializeBlock(crand, serialized) - require.NoError(t, err) - - require.Equal(t, blk.Height(), deserialized.Height()) - require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) - require.Empty(t, deserialized.Transactions()) -} - -func TestBlockRoundTripEmptyMetadata(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(1, crand, 1, 3, 32, 0) - - serialized := blk.Serialize() - crand.Reset() - deserialized, err := DeserializeBlock(crand, serialized) - require.NoError(t, err) - - require.Equal(t, blk.Height(), deserialized.Height()) - require.Empty(t, deserialized.Metadata()) - require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) - - for i, txn := range blk.Transactions() { - dtxn := deserialized.Transactions()[i] - require.Equal(t, 
txn.ID(), dtxn.ID()) - require.True(t, bytes.Equal(txn.Payload(), dtxn.Payload())) - } -} - -func TestBlockRoundTripLargeMetadata(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(42, crand, 1, 2, 32, 256) - - serialized := blk.Serialize() - crand.Reset() - deserialized, err := DeserializeBlock(crand, serialized) - require.NoError(t, err) - - require.Equal(t, blk.Height(), deserialized.Height()) - require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) - require.Equal(t, len(blk.Transactions()), len(deserialized.Transactions())) -} - -func TestBlockDeserializeMetadataIsolation(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(1, crand, 1, 2, 32, 16) - serialized := blk.Serialize() - - crand.Reset() - deserialized, err := DeserializeBlock(crand, serialized) - require.NoError(t, err) - - // Mutating the serialized buffer should not affect deserialized metadata. - for i := 12; i < 12+16; i++ { - serialized[i] = 0xFF - } - require.True(t, bytes.Equal(blk.Metadata(), deserialized.Metadata())) -} - -func TestBlockDeserializeTooShort(t *testing.T) { - crand := newTestCrand() - _, err := DeserializeBlock(crand, make([]byte, 15)) - require.Error(t, err) - - _, err = DeserializeBlock(crand, make([]byte, 0)) - require.Error(t, err) -} - -func TestBlockDeserializeTruncatedMetadata(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(1, crand, 1, 2, 32, 64) - serialized := blk.Serialize() - - // Truncate so metadata is incomplete. - crand.Reset() - _, err := DeserializeBlock(crand, serialized[:14]) - require.Error(t, err) -} - -func TestBlockDeserializeTruncatedTransaction(t *testing.T) { - crand := newTestCrand() - blk := RandomBlock(1, crand, 1, 3, 48, 8) - serialized := blk.Serialize() - - // Keep header + metadata + num txns + part of first transaction. 
- truncateAt := 8 + 4 + 8 + 4 + 10 - crand.Reset() - _, err := DeserializeBlock(crand, serialized[:truncateAt]) - require.Error(t, err) -} diff --git a/sei-db/block_db/blocksim/transaction.go b/sei-db/block_db/blocksim/transaction.go deleted file mode 100644 index adc57bad78..0000000000 --- a/sei-db/block_db/blocksim/transaction.go +++ /dev/null @@ -1,88 +0,0 @@ -package blocksim - -import ( - "encoding/binary" - "fmt" - - "github.com/sei-protocol/sei-chain/sei-db/common/rand" -) - -// A simulated transaction for the blocksim benchmark. -type transaction struct { - // The unique ID of the transaction. Used to determinstically generate the transaction hash. - id uint64 - - // The (simulated) hash of the transaction. - hash []byte - - // Data contained by the transaction. These bytes are randomly generated. - payload []byte -} - -// Creates a randomized transaction with the given ID. -func RandomTransaction( - id uint64, - crand *rand.CannedRandom, - size int, -) *transaction { - - hash := crand.Address(0, int64(id), 32) - payload := crand.Bytes(size) - - return &transaction{ - id: id, - hash: hash, - payload: payload, - } -} - -// Returns the hash of the transaction. -// -// Data is not safe to modify in place, make a copy before modifying. -func (t *transaction) Hash() []byte { - return t.hash -} - -// Returns the payload of the transaction. -// -// Data is not safe to modify in place, make a copy before modifying. -func (t *transaction) Payload() []byte { - return t.payload -} - -// Returns the ID of the transaction. -func (t *transaction) ID() uint64 { - return t.id -} - -// Returns the serialized transaction. -func (t *transaction) Serialize() []byte { - data := make([]byte, len(t.payload)+8 /* id */ +4 /* payload size */) - binary.BigEndian.PutUint64(data[:8], t.id) - binary.BigEndian.PutUint32(data[8:12], uint32(len(t.payload))) - copy(data[12:], t.payload) - return data -} - -// Deserializes a transaction from the given data. 
-func DeserializeTransaction(crand *rand.CannedRandom, data []byte) (*transaction, error) { - if len(data) < 12 { - return nil, fmt.Errorf("data too short to contain a transaction") - } - - id := binary.BigEndian.Uint64(data[:8]) - payloadSize := binary.BigEndian.Uint32(data[8:12]) - if len(data) < 12+int(payloadSize) { - return nil, fmt.Errorf("data too short to contain a transaction") - } - - payload := make([]byte, payloadSize) - copy(payload, data[12:12+payloadSize]) - hash := crand.Address(0, int64(id), 32) - - return &transaction{ - id: id, - hash: hash, - payload: payload, - }, nil -} From 55c871f9e46a1c200204db4b21cf6285c063d84b Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Fri, 27 Mar 2026 11:43:49 -0400 Subject: [PATCH 085/119] block generator --- sei-db/block_db/blocksim/block_generator.go | 90 +++++++++++++++++++++ sei-db/block_db/blocksim/blocksim_config.go | 9 ++- 2 files changed, 95 insertions(+), 4 deletions(-) diff --git a/sei-db/block_db/blocksim/block_generator.go b/sei-db/block_db/blocksim/block_generator.go index cb1b03cca3..6ffbb168c3 100644 --- a/sei-db/block_db/blocksim/block_generator.go +++ b/sei-db/block_db/blocksim/block_generator.go @@ -1 +1,91 @@ package blocksim + +import ( + "context" + + blockdb "github.com/sei-protocol/sei-chain/sei-db/block_db" + "github.com/sei-protocol/sei-chain/sei-db/common/rand" +) + +const ( + blockHashType = 'b' + txHashType = 't' +) + +// Asynchronously generates random blocks and feeds them into a channel. +type BlockGenerator struct { + ctx context.Context + config *BlocksimConfig + rand *rand.CannedRandom + + // The next block height to be assigned. + nextHeight uint64 + + // Generated blocks are sent to this channel. + blocksChan chan *blockdb.BinaryBlock +} + +// Creates a new BlockGenerator and immediately starts its background goroutine. +// The generator stops when the context is cancelled. 
+func NewBlockGenerator( + ctx context.Context, + config *BlocksimConfig, + rng *rand.CannedRandom, + startHeight uint64, +) *BlockGenerator { + g := &BlockGenerator{ + ctx: ctx, + config: config, + rand: rng, + nextHeight: startHeight, + blocksChan: make(chan *blockdb.BinaryBlock, config.StagedBlockQueueSize), + } + go g.mainLoop() + return g +} + +// NextBlock blocks until the next generated block is available and returns it. +// Returns nil if the context has been cancelled and no more blocks will be produced. +func (g *BlockGenerator) NextBlock() *blockdb.BinaryBlock { + select { + case <-g.ctx.Done(): + return nil + case blk := <-g.blocksChan: + return blk + } +} + +func (g *BlockGenerator) mainLoop() { + for { + blk := g.buildBlock() + select { + case <-g.ctx.Done(): + return + case g.blocksChan <- blk: + } + } +} + +func (g *BlockGenerator) buildBlock() *blockdb.BinaryBlock { + height := g.nextHeight + g.nextHeight++ + + txs := make([]*blockdb.BinaryTransaction, g.config.TransactionsPerBlock) + for i := uint64(0); i < g.config.TransactionsPerBlock; i++ { + txID := int64(height)*int64(g.config.TransactionsPerBlock) + int64(i) //nolint:gosec + txs[i] = &blockdb.BinaryTransaction{ + Hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), + Transaction: g.rand.Bytes(int(g.config.BytesPerTransaction)), + } + } + + blockHash := g.rand.Address(blockHashType, int64(height), int(g.config.BlockHashSize)) //nolint:gosec + blockData := g.rand.Bytes(int(g.config.ExtraBytesPerBlock)) + + return &blockdb.BinaryBlock{ + Height: height, + Hash: blockHash, + BlockData: blockData, + Transactions: txs, + } +} diff --git a/sei-db/block_db/blocksim/blocksim_config.go b/sei-db/block_db/blocksim/blocksim_config.go index 36e46d7ec2..5ca218ef17 100644 --- a/sei-db/block_db/blocksim/blocksim_config.go +++ b/sei-db/block_db/blocksim/blocksim_config.go @@ -10,6 +10,7 @@ import ( ) const ( + minHashSize = 20 minCannedRandomSize = unit.MB ) @@ -77,11 +78,11 @@ func (c 
*BlocksimConfig) Validate() error { if c.TransactionsPerBlock < 1 { return fmt.Errorf("TransactionsPerBlock must be at least 1 (got %d)", c.TransactionsPerBlock) } - if c.BlockHashSize < 1 { - return fmt.Errorf("BlockHashSize must be at least 1 (got %d)", c.BlockHashSize) + if c.BlockHashSize < minHashSize { + return fmt.Errorf("BlockHashSize must be at least %d (got %d)", minHashSize, c.BlockHashSize) } - if c.TransactionHashSize < 1 { - return fmt.Errorf("TransactionHashSize must be at least 1 (got %d)", c.TransactionHashSize) + if c.TransactionHashSize < minHashSize { + return fmt.Errorf("TransactionHashSize must be at least %d (got %d)", minHashSize, c.TransactionHashSize) } if c.StagedBlockQueueSize < 1 { return fmt.Errorf("StagedBlockQueueSize must be at least 1 (got %d)", c.StagedBlockQueueSize) From d59bfab30e43d55747ba95cfd5d9217f2051edc2 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 10:47:10 -0500 Subject: [PATCH 086/119] created struct for account data --- .../state_db/sc/flatkv/vtype/account_data.go | 140 +++++++++++++ .../sc/flatkv/vtype/account_data_test.go | 186 ++++++++++++++++++ .../flatkv/vtype/testdata/account_data_v0.hex | 1 + 3 files changed, 327 insertions(+) create mode 100644 sei-db/state_db/sc/flatkv/vtype/account_data.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/account_data_test.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go new file mode 100644 index 0000000000..0d78173f08 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -0,0 +1,140 @@ +package vtype + +import ( + "encoding/binary" + "errors" + "fmt" +) + +type AccountDataVersion uint8 + +// DO NOT CHANGE VERSION VALUES!!! Adding new versions is ok, but historical versions should never be removed/changed. +const ( + // The version of the account data field when FlatKV was first launched. 
+ AccountDataVersion0 AccountDataVersion = 0 +) + +/* +Serialization schema for AccountData version 0: + +| Version | Block Height | Balance | Nonce | Code Hash | +|---------|--------------|----------|----------|-----------| +| 1 byte | 8 bytes | 32 bytes | 8 bytes | 32 bytes | + +Data is stored in big-endian order. + +*/ + +const ( + accountVersionStart = 0 + accountBlockHeightStart = 1 + accountBalanceStart = 9 + accountNonceStart = 41 + accountCodeHashStart = 49 + accountDataLength = 81 +) + +// Used for encapsulating and serializating account data in the FlatKV accounts database. +// +// This data structure is not threadsafe. Values passed into and values received from this data structure +// are not safe to modify without first copying them. +type AccountData struct { + data []byte +} + +// Create a new AccountData initialized to all 0s. +func NewAccountData() *AccountData { + return &AccountData{ + data: make([]byte, accountDataLength), + } +} + +// Serialize the account data to a byte slice. +// +// The returned byte slice is not safe to modify without first copying it. +func (a *AccountData) Serialize() []byte { + return a.data +} + +// Deserialize the account data from the given byte slice. +func DeserializeAccountData(data []byte) (*AccountData, error) { + if len(data) == 0 { + return nil, errors.New("data is empty") + } + + accountData := &AccountData{ + data: data, + } + + serializationVersion := accountData.GetSerializationVersion() + if serializationVersion != AccountDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + } + + if len(data) != accountDataLength { + return nil, fmt.Errorf("data length at version %d should be %d, got %d", + serializationVersion, accountDataLength, len(data)) + } + + return accountData, nil +} + +// Get the serialization version for this AccountData instance. 
+func (a *AccountData) GetSerializationVersion() AccountDataVersion { + return (AccountDataVersion)(a.data[accountVersionStart]) +} + +// Get the account's block height. +func (a *AccountData) GetBlockHeight() uint64 { + return binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart]) +} + +// Get the account's balance. +func (a *AccountData) GetBalance() *[32]byte { + return (*[32]byte)(a.data[accountBalanceStart:accountNonceStart]) +} + +// Get the account's nonce. +func (a *AccountData) GetNonce() uint64 { + return binary.BigEndian.Uint64(a.data[accountNonceStart:accountCodeHashStart]) +} + +// Get the account's code hash. +func (a *AccountData) GetCodeHash() *[32]byte { + return (*[32]byte)(a.data[accountCodeHashStart:accountDataLength]) +} + +// Check if this account data signifies a deletion operation. A deletion operation is automatically +// performed when all account data fields are 0 (with the exception of the serialization version and block height). +func (a *AccountData) IsDelete() bool { + for i := accountBalanceStart; i < accountDataLength; i++ { + if a.data[i] != 0 { + return false + } + } + return true +} + +// Set the account's block height when this account was last modified/touched. Returns self. +func (a *AccountData) SetBlockHeight(blockHeight uint64) *AccountData { + binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], blockHeight) + return a +} + +// Set the account's balance. Returns self. +func (a *AccountData) SetBalance(balance *[32]byte) *AccountData { + copy(a.data[accountBalanceStart:accountNonceStart], balance[:]) + return a +} + +// Set the account's nonce. Returns self. +func (a *AccountData) SetNonce(nonce uint64) *AccountData { + binary.BigEndian.PutUint64(a.data[accountNonceStart:accountCodeHashStart], nonce) + return a +} + +// Set the account's code hash. Returns self. 
+func (a *AccountData) SetCodeHash(codeHash *[32]byte) *AccountData { + copy(a.data[accountCodeHashStart:accountDataLength], codeHash[:]) + return a +} diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go new file mode 100644 index 0000000000..a4a732a98b --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go @@ -0,0 +1,186 @@ +package vtype + +import ( + "bytes" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +const testdataDir = "testdata" + +// If the golden file does not exist it is created on the first run. +// Subsequent runs verify that serialization still matches, catching +// accidental compatibility breaks. +func TestSerializationGoldenFile_V0(t *testing.T) { + ad := NewAccountData(). + SetBlockHeight(100). + SetBalance(toArray32(leftPad32([]byte{1}))). + SetNonce(42). + SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) + + serialized := ad.Serialize() + + golden := filepath.Join(testdataDir, "account_data_v0.hex") + if _, err := os.Stat(golden); os.IsNotExist(err) { + require.NoError(t, os.MkdirAll(testdataDir, 0o755)) + require.NoError(t, os.WriteFile(golden, []byte(hex.EncodeToString(serialized)), 0o644)) + t.Logf("created golden file %s — re-run to verify", golden) + return + } + + want, err := os.ReadFile(golden) + require.NoError(t, err) + wantBytes, err := hex.DecodeString(string(want)) + require.NoError(t, err) + require.Equal(t, wantBytes, serialized, "serialization differs from golden file") + + // Verify round-trip from the golden bytes. 
+ rt, err := DeserializeAccountData(wantBytes) + require.NoError(t, err) + require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, uint64(42), rt.GetNonce()) + require.Equal(t, toArray32(leftPad32([]byte{1})), rt.GetBalance()) + require.Equal(t, toArray32(bytes.Repeat([]byte{0xaa}, 32)), rt.GetCodeHash()) +} + +func TestNewAccountData_ZeroInitialized(t *testing.T) { + ad := NewAccountData() + var zero [32]byte + require.Equal(t, AccountDataVersion0, ad.GetSerializationVersion()) + require.Equal(t, uint64(0), ad.GetBlockHeight()) + require.Equal(t, uint64(0), ad.GetNonce()) + require.Equal(t, &zero, ad.GetBalance()) + require.Equal(t, &zero, ad.GetCodeHash()) +} + +func TestSerializeLength(t *testing.T) { + ad := NewAccountData() + require.Len(t, ad.Serialize(), accountDataLength) +} + +func TestRoundTrip_AllFieldsSet(t *testing.T) { + balance := toArray32(leftPad32([]byte{0xff, 0xee, 0xdd})) + codeHash := toArray32(bytes.Repeat([]byte{0xab}, 32)) + + ad := NewAccountData(). + SetBlockHeight(999). + SetBalance(balance). + SetNonce(12345). 
+ SetCodeHash(codeHash) + + rt, err := DeserializeAccountData(ad.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, uint64(12345), rt.GetNonce()) + require.Equal(t, balance, rt.GetBalance()) + require.Equal(t, codeHash, rt.GetCodeHash()) +} + +func TestRoundTrip_ZeroValues(t *testing.T) { + ad := NewAccountData() + rt, err := DeserializeAccountData(ad.Serialize()) + require.NoError(t, err) + var zero [32]byte + require.Equal(t, uint64(0), rt.GetBlockHeight()) + require.Equal(t, uint64(0), rt.GetNonce()) + require.Equal(t, &zero, rt.GetBalance()) + require.Equal(t, &zero, rt.GetCodeHash()) +} + +func TestRoundTrip_MaxValues(t *testing.T) { + maxBalance := toArray32(bytes.Repeat([]byte{0xff}, 32)) + maxCodeHash := toArray32(bytes.Repeat([]byte{0xff}, 32)) + maxNonce := uint64(0xffffffffffffffff) + maxBlockHeight := uint64(0xffffffffffffffff) + + ad := NewAccountData(). + SetBlockHeight(maxBlockHeight). + SetBalance(maxBalance). + SetNonce(maxNonce). 
+ SetCodeHash(maxCodeHash) + + rt, err := DeserializeAccountData(ad.Serialize()) + require.NoError(t, err) + require.Equal(t, maxBlockHeight, rt.GetBlockHeight()) + require.Equal(t, maxNonce, rt.GetNonce()) + require.Equal(t, maxBalance, rt.GetBalance()) + require.Equal(t, maxCodeHash, rt.GetCodeHash()) +} + +func TestIsDelete_AllZeroPayload(t *testing.T) { + ad := NewAccountData().SetBlockHeight(500) + require.True(t, ad.IsDelete()) +} + +func TestIsDelete_NonZeroBalance(t *testing.T) { + ad := NewAccountData().SetBalance(toArray32(leftPad32([]byte{1}))) + require.False(t, ad.IsDelete()) +} + +func TestIsDelete_NonZeroNonce(t *testing.T) { + ad := NewAccountData().SetNonce(1) + require.False(t, ad.IsDelete()) +} + +func TestIsDelete_NonZeroCodeHash(t *testing.T) { + ad := NewAccountData().SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + require.False(t, ad.IsDelete()) +} + +func TestDeserialize_EmptyData(t *testing.T) { + _, err := DeserializeAccountData([]byte{}) + require.Error(t, err) +} + +func TestDeserialize_NilData(t *testing.T) { + _, err := DeserializeAccountData(nil) + require.Error(t, err) +} + +func TestDeserialize_TooShort(t *testing.T) { + _, err := DeserializeAccountData([]byte{0x00, 0x01, 0x02}) + require.Error(t, err) +} + +func TestDeserialize_TooLong(t *testing.T) { + _, err := DeserializeAccountData(make([]byte, accountDataLength+1)) + require.Error(t, err) +} + +func TestDeserialize_UnsupportedVersion(t *testing.T) { + data := make([]byte, accountDataLength) + data[0] = 0xff + _, err := DeserializeAccountData(data) + require.Error(t, err) +} + +func TestSetterChaining(t *testing.T) { + ad := NewAccountData(). + SetBlockHeight(1). + SetBalance(toArray32(leftPad32([]byte{2}))). + SetNonce(3). 
+ SetCodeHash(toArray32(leftPad32([]byte{4}))) + + require.Equal(t, uint64(1), ad.GetBlockHeight()) + require.Equal(t, uint64(3), ad.GetNonce()) +} + +func TestConstantLayout_V0(t *testing.T) { + require.Equal(t, 81, accountDataLength) +} + +// leftPad32 returns a 32-byte slice with b right-aligned (big-endian style). +func leftPad32(b []byte) []byte { + padded := make([]byte, 32) + copy(padded[32-len(b):], b) + return padded +} + +// toArray32 converts a []byte to a *[32]byte. +func toArray32(b []byte) *[32]byte { + return (*[32]byte)(b) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex new file mode 100644 index 0000000000..e0c1799d14 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex @@ -0,0 +1 @@ +0000000000000000640000000000000000000000000000000000000000000000000000000000000001000000000000002aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa \ No newline at end of file From 94ebc9a7e50f87235ad41ce4e23f9a521b59ac7d Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 10:53:27 -0500 Subject: [PATCH 087/119] added structs --- .../state_db/sc/flatkv/vtype/account_data.go | 10 +- sei-db/state_db/sc/flatkv/vtype/code_data.go | 103 ++++++++++++ .../sc/flatkv/vtype/code_data_test.go | 159 ++++++++++++++++++ .../state_db/sc/flatkv/vtype/legacy_data.go | 103 ++++++++++++ .../sc/flatkv/vtype/legacy_data_test.go | 158 +++++++++++++++++ .../state_db/sc/flatkv/vtype/storage_data.go | 114 +++++++++++++ .../sc/flatkv/vtype/storage_data_test.go | 136 +++++++++++++++ .../sc/flatkv/vtype/testdata/code_data_v0.hex | 1 + .../flatkv/vtype/testdata/legacy_data_v0.hex | 1 + .../flatkv/vtype/testdata/storage_data_v0.hex | 1 + 10 files changed, 781 insertions(+), 5 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/vtype/code_data.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/code_data_test.go create mode 100644 
sei-db/state_db/sc/flatkv/vtype/legacy_data.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/storage_data.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/storage_data_test.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/testdata/code_data_v0.hex create mode 100644 sei-db/state_db/sc/flatkv/vtype/testdata/legacy_data_v0.hex create mode 100644 sei-db/state_db/sc/flatkv/vtype/testdata/storage_data_v0.hex diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index 0d78173f08..5e90485ab5 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -26,12 +26,12 @@ Data is stored in big-endian order. */ const ( - accountVersionStart = 0 + accountVersionStart = 0 accountBlockHeightStart = 1 - accountBalanceStart = 9 - accountNonceStart = 41 - accountCodeHashStart = 49 - accountDataLength = 81 + accountBalanceStart = 9 + accountNonceStart = 41 + accountCodeHashStart = 49 + accountDataLength = 81 ) // Used for encapsulating and serializating account data in the FlatKV accounts database. diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data.go b/sei-db/state_db/sc/flatkv/vtype/code_data.go new file mode 100644 index 0000000000..e43d936140 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/code_data.go @@ -0,0 +1,103 @@ +package vtype + +import ( + "encoding/binary" + "errors" + "fmt" +) + +type CodeDataVersion uint8 + +// DO NOT CHANGE VERSION VALUES!!! Adding new versions is ok, but historical versions should never be removed/changed. +const ( + CodeDataVersion0 CodeDataVersion = 0 +) + +/* +Serialization schema for CodeData version 0: + +| Version | Block Height | Bytecode | +|---------|--------------|--------------| +| 1 byte | 8 bytes | variable | + +Data is stored in big-endian order. Bytecode is variable length. 
+*/ + +const ( + codeVersionStart = 0 + codeBlockHeightStart = 1 + codeBytecodeStart = 9 + codeHeaderLength = 9 +) + +// Used for encapsulating and serializing contract bytecode in the FlatKV code database. +// +// This data structure is not threadsafe. Values passed into and values received from this data structure +// are not safe to modify without first copying them. +type CodeData struct { + data []byte +} + +// Create a new CodeData with the given bytecode. +func NewCodeData(bytecode []byte) *CodeData { + data := make([]byte, codeHeaderLength+len(bytecode)) + copy(data[codeBytecodeStart:], bytecode) + return &CodeData{data: data} +} + +// Serialize the code data to a byte slice. +// +// The returned byte slice is not safe to modify without first copying it. +func (c *CodeData) Serialize() []byte { + return c.data +} + +// Deserialize the code data from the given byte slice. +func DeserializeCodeData(data []byte) (*CodeData, error) { + if len(data) == 0 { + return nil, errors.New("data is empty") + } + + codeData := &CodeData{ + data: data, + } + + serializationVersion := codeData.GetSerializationVersion() + if serializationVersion != CodeDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + } + + if len(data) < codeHeaderLength { + return nil, fmt.Errorf("data length at version %d should be at least %d, got %d", + serializationVersion, codeHeaderLength, len(data)) + } + + return codeData, nil +} + +// Get the serialization version for this CodeData instance. +func (c *CodeData) GetSerializationVersion() CodeDataVersion { + return (CodeDataVersion)(c.data[codeVersionStart]) +} + +// Get the block height when this code was last modified. +func (c *CodeData) GetBlockHeight() uint64 { + return binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart]) +} + +// Get the contract bytecode. 
+func (c *CodeData) GetBytecode() []byte { + return c.data[codeBytecodeStart:] +} + +// Check if this code data signifies a deletion operation. A deletion operation is automatically +// performed when the bytecode is empty (with the exception of the serialization version and block height). +func (c *CodeData) IsDelete() bool { + return len(c.data) == codeHeaderLength +} + +// Set the block height when this code was last modified/touched. Returns self. +func (c *CodeData) SetBlockHeight(blockHeight uint64) *CodeData { + binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], blockHeight) + return c +} diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go new file mode 100644 index 0000000000..c6f0822639 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go @@ -0,0 +1,159 @@ +package vtype + +import ( + "bytes" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCodeSerializationGoldenFile_V0(t *testing.T) { + bytecode := []byte{0x60, 0x80, 0x60, 0x40, 0x52} // PUSH1 0x80 PUSH1 0x40 MSTORE + cd := NewCodeData(bytecode). 
+ SetBlockHeight(100) + + serialized := cd.Serialize() + + golden := filepath.Join(testdataDir, "code_data_v0.hex") + if _, err := os.Stat(golden); os.IsNotExist(err) { + require.NoError(t, os.MkdirAll(testdataDir, 0o755)) + require.NoError(t, os.WriteFile(golden, []byte(hex.EncodeToString(serialized)), 0o644)) + t.Logf("created golden file %s — re-run to verify", golden) + return + } + + want, err := os.ReadFile(golden) + require.NoError(t, err) + wantBytes, err := hex.DecodeString(string(want)) + require.NoError(t, err) + require.Equal(t, wantBytes, serialized, "serialization differs from golden file") + + rt, err := DeserializeCodeData(wantBytes) + require.NoError(t, err) + require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, bytecode, rt.GetBytecode()) +} + +func TestCodeNewWithBytecode(t *testing.T) { + bytecode := []byte{0x01, 0x02, 0x03} + cd := NewCodeData(bytecode) + require.Equal(t, CodeDataVersion0, cd.GetSerializationVersion()) + require.Equal(t, uint64(0), cd.GetBlockHeight()) + require.Equal(t, bytecode, cd.GetBytecode()) +} + +func TestCodeNewEmpty(t *testing.T) { + cd := NewCodeData(nil) + require.Equal(t, CodeDataVersion0, cd.GetSerializationVersion()) + require.Equal(t, uint64(0), cd.GetBlockHeight()) + require.Empty(t, cd.GetBytecode()) +} + +func TestCodeSerializeLength(t *testing.T) { + bytecode := []byte{0x01, 0x02, 0x03} + cd := NewCodeData(bytecode) + require.Len(t, cd.Serialize(), codeHeaderLength+len(bytecode)) +} + +func TestCodeSerializeLength_Empty(t *testing.T) { + cd := NewCodeData(nil) + require.Len(t, cd.Serialize(), codeHeaderLength) +} + +func TestCodeRoundTrip_WithBytecode(t *testing.T) { + bytecode := bytes.Repeat([]byte{0xab}, 1000) + cd := NewCodeData(bytecode). 
+ SetBlockHeight(999) + + rt, err := DeserializeCodeData(cd.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, bytecode, rt.GetBytecode()) +} + +func TestCodeRoundTrip_EmptyBytecode(t *testing.T) { + cd := NewCodeData(nil). + SetBlockHeight(42) + + rt, err := DeserializeCodeData(cd.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(42), rt.GetBlockHeight()) + require.Empty(t, rt.GetBytecode()) +} + +func TestCodeRoundTrip_MaxBlockHeight(t *testing.T) { + cd := NewCodeData([]byte{0xff}). + SetBlockHeight(0xffffffffffffffff) + + rt, err := DeserializeCodeData(cd.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(0xffffffffffffffff), rt.GetBlockHeight()) + require.Equal(t, []byte{0xff}, rt.GetBytecode()) +} + +func TestCodeIsDelete_EmptyBytecode(t *testing.T) { + cd := NewCodeData(nil).SetBlockHeight(500) + require.True(t, cd.IsDelete()) +} + +func TestCodeIsDelete_EmptySlice(t *testing.T) { + cd := NewCodeData([]byte{}) + require.True(t, cd.IsDelete()) +} + +func TestCodeIsDelete_NonEmptyBytecode(t *testing.T) { + cd := NewCodeData([]byte{0x01}) + require.False(t, cd.IsDelete()) +} + +func TestCodeDeserialize_EmptyData(t *testing.T) { + _, err := DeserializeCodeData([]byte{}) + require.Error(t, err) +} + +func TestCodeDeserialize_NilData(t *testing.T) { + _, err := DeserializeCodeData(nil) + require.Error(t, err) +} + +func TestCodeDeserialize_TooShort(t *testing.T) { + _, err := DeserializeCodeData([]byte{0x00, 0x01, 0x02}) + require.Error(t, err) +} + +func TestCodeDeserialize_HeaderOnly(t *testing.T) { + cd := NewCodeData(nil) + rt, err := DeserializeCodeData(cd.Serialize()) + require.NoError(t, err) + require.Empty(t, rt.GetBytecode()) +} + +func TestCodeDeserialize_UnsupportedVersion(t *testing.T) { + data := make([]byte, codeHeaderLength+1) + data[0] = 0xff + _, err := DeserializeCodeData(data) + require.Error(t, err) +} + +func TestCodeSetterChaining(t *testing.T) { + cd := 
NewCodeData([]byte{0x01}). + SetBlockHeight(42) + + require.Equal(t, uint64(42), cd.GetBlockHeight()) + require.Equal(t, []byte{0x01}, cd.GetBytecode()) +} + +func TestCodeConstantLayout_V0(t *testing.T) { + require.Equal(t, 9, codeHeaderLength) +} + +func TestCodeNewCopiesBytecode(t *testing.T) { + bytecode := []byte{0x01, 0x02, 0x03} + cd := NewCodeData(bytecode) + // Mutating the original should not affect the CodeData. + bytecode[0] = 0xff + require.Equal(t, byte(0x01), cd.GetBytecode()[0]) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go new file mode 100644 index 0000000000..f6278d483e --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -0,0 +1,103 @@ +package vtype + +import ( + "encoding/binary" + "errors" + "fmt" +) + +type LegacyDataVersion uint8 + +// DO NOT CHANGE VERSION VALUES!!! Adding new versions is ok, but historical versions should never be removed/changed. +const ( + LegacyDataVersion0 LegacyDataVersion = 0 +) + +/* +Serialization schema for LegacyData version 0: + +| Version | Block Height | Value | +|---------|--------------|--------------| +| 1 byte | 8 bytes | variable | + +Data is stored in big-endian order. Value is variable length. +*/ + +const ( + legacyVersionStart = 0 + legacyBlockHeightStart = 1 + legacyValueStart = 9 + legacyHeaderLength = 9 +) + +// Used for encapsulating and serializing legacy data in the FlatKV legacy database. +// +// This data structure is not threadsafe. Values passed into and values received from this data structure +// are not safe to modify without first copying them. +type LegacyData struct { + data []byte +} + +// Create a new LegacyData with the given value. +func NewLegacyData(value []byte) *LegacyData { + data := make([]byte, legacyHeaderLength+len(value)) + copy(data[legacyValueStart:], value) + return &LegacyData{data: data} +} + +// Serialize the legacy data to a byte slice. 
+// +// The returned byte slice is not safe to modify without first copying it. +func (l *LegacyData) Serialize() []byte { + return l.data +} + +// Deserialize the legacy data from the given byte slice. +func DeserializeLegacyData(data []byte) (*LegacyData, error) { + if len(data) == 0 { + return nil, errors.New("data is empty") + } + + legacyData := &LegacyData{ + data: data, + } + + serializationVersion := legacyData.GetSerializationVersion() + if serializationVersion != LegacyDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + } + + if len(data) < legacyHeaderLength { + return nil, fmt.Errorf("data length at version %d should be at least %d, got %d", + serializationVersion, legacyHeaderLength, len(data)) + } + + return legacyData, nil +} + +// Get the serialization version for this LegacyData instance. +func (l *LegacyData) GetSerializationVersion() LegacyDataVersion { + return (LegacyDataVersion)(l.data[legacyVersionStart]) +} + +// Get the block height when this legacy data was last modified. +func (l *LegacyData) GetBlockHeight() uint64 { + return binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart]) +} + +// Get the legacy value. +func (l *LegacyData) GetValue() []byte { + return l.data[legacyValueStart:] +} + +// Check if this legacy data signifies a deletion operation. A deletion operation is automatically +// performed when the value is empty (with the exception of the serialization version and block height). +func (l *LegacyData) IsDelete() bool { + return len(l.data) == legacyHeaderLength +} + +// Set the block height when this legacy data was last modified/touched. Returns self. 
+func (l *LegacyData) SetBlockHeight(blockHeight uint64) *LegacyData { + binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], blockHeight) + return l +} diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go new file mode 100644 index 0000000000..299e98a85a --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go @@ -0,0 +1,158 @@ +package vtype + +import ( + "bytes" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLegacySerializationGoldenFile_V0(t *testing.T) { + value := []byte{0xca, 0xfe, 0xba, 0xbe} + ld := NewLegacyData(value). + SetBlockHeight(100) + + serialized := ld.Serialize() + + golden := filepath.Join(testdataDir, "legacy_data_v0.hex") + if _, err := os.Stat(golden); os.IsNotExist(err) { + require.NoError(t, os.MkdirAll(testdataDir, 0o755)) + require.NoError(t, os.WriteFile(golden, []byte(hex.EncodeToString(serialized)), 0o644)) + t.Logf("created golden file %s — re-run to verify", golden) + return + } + + want, err := os.ReadFile(golden) + require.NoError(t, err) + wantBytes, err := hex.DecodeString(string(want)) + require.NoError(t, err) + require.Equal(t, wantBytes, serialized, "serialization differs from golden file") + + rt, err := DeserializeLegacyData(wantBytes) + require.NoError(t, err) + require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, value, rt.GetValue()) +} + +func TestLegacyNewWithValue(t *testing.T) { + value := []byte{0x01, 0x02, 0x03} + ld := NewLegacyData(value) + require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) + require.Equal(t, uint64(0), ld.GetBlockHeight()) + require.Equal(t, value, ld.GetValue()) +} + +func TestLegacyNewEmpty(t *testing.T) { + ld := NewLegacyData(nil) + require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) + require.Equal(t, uint64(0), ld.GetBlockHeight()) + require.Empty(t, ld.GetValue()) +} + +func 
TestLegacySerializeLength(t *testing.T) { + value := []byte{0x01, 0x02, 0x03} + ld := NewLegacyData(value) + require.Len(t, ld.Serialize(), legacyHeaderLength+len(value)) +} + +func TestLegacySerializeLength_Empty(t *testing.T) { + ld := NewLegacyData(nil) + require.Len(t, ld.Serialize(), legacyHeaderLength) +} + +func TestLegacyRoundTrip_WithValue(t *testing.T) { + value := bytes.Repeat([]byte{0xab}, 1000) + ld := NewLegacyData(value). + SetBlockHeight(999) + + rt, err := DeserializeLegacyData(ld.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, value, rt.GetValue()) +} + +func TestLegacyRoundTrip_EmptyValue(t *testing.T) { + ld := NewLegacyData(nil). + SetBlockHeight(42) + + rt, err := DeserializeLegacyData(ld.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(42), rt.GetBlockHeight()) + require.Empty(t, rt.GetValue()) +} + +func TestLegacyRoundTrip_MaxBlockHeight(t *testing.T) { + ld := NewLegacyData([]byte{0xff}). 
+ SetBlockHeight(0xffffffffffffffff) + + rt, err := DeserializeLegacyData(ld.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(0xffffffffffffffff), rt.GetBlockHeight()) + require.Equal(t, []byte{0xff}, rt.GetValue()) +} + +func TestLegacyIsDelete_EmptyValue(t *testing.T) { + ld := NewLegacyData(nil).SetBlockHeight(500) + require.True(t, ld.IsDelete()) +} + +func TestLegacyIsDelete_EmptySlice(t *testing.T) { + ld := NewLegacyData([]byte{}) + require.True(t, ld.IsDelete()) +} + +func TestLegacyIsDelete_NonEmptyValue(t *testing.T) { + ld := NewLegacyData([]byte{0x01}) + require.False(t, ld.IsDelete()) +} + +func TestLegacyDeserialize_EmptyData(t *testing.T) { + _, err := DeserializeLegacyData([]byte{}) + require.Error(t, err) +} + +func TestLegacyDeserialize_NilData(t *testing.T) { + _, err := DeserializeLegacyData(nil) + require.Error(t, err) +} + +func TestLegacyDeserialize_TooShort(t *testing.T) { + _, err := DeserializeLegacyData([]byte{0x00, 0x01, 0x02}) + require.Error(t, err) +} + +func TestLegacyDeserialize_HeaderOnly(t *testing.T) { + ld := NewLegacyData(nil) + rt, err := DeserializeLegacyData(ld.Serialize()) + require.NoError(t, err) + require.Empty(t, rt.GetValue()) +} + +func TestLegacyDeserialize_UnsupportedVersion(t *testing.T) { + data := make([]byte, legacyHeaderLength+1) + data[0] = 0xff + _, err := DeserializeLegacyData(data) + require.Error(t, err) +} + +func TestLegacySetterChaining(t *testing.T) { + ld := NewLegacyData([]byte{0x01}). 
+ SetBlockHeight(42) + + require.Equal(t, uint64(42), ld.GetBlockHeight()) + require.Equal(t, []byte{0x01}, ld.GetValue()) +} + +func TestLegacyConstantLayout_V0(t *testing.T) { + require.Equal(t, 9, legacyHeaderLength) +} + +func TestLegacyNewCopiesValue(t *testing.T) { + value := []byte{0x01, 0x02, 0x03} + ld := NewLegacyData(value) + value[0] = 0xff + require.Equal(t, byte(0x01), ld.GetValue()[0]) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data.go b/sei-db/state_db/sc/flatkv/vtype/storage_data.go new file mode 100644 index 0000000000..e8b6f66b23 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data.go @@ -0,0 +1,114 @@ +package vtype + +import ( + "encoding/binary" + "errors" + "fmt" +) + +type StorageDataVersion uint8 + +// DO NOT CHANGE VERSION VALUES!!! Adding new versions is ok, but historical versions should never be removed/changed. +const ( + StorageDataVersion0 StorageDataVersion = 0 +) + +/* +Serialization schema for StorageData version 0: + +| Version | Block Height | Value | +|---------|--------------|----------| +| 1 byte | 8 bytes | 32 bytes | + +Data is stored in big-endian order. +*/ + +const ( + storageVersionStart = 0 + storageBlockHeightStart = 1 + storageValueStart = 9 + storageDataLength = 41 +) + +// Used for encapsulating and serializing storage slot data in the FlatKV storage database. +// +// This data structure is not threadsafe. Values passed into and values received from this data structure +// are not safe to modify without first copying them. +type StorageData struct { + data []byte +} + +// Create a new StorageData initialized to all 0s. +func NewStorageData() *StorageData { + return &StorageData{ + data: make([]byte, storageDataLength), + } +} + +// Serialize the storage data to a byte slice. +// +// The returned byte slice is not safe to modify without first copying it. +func (s *StorageData) Serialize() []byte { + return s.data +} + +// Deserialize the storage data from the given byte slice. 
+func DeserializeStorageData(data []byte) (*StorageData, error) { + if len(data) == 0 { + return nil, errors.New("data is empty") + } + + storageData := &StorageData{ + data: data, + } + + serializationVersion := storageData.GetSerializationVersion() + if serializationVersion != StorageDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + } + + if len(data) != storageDataLength { + return nil, fmt.Errorf("data length at version %d should be %d, got %d", + serializationVersion, storageDataLength, len(data)) + } + + return storageData, nil +} + +// Get the serialization version for this StorageData instance. +func (s *StorageData) GetSerializationVersion() StorageDataVersion { + return (StorageDataVersion)(s.data[storageVersionStart]) +} + +// Get the block height when this storage slot was last modified. +func (s *StorageData) GetBlockHeight() uint64 { + return binary.BigEndian.Uint64(s.data[storageBlockHeightStart:storageValueStart]) +} + +// Get the storage slot value. +func (s *StorageData) GetValue() *[32]byte { + return (*[32]byte)(s.data[storageValueStart:storageDataLength]) +} + +// Check if this storage data signifies a deletion operation. A deletion operation is automatically +// performed when the value is all 0s (with the exception of the serialization version and block height). +func (s *StorageData) IsDelete() bool { + for i := storageValueStart; i < storageDataLength; i++ { + if s.data[i] != 0 { + return false + } + } + return true +} + +// Set the block height when this storage slot was last modified/touched. Returns self. +func (s *StorageData) SetBlockHeight(blockHeight uint64) *StorageData { + binary.BigEndian.PutUint64(s.data[storageBlockHeightStart:storageValueStart], blockHeight) + return s +} + +// Set the storage slot value. Returns self. 
+func (s *StorageData) SetValue(value *[32]byte) *StorageData { + copy(s.data[storageValueStart:storageDataLength], value[:]) + return s +} diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go new file mode 100644 index 0000000000..397f96fe8f --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go @@ -0,0 +1,136 @@ +package vtype + +import ( + "bytes" + "encoding/hex" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStorageSerializationGoldenFile_V0(t *testing.T) { + val := toArray32(leftPad32([]byte{0xde, 0xad})) + sd := NewStorageData(). + SetBlockHeight(100). + SetValue(val) + + serialized := sd.Serialize() + + golden := filepath.Join(testdataDir, "storage_data_v0.hex") + if _, err := os.Stat(golden); os.IsNotExist(err) { + require.NoError(t, os.MkdirAll(testdataDir, 0o755)) + require.NoError(t, os.WriteFile(golden, []byte(hex.EncodeToString(serialized)), 0o644)) + t.Logf("created golden file %s — re-run to verify", golden) + return + } + + want, err := os.ReadFile(golden) + require.NoError(t, err) + wantBytes, err := hex.DecodeString(string(want)) + require.NoError(t, err) + require.Equal(t, wantBytes, serialized, "serialization differs from golden file") + + rt, err := DeserializeStorageData(wantBytes) + require.NoError(t, err) + require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, val, rt.GetValue()) +} + +func TestStorageNewZeroInitialized(t *testing.T) { + sd := NewStorageData() + var zero [32]byte + require.Equal(t, StorageDataVersion0, sd.GetSerializationVersion()) + require.Equal(t, uint64(0), sd.GetBlockHeight()) + require.Equal(t, &zero, sd.GetValue()) +} + +func TestStorageSerializeLength(t *testing.T) { + sd := NewStorageData() + require.Len(t, sd.Serialize(), storageDataLength) +} + +func TestStorageRoundTrip_AllFieldsSet(t *testing.T) { + val := toArray32(leftPad32([]byte{0xff, 0xee})) + sd := 
NewStorageData(). + SetBlockHeight(999). + SetValue(val) + + rt, err := DeserializeStorageData(sd.Serialize()) + require.NoError(t, err) + require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, val, rt.GetValue()) +} + +func TestStorageRoundTrip_ZeroValues(t *testing.T) { + sd := NewStorageData() + rt, err := DeserializeStorageData(sd.Serialize()) + require.NoError(t, err) + var zero [32]byte + require.Equal(t, uint64(0), rt.GetBlockHeight()) + require.Equal(t, &zero, rt.GetValue()) +} + +func TestStorageRoundTrip_MaxValues(t *testing.T) { + maxVal := toArray32(bytes.Repeat([]byte{0xff}, 32)) + maxBlockHeight := uint64(0xffffffffffffffff) + + sd := NewStorageData(). + SetBlockHeight(maxBlockHeight). + SetValue(maxVal) + + rt, err := DeserializeStorageData(sd.Serialize()) + require.NoError(t, err) + require.Equal(t, maxBlockHeight, rt.GetBlockHeight()) + require.Equal(t, maxVal, rt.GetValue()) +} + +func TestStorageIsDelete_ZeroValue(t *testing.T) { + sd := NewStorageData().SetBlockHeight(500) + require.True(t, sd.IsDelete()) +} + +func TestStorageIsDelete_NonZeroValue(t *testing.T) { + sd := NewStorageData().SetValue(toArray32(leftPad32([]byte{1}))) + require.False(t, sd.IsDelete()) +} + +func TestStorageDeserialize_EmptyData(t *testing.T) { + _, err := DeserializeStorageData([]byte{}) + require.Error(t, err) +} + +func TestStorageDeserialize_NilData(t *testing.T) { + _, err := DeserializeStorageData(nil) + require.Error(t, err) +} + +func TestStorageDeserialize_TooShort(t *testing.T) { + _, err := DeserializeStorageData([]byte{0x00, 0x01, 0x02}) + require.Error(t, err) +} + +func TestStorageDeserialize_TooLong(t *testing.T) { + _, err := DeserializeStorageData(make([]byte, storageDataLength+1)) + require.Error(t, err) +} + +func TestStorageDeserialize_UnsupportedVersion(t *testing.T) { + data := make([]byte, storageDataLength) + data[0] = 0xff + _, err := DeserializeStorageData(data) + require.Error(t, err) +} + +func TestStorageSetterChaining(t 
*testing.T) { + sd := NewStorageData(). + SetBlockHeight(1). + SetValue(toArray32(leftPad32([]byte{2}))) + + require.Equal(t, uint64(1), sd.GetBlockHeight()) +} + +func TestStorageConstantLayout_V0(t *testing.T) { + require.Equal(t, 41, storageDataLength) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/code_data_v0.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/code_data_v0.hex new file mode 100644 index 0000000000..24a13ad1cc --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/testdata/code_data_v0.hex @@ -0,0 +1 @@ +0000000000000000646080604052 \ No newline at end of file diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/legacy_data_v0.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/legacy_data_v0.hex new file mode 100644 index 0000000000..f46d536177 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/testdata/legacy_data_v0.hex @@ -0,0 +1 @@ +000000000000000064cafebabe \ No newline at end of file diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/storage_data_v0.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/storage_data_v0.hex new file mode 100644 index 0000000000..a26bcc4c99 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/testdata/storage_data_v0.hex @@ -0,0 +1 @@ +000000000000000064000000000000000000000000000000000000000000000000000000000000dead \ No newline at end of file From 9dab6ca63969af3f22a6b9c15f76b31e7a88ad1f Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 12:29:48 -0500 Subject: [PATCH 088/119] incremental progress --- .../state_db/sc/flatkv/vtype/account_data.go | 7 + .../sc/flatkv/vtype/pending_account_write.go | 76 +++++++ .../vtype/pending_account_write_test.go | 194 ++++++++++++++++++ 3 files changed, 277 insertions(+) create mode 100644 sei-db/state_db/sc/flatkv/vtype/pending_account_write.go create mode 100644 sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index 
5e90485ab5..0e9cc95e25 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -115,6 +115,13 @@ func (a *AccountData) IsDelete() bool { return true } +// Copy returns a deep copy of this AccountData. The copy has its own backing byte slice. +func (a *AccountData) Copy() *AccountData { + cp := make([]byte, len(a.data)) + copy(cp, a.data) + return &AccountData{data: cp} +} + // Set the account's block height when this account was last modified/touched. Returns self. func (a *AccountData) SetBlockHeight(blockHeight uint64) *AccountData { binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], blockHeight) diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go new file mode 100644 index 0000000000..870421a839 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go @@ -0,0 +1,76 @@ +package vtype + +// PendingAccountWrite tracks field-level changes to an account that have not yet been committed. +// Each field has a value and a flag indicating whether it has been set. Only set fields are +// applied when merging into a base AccountData. +// +// A PendingAccountWrite should only be created when there is at least one change to record. +type PendingAccountWrite struct { + balance *[32]byte + nonce uint64 + nonceSet bool + codeHash *[32]byte +} + +// NewPendingAccountWrite creates a new PendingAccountWrite with no fields set. +func NewPendingAccountWrite() *PendingAccountWrite { + return &PendingAccountWrite{} +} + +// GetBalance returns the pending balance value, or nil if not set. +func (p *PendingAccountWrite) GetBalance() *[32]byte { return p.balance } + +// IsBalanceSet reports whether the balance has been set in this pending write. +func (p *PendingAccountWrite) IsBalanceSet() bool { return p.balance != nil } + +// GetNonce returns the pending nonce value. 
+func (p *PendingAccountWrite) GetNonce() uint64 { return p.nonce } + +// IsNonceSet reports whether the nonce has been set in this pending write. +func (p *PendingAccountWrite) IsNonceSet() bool { return p.nonceSet } + +// GetCodeHash returns the pending code hash value, or nil if not set. +func (p *PendingAccountWrite) GetCodeHash() *[32]byte { return p.codeHash } + +// IsCodeHashSet reports whether the code hash has been set in this pending write. +func (p *PendingAccountWrite) IsCodeHashSet() bool { return p.codeHash != nil } + +// SetBalance marks the balance as changed. The pointer is stored directly; the caller +// must not modify the underlying array after calling SetBalance. Returns self. +func (p *PendingAccountWrite) SetBalance(balance *[32]byte) *PendingAccountWrite { + p.balance = balance + return p +} + +// SetNonce marks the nonce as changed. Returns self. +func (p *PendingAccountWrite) SetNonce(nonce uint64) *PendingAccountWrite { + p.nonce = nonce + p.nonceSet = true + return p +} + +// SetCodeHash marks the code hash as changed. The pointer is stored directly; the caller +// must not modify the underlying array after calling SetCodeHash. Returns self. +func (p *PendingAccountWrite) SetCodeHash(codeHash *[32]byte) *PendingAccountWrite { + p.codeHash = codeHash + return p +} + +// Merge applies the pending field changes onto a copy of the base AccountData, updating the +// block height. Only fields that have been set via Set* methods are overwritten; all other +// fields are carried over from the base. The base is not modified. 
+func (p *PendingAccountWrite) Merge(base *AccountData, blockHeight uint64) *AccountData { + result := base.Copy().SetBlockHeight(blockHeight) + + if p.balance != nil { + result.SetBalance(p.balance) + } + if p.nonceSet { + result.SetNonce(p.nonce) + } + if p.codeHash != nil { + result.SetCodeHash(p.codeHash) + } + + return result +} diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go new file mode 100644 index 0000000000..d21a863884 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go @@ -0,0 +1,194 @@ +package vtype + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestPAW_SetNonce_MergeOntoZeroBase(t *testing.T) { + paw := NewPendingAccountWrite().SetNonce(42) + base := NewAccountData() + + result := paw.Merge(base, 100) + + require.Equal(t, uint64(42), result.GetNonce()) + require.Equal(t, uint64(100), result.GetBlockHeight()) + var zero [32]byte + require.Equal(t, &zero, result.GetBalance()) + require.Equal(t, &zero, result.GetCodeHash()) +} + +func TestPAW_SetCodeHash_MergeOntoExistingAccount(t *testing.T) { + base := NewAccountData(). + SetBlockHeight(50). + SetBalance(toArray32(leftPad32([]byte{0xff}))). + SetNonce(10). + SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) + + newCodeHash := toArray32(bytes.Repeat([]byte{0xbb}, 32)) + paw := NewPendingAccountWrite().SetCodeHash(newCodeHash) + + result := paw.Merge(base, 100) + + // Changed field + require.Equal(t, newCodeHash, result.GetCodeHash()) + // Unchanged fields carried over from base + require.Equal(t, toArray32(leftPad32([]byte{0xff})), result.GetBalance()) + require.Equal(t, uint64(10), result.GetNonce()) + // Block height updated + require.Equal(t, uint64(100), result.GetBlockHeight()) +} + +func TestPAW_SetBalance_MergeOntoExistingAccount(t *testing.T) { + base := NewAccountData(). + SetBlockHeight(50). 
+ SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetNonce(5) + + newBalance := toArray32(leftPad32([]byte{0x02})) + paw := NewPendingAccountWrite().SetBalance(newBalance) + + result := paw.Merge(base, 60) + + require.Equal(t, newBalance, result.GetBalance()) + require.Equal(t, uint64(5), result.GetNonce()) + require.Equal(t, uint64(60), result.GetBlockHeight()) +} + +func TestPAW_MultipleFields(t *testing.T) { + base := NewAccountData(). + SetBlockHeight(1). + SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetNonce(1). + SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + + newBalance := toArray32(leftPad32([]byte{0x02})) + newCodeHash := toArray32(bytes.Repeat([]byte{0x02}, 32)) + paw := NewPendingAccountWrite(). + SetBalance(newBalance). + SetNonce(99). + SetCodeHash(newCodeHash) + + result := paw.Merge(base, 200) + + require.Equal(t, newBalance, result.GetBalance()) + require.Equal(t, uint64(99), result.GetNonce()) + require.Equal(t, newCodeHash, result.GetCodeHash()) + require.Equal(t, uint64(200), result.GetBlockHeight()) +} + +func TestPAW_ZeroNonce(t *testing.T) { + base := NewAccountData().SetNonce(42) + paw := NewPendingAccountWrite().SetNonce(0) + + result := paw.Merge(base, 10) + + require.Equal(t, uint64(0), result.GetNonce()) + require.Equal(t, uint64(10), result.GetBlockHeight()) +} + +func TestPAW_ZeroBalance(t *testing.T) { + base := NewAccountData().SetBalance(toArray32(leftPad32([]byte{0xff}))) + paw := NewPendingAccountWrite().SetBalance(&[32]byte{}) + + result := paw.Merge(base, 10) + + var zero [32]byte + require.Equal(t, &zero, result.GetBalance()) +} + +func TestPAW_ZeroCodeHash(t *testing.T) { + base := NewAccountData().SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) + paw := NewPendingAccountWrite().SetCodeHash(&[32]byte{}) + + result := paw.Merge(base, 10) + + var zero [32]byte + require.Equal(t, &zero, result.GetCodeHash()) +} + +func TestPAW_ZeroAllFields_ResultIsDelete(t *testing.T) { + base := NewAccountData(). 
+ SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetNonce(1). + SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + + paw := NewPendingAccountWrite(). + SetBalance(&[32]byte{}). + SetNonce(0). + SetCodeHash(&[32]byte{}) + + result := paw.Merge(base, 10) + + require.True(t, result.IsDelete()) +} + +func TestPAW_MergeDoesNotModifyBase(t *testing.T) { + base := NewAccountData(). + SetBlockHeight(50). + SetNonce(10) + + paw := NewPendingAccountWrite().SetNonce(99) + _ = paw.Merge(base, 100) + + // Base must be unchanged + require.Equal(t, uint64(50), base.GetBlockHeight()) + require.Equal(t, uint64(10), base.GetNonce()) +} + +func TestPAW_IsSetFlags(t *testing.T) { + paw := NewPendingAccountWrite() + require.False(t, paw.IsBalanceSet()) + require.False(t, paw.IsNonceSet()) + require.False(t, paw.IsCodeHashSet()) + + paw.SetNonce(1) + require.False(t, paw.IsBalanceSet()) + require.True(t, paw.IsNonceSet()) + require.False(t, paw.IsCodeHashSet()) + + paw.SetBalance(&[32]byte{1}) + require.True(t, paw.IsBalanceSet()) + + paw.SetCodeHash(&[32]byte{2}) + require.True(t, paw.IsCodeHashSet()) +} + +func TestPAW_GettersReturnSetValues(t *testing.T) { + bal := [32]byte{0xab} + ch := [32]byte{0xcd} + paw := NewPendingAccountWrite(). + SetBalance(&bal). + SetNonce(123). 
+ SetCodeHash(&ch) + + require.Equal(t, &bal, paw.GetBalance()) + require.Equal(t, uint64(123), paw.GetNonce()) + require.Equal(t, &ch, paw.GetCodeHash()) +} + +func TestPAW_OverwriteField(t *testing.T) { + paw := NewPendingAccountWrite().SetNonce(1).SetNonce(2) + base := NewAccountData() + + result := paw.Merge(base, 10) + require.Equal(t, uint64(2), result.GetNonce()) +} + +func TestPAW_ZeroThenSet(t *testing.T) { + paw := NewPendingAccountWrite().SetNonce(0).SetNonce(42) + base := NewAccountData().SetNonce(10) + + result := paw.Merge(base, 10) + require.Equal(t, uint64(42), result.GetNonce()) +} + +func TestPAW_SetThenZero(t *testing.T) { + paw := NewPendingAccountWrite().SetNonce(42).SetNonce(0) + base := NewAccountData().SetNonce(10) + + result := paw.Merge(base, 10) + require.Equal(t, uint64(0), result.GetNonce()) +} From 3443a06d12c1dda8ff4fc624d88f7f4565179801 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 13:52:52 -0500 Subject: [PATCH 089/119] cleanup --- sei-db/state_db/sc/flatkv/store.go | 8 ++++---- sei-db/state_db/sc/flatkv/vtype/code_data.go | 9 ++++----- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 2d75d827e4..4a9d4185ce 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -82,10 +82,10 @@ type CommitStore struct { // Five separate PebbleDB instances metadataDB seidbtypes.KeyValueDB // Global version + LtHash watermark - accountDB seidbtypes.KeyValueDB // addr(20) → AccountValue (40 or 72 bytes) - codeDB seidbtypes.KeyValueDB // addr(20) → bytecode - storageDB seidbtypes.KeyValueDB // addr(20)||slot(32) → value(32) - legacyDB seidbtypes.KeyValueDB // Legacy data for backward compatibility + accountDB seidbtypes.KeyValueDB // addr(20) → vtype.AccountData + codeDB seidbtypes.KeyValueDB // addr(20) → vtype.CodeData + storageDB seidbtypes.KeyValueDB // addr(20)||slot(32) → vtype.StorageData + legacyDB 
seidbtypes.KeyValueDB // key → vtype.LegacyValue // Per-DB committed version, keyed by DB dir name (e.g. accountDBDir). localMeta map[string]*LocalMeta diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data.go b/sei-db/state_db/sc/flatkv/vtype/code_data.go index e43d936140..19b3214661 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data.go @@ -27,7 +27,6 @@ const ( codeVersionStart = 0 codeBlockHeightStart = 1 codeBytecodeStart = 9 - codeHeaderLength = 9 ) // Used for encapsulating and serializing contract bytecode in the FlatKV code database. @@ -40,7 +39,7 @@ type CodeData struct { // Create a new CodeData with the given bytecode. func NewCodeData(bytecode []byte) *CodeData { - data := make([]byte, codeHeaderLength+len(bytecode)) + data := make([]byte, codeBytecodeStart+len(bytecode)) copy(data[codeBytecodeStart:], bytecode) return &CodeData{data: data} } @@ -67,9 +66,9 @@ func DeserializeCodeData(data []byte) (*CodeData, error) { return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) } - if len(data) < codeHeaderLength { + if len(data) < codeBytecodeStart { return nil, fmt.Errorf("data length at version %d should be at least %d, got %d", - serializationVersion, codeHeaderLength, len(data)) + serializationVersion, codeBytecodeStart, len(data)) } return codeData, nil @@ -93,7 +92,7 @@ func (c *CodeData) GetBytecode() []byte { // Check if this code data signifies a deletion operation. A deletion operation is automatically // performed when the bytecode is empty (with the exception of the serialization version and block height). func (c *CodeData) IsDelete() bool { - return len(c.data) == codeHeaderLength + return len(c.data) == codeBytecodeStart } // Set the block height when this code was last modified/touched. Returns self. 
From a40223802091da653ffba8687d327880db3a5c40 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 13:56:16 -0500 Subject: [PATCH 090/119] don't ignore errors from batch get --- sei-db/state_db/sc/flatkv/store_write.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index c201391f37..578c3eb3ab 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -613,17 +613,35 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( return } - // Merge DB results into the result maps. + // Merge DB results into the result maps, failing on any per-key errors. + // BatchGet converts ErrNotFound into nil Value (no error), but surfaces + // real read errors. for k, v := range storageBatch { + if v.Error != nil { + err = fmt.Errorf("storageDB batch read error for key %x: %w", k, v.Error) + return + } storageOld[k] = v } for k, v := range accountBatch { + if v.Error != nil { + err = fmt.Errorf("accountDB batch read error for key %x: %w", k, v.Error) + return + } accountOld[k] = v } for k, v := range codeBatch { + if v.Error != nil { + err = fmt.Errorf("codeDB batch read error for key %x: %w", k, v.Error) + return + } codeOld[k] = v } for k, v := range legacyBatch { + if v.Error != nil { + err = fmt.Errorf("legacyDB batch read error for key %x: %w", k, v.Error) + return + } legacyOld[k] = v } From e8bf28ab3ccb421d04557af6337e94487e2fb905 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 14:15:39 -0500 Subject: [PATCH 091/119] incremental progress --- sei-db/state_db/sc/flatkv/store_write.go | 62 ++++++++++++++---------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 578c3eb3ab..35487072c0 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ 
b/sei-db/state_db/sc/flatkv/store_write.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) // ApplyChangeSets buffers EVM changesets and updates LtHash. @@ -56,37 +57,13 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { kind, keyBytes := evm.ParseEVMKey(pair.Key) if kind == evm.EVMKeyUnknown { // Skip non-EVM keys silently - continue + continue // NO! } // Route to appropriate DB based on key type switch kind { case evm.EVMKeyStorage: - // Storage: keyBytes = addr(20) || slot(32) - keyStr := string(keyBytes) - oldValue := storageOld[keyStr].Value - - if pair.Delete { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } - storageOld[keyStr] = types.BatchGetResult{Value: nil} - } else { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - storageOld[keyStr] = types.BatchGetResult{Value: pair.Value} - } - - // LtHash pair: internal key directly - storagePairs = append(storagePairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: pair.Value, - LastValue: oldValue, - Delete: pair.Delete, - }) + storagePairs = s.applyEvmStorageChange(keyBytes, pair, storageOld, storagePairs) case evm.EVMKeyNonce, evm.EVMKeyCodeHash: // Account data: keyBytes = addr(20) @@ -261,6 +238,39 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return nil } +// Apply a single change +func (s *CommitStore) applyEvmStorageChange( + keyBytes []byte, + pair *iavl.KVPair, + storageOld map[string]types.BatchGetResult, + storagePairs []lthash.KVPairWithLastValue, +) []lthash.KVPairWithLastValue { + keyStr := string(keyBytes) + oldValue := storageOld[keyStr].Value + + if pair.Delete { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } 
+ storageOld[keyStr] = types.BatchGetResult{Value: nil} + } else { + s.storageWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + storageOld[keyStr] = types.BatchGetResult{Value: pair.Value} + } + + // LtHash pair: internal key directly + return append(storagePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) +} + // Commit persists buffered writes and advances the version. // Protocol: WAL → per-DB batch (with LocalMeta) → flush → update metaDB. // On crash, catchup replays WAL to recover incomplete commits. From 73b6d36e4fc04dd9c7512fe4fa738979512e2751 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 14:15:55 -0500 Subject: [PATCH 092/119] cleanup --- sei-db/state_db/sc/flatkv/store_write.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 35487072c0..23cd733c5e 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -64,7 +64,6 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { switch kind { case evm.EVMKeyStorage: storagePairs = s.applyEvmStorageChange(keyBytes, pair, storageOld, storagePairs) - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: // Account data: keyBytes = addr(20) addr, ok := AddressFromBytes(keyBytes) @@ -238,11 +237,16 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return nil } -// Apply a single change +// Apply a single change to the evm storage db. func (s *CommitStore) applyEvmStorageChange( + // The key with the prefix stripped. keyBytes []byte, + // The change to apply. pair *iavl.KVPair, + // This map stores the old value to the key prior to this change. This function updates it + // with the new value, so that the next change will see this value as the previous value. 
storageOld map[string]types.BatchGetResult, + // This slice stores both the new and old values for each key modified in this block. storagePairs []lthash.KVPairWithLastValue, ) []lthash.KVPairWithLastValue { keyStr := string(keyBytes) From fbb3041ab40ecc87b0f46e09bec3afa005f53230 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 14:22:09 -0500 Subject: [PATCH 093/119] cleanup --- sei-db/block_db/block_db.go | 65 --- sei-db/block_db/block_db_test.go | 434 ------------------ sei-db/block_db/blocksim/block_generator.go | 91 ---- sei-db/block_db/blocksim/blocksim_config.go | 125 ----- sei-db/block_db/mem_block_db.go | 123 ----- sei-db/state_db/sc/flatkv/store_write.go | 144 +++--- .../sc/flatkv/vtype/code_data_test.go | 8 +- 7 files changed, 86 insertions(+), 904 deletions(-) delete mode 100644 sei-db/block_db/block_db.go delete mode 100644 sei-db/block_db/block_db_test.go delete mode 100644 sei-db/block_db/blocksim/block_generator.go delete mode 100644 sei-db/block_db/blocksim/blocksim_config.go delete mode 100644 sei-db/block_db/mem_block_db.go diff --git a/sei-db/block_db/block_db.go b/sei-db/block_db/block_db.go deleted file mode 100644 index 2d2b925dfe..0000000000 --- a/sei-db/block_db/block_db.go +++ /dev/null @@ -1,65 +0,0 @@ -package blockdb - -import "context" - -// A binary transaction with its hash. -type BinaryTransaction struct { - // The hash of the transaction. - Hash []byte - // The binary transaction data. - Transaction []byte -} - -// A binary block with its transactions and hash. -type BinaryBlock struct { - // The height of the block. Must be unique. - Height uint64 - // The hash of the block. Must be unique. - Hash []byte - // The binary block data, not including transaction data (unless you are ok with wasting space) - BlockData []byte - // The transactions in the block and their hashes. - Transactions []*BinaryTransaction -} - -// A database for storing binary block and transaction data. -// -// This store is fully threadsafe. 
All writes are atomic (that is, after a crash you will either see the write or -// you will not see it at all, i.e. partial writes are not possible). Multiple writes are not atomic with respect -// to each other, meaning if you write A then B and crash, you may observe B but not A (only possible when sharding -// is enabled). Within a single session, read-your-writes consistency is provided. -type BlockDB interface { - - // Write a block to the database. - // - // This method may return immediately and does not necessarily wait for the block to be written to disk. - // Call Flush() if you need to wait until the block is written to disk. - WriteBlock(ctx context.Context, block *BinaryBlock) error - - // Blocks until all pending writes are flushed to disk. Any call to WriteBlock issued before calling Flush() - // will be crash-durable after Flush() returns. Calls to WriteBlock() made concurrently with Flush() may or - // may not be crash-durable after Flush() returns (but are otherwise eventually durable). - // - // It is not required to call Flush() in order to ensure data is written to disk. The database asyncronously - // pushes data down to disk even if Flush() is never called. Flush() just allows you to syncronize an external - // goroutine with the database's internal write loop. - Flush(ctx context.Context) error - - // Retrieves a block by its hash. - GetBlockByHash(ctx context.Context, hash []byte) (block *BinaryBlock, ok bool, err error) - - // Retrieves a block by its height. - GetBlockByHeight(ctx context.Context, height uint64) (block *BinaryBlock, ok bool, err error) - - // Retrieves a transaction by its hash. - GetTransactionByHash(ctx context.Context, hash []byte) (transaction *BinaryTransaction, ok bool, err error) - - // Schedules pruning for all blocks with a height less than the given height. Pruning is asyncronous, - // and so this method does not provide any guarantees about when the pruning will complete. 
It is possible - // that some data will not be pruned if the database is closed before the pruning is scheduled. - Prune(ctx context.Context, lowestHeightToKeep uint64) error - - // Closes the database and releases any resources. Any in-flight writes are fully flushed to disk before this - // method returns. - Close(ctx context.Context) error -} diff --git a/sei-db/block_db/block_db_test.go b/sei-db/block_db/block_db_test.go deleted file mode 100644 index 19f07dcfd2..0000000000 --- a/sei-db/block_db/block_db_test.go +++ /dev/null @@ -1,434 +0,0 @@ -package blockdb - -import ( - "bytes" - "context" - "fmt" - "testing" - - crand "github.com/sei-protocol/sei-chain/sei-db/common/rand" - "github.com/sei-protocol/sei-chain/sei-db/common/unit" -) - -var testRng = crand.NewCannedRandom(4*unit.MB, 42) - -type blockDBBuilder struct { - name string - builder func(path string) (BlockDB, error) -} - -func buildBuilders() []blockDBBuilder { - return []blockDBBuilder{ - newMemBlockDBBuilder(), - } -} - -func newMemBlockDBBuilder() blockDBBuilder { - store := make(map[string]*memBlockDBData) - return blockDBBuilder{ - name: "mem", - builder: func(path string) (BlockDB, error) { - data, ok := store[path] - if !ok { - data = &memBlockDBData{ - blocksByHash: make(map[string]*BinaryBlock), - blocksByHeight: make(map[uint64]*BinaryBlock), - txByHash: make(map[string]*BinaryTransaction), - } - store[path] = data - } - return &memBlockDB{data: data}, nil - }, - } -} - -func makeBlock(height uint64, numTxs int) *BinaryBlock { - txs := make([]*BinaryTransaction, numTxs) - for i := 0; i < numTxs; i++ { - txs[i] = &BinaryTransaction{ - Hash: []byte(fmt.Sprintf("tx-%d-%d", height, i)), - Transaction: []byte(fmt.Sprintf("tx-data-%d-%d", height, i)), - } - } - return &BinaryBlock{ - Height: height, - Hash: []byte(fmt.Sprintf("block-%d", height)), - BlockData: []byte(fmt.Sprintf("block-data-%d", height)), - Transactions: txs, - } -} - -func forEachBuilder(t *testing.T, fn func(t *testing.T, 
builder func(path string) (BlockDB, error))) { - for _, b := range buildBuilders() { - t.Run(b.name, func(t *testing.T) { - fn(t, b.builder) - }) - } -} - -func TestWriteAndGetBlockByHeight(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - block := makeBlock(1, 2) - requireNoError(t, db.WriteBlock(ctx, block)) - - got, ok, err := db.GetBlockByHeight(ctx, 1) - requireNoError(t, err) - requireTrue(t, ok, "expected block at height 1") - requireBlockEqual(t, block, got) - }) -} - -func TestWriteAndGetBlockByHash(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - block := makeBlock(5, 3) - requireNoError(t, db.WriteBlock(ctx, block)) - - got, ok, err := db.GetBlockByHash(ctx, block.Hash) - requireNoError(t, err) - requireTrue(t, ok, "expected block with matching hash") - requireBlockEqual(t, block, got) - }) -} - -func TestGetTransactionByHash(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - block := makeBlock(1, 4) - requireNoError(t, db.WriteBlock(ctx, block)) - - for _, tx := range block.Transactions { - got, ok, err := db.GetTransactionByHash(ctx, tx.Hash) - requireNoError(t, err) - requireTrue(t, ok, "expected transaction with hash %s", tx.Hash) - requireBytesEqual(t, tx.Hash, got.Hash, "transaction hash") - requireBytesEqual(t, tx.Transaction, got.Transaction, "transaction data") - } - }) -} - -func TestGetBlockNotFound(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - 
requireNoError(t, err) - defer db.Close(ctx) - - _, ok, err := db.GetBlockByHeight(ctx, 999) - requireNoError(t, err) - requireTrue(t, !ok, "expected no block at height 999") - - _, ok, err = db.GetBlockByHash(ctx, []byte("nonexistent")) - requireNoError(t, err) - requireTrue(t, !ok, "expected no block with nonexistent hash") - }) -} - -func TestGetTransactionNotFound(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - _, ok, err := db.GetTransactionByHash(ctx, []byte("nonexistent")) - requireNoError(t, err) - requireTrue(t, !ok, "expected no transaction with nonexistent hash") - }) -} - -func TestMultipleBlocks(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blocks := make([]*BinaryBlock, 10) - for i := range blocks { - blocks[i] = makeBlock(uint64(i+1), 2) - requireNoError(t, db.WriteBlock(ctx, blocks[i])) - } - - for _, block := range blocks { - got, ok, err := db.GetBlockByHeight(ctx, block.Height) - requireNoError(t, err) - requireTrue(t, ok, "expected block at height %d", block.Height) - requireBlockEqual(t, block, got) - } - }) -} - -func TestPrunePreservesUnprunedBlocks(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - for i := uint64(1); i <= 10; i++ { - requireNoError(t, db.WriteBlock(ctx, makeBlock(i, 1))) - } - - requireNoError(t, db.Flush(ctx)) - requireNoError(t, db.Prune(ctx, 6)) - - for i := uint64(6); i <= 10; i++ { - _, ok, err := db.GetBlockByHeight(ctx, i) - requireNoError(t, err) - requireTrue(t, ok, "expected block at height %d to survive pruning", i) - } - }) -} - 
-func TestPrunePreservesUnprunedTransactions(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - survivingBlock := makeBlock(2, 3) - requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) - requireNoError(t, db.WriteBlock(ctx, survivingBlock)) - - requireNoError(t, db.Flush(ctx)) - requireNoError(t, db.Prune(ctx, 2)) - - for _, tx := range survivingBlock.Transactions { - _, ok, err := db.GetTransactionByHash(ctx, tx.Hash) - requireNoError(t, err) - requireTrue(t, ok, "expected transaction %s to survive pruning", tx.Hash) - } - }) -} - -func TestPruneDoesNotError(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - requireNoError(t, db.Prune(ctx, 100)) - - for i := uint64(1); i <= 5; i++ { - requireNoError(t, db.WriteBlock(ctx, makeBlock(i, 1))) - } - - requireNoError(t, db.Prune(ctx, 3)) - requireNoError(t, db.Prune(ctx, 100)) - }) -} - -func TestCloseAndReopen(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - path := t.TempDir() - - db, err := builder(path) - requireNoError(t, err) - - block := makeBlock(1, 2) - requireNoError(t, db.WriteBlock(ctx, block)) - requireNoError(t, db.Flush(ctx)) - requireNoError(t, db.Close(ctx)) - - db2, err := builder(path) - requireNoError(t, err) - defer db2.Close(ctx) - - got, ok, err := db2.GetBlockByHeight(ctx, 1) - requireNoError(t, err) - requireTrue(t, ok, "expected block to survive close/reopen") - requireBlockEqual(t, block, got) - - for _, tx := range block.Transactions { - gotTx, ok, err := db2.GetTransactionByHash(ctx, tx.Hash) - requireNoError(t, err) - requireTrue(t, ok, "expected tx to survive close/reopen") - requireBytesEqual(t, 
tx.Transaction, gotTx.Transaction, "transaction data") - } - }) -} - -func TestCloseAndReopenThenWrite(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - path := t.TempDir() - - db, err := builder(path) - requireNoError(t, err) - requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) - requireNoError(t, db.Flush(ctx)) - requireNoError(t, db.Close(ctx)) - - db2, err := builder(path) - requireNoError(t, err) - defer db2.Close(ctx) - - requireNoError(t, db2.WriteBlock(ctx, makeBlock(2, 1))) - - for _, h := range []uint64{1, 2} { - _, ok, err := db2.GetBlockByHeight(ctx, h) - requireNoError(t, err) - requireTrue(t, ok, "expected block at height %d after reopen+write", h) - } - }) -} - -func TestFlush(t *testing.T) { - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - requireNoError(t, db.Flush(ctx)) - - requireNoError(t, db.WriteBlock(ctx, makeBlock(1, 1))) - requireNoError(t, db.Flush(ctx)) - }) -} - -func TestBulkWriteAndQuery(t *testing.T) { - const numBlocks = 1000 - const txsPerBlock = 50 - - forEachBuilder(t, func(t *testing.T, builder func(string) (BlockDB, error)) { - ctx := context.Background() - db, err := builder(t.TempDir()) - requireNoError(t, err) - defer db.Close(ctx) - - blocks := make([]*BinaryBlock, numBlocks) - for i := range blocks { - blocks[i] = makeRandomBlock(testRng, uint64(i+1), txsPerBlock) - requireNoError(t, db.WriteBlock(ctx, blocks[i])) - } - - requireNoError(t, db.Flush(ctx)) - - for _, expected := range blocks { - byHeight, ok, err := db.GetBlockByHeight(ctx, expected.Height) - requireNoError(t, err) - requireTrue(t, ok, "block not found by height %d", expected.Height) - requireBlockBytesEqual(t, expected, byHeight) - - byHash, ok, err := db.GetBlockByHash(ctx, expected.Hash) - requireNoError(t, err) - requireTrue(t, ok, 
"block not found by hash at height %d", expected.Height) - requireBlockBytesEqual(t, expected, byHash) - - for _, expectedTx := range expected.Transactions { - gotTx, ok, err := db.GetTransactionByHash(ctx, expectedTx.Hash) - requireNoError(t, err) - requireTrue(t, ok, "tx not found by hash %x (block height %d)", expectedTx.Hash, expected.Height) - requireBytesEqual(t, expectedTx.Hash, gotTx.Hash, "tx hash") - requireBytesEqual(t, expectedTx.Transaction, gotTx.Transaction, "tx data") - } - } - }) -} - -// makeRandomBlock builds a block with deterministic random binary payloads. -// Returned slices are owned copies safe for storage and later comparison. -func makeRandomBlock(rng *crand.CannedRandom, height uint64, numTxs int) *BinaryBlock { - txs := make([]*BinaryTransaction, numTxs) - for i := range txs { - txHash := rng.Address('t', int64(height)*1000+int64(i), 32) - txDataLen := 64 + int(rng.Int64Range(0, 512)) - txData := copyBytes(rng.Bytes(txDataLen)) - txs[i] = &BinaryTransaction{Hash: txHash, Transaction: txData} - } - - blockHash := rng.Address('b', int64(height), 32) - blockDataLen := 128 + int(rng.Int64Range(0, 1024)) - blockData := copyBytes(rng.Bytes(blockDataLen)) - - return &BinaryBlock{ - Height: height, - Hash: blockHash, - BlockData: blockData, - Transactions: txs, - } -} - -func copyBytes(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -// requireBlockBytesEqual does a deep byte-level comparison, suitable for verifying -// round-trip fidelity through serialization. 
-func requireBlockBytesEqual(t *testing.T, expected, actual *BinaryBlock) { - t.Helper() - if expected.Height != actual.Height { - t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) - } - requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") - requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") - if len(expected.Transactions) != len(actual.Transactions) { - t.Fatalf("transaction count mismatch at height %d: expected %d, got %d", - expected.Height, len(expected.Transactions), len(actual.Transactions)) - } - for i, tx := range expected.Transactions { - label := fmt.Sprintf("height %d tx[%d]", expected.Height, i) - requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, label+" hash") - requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, label+" data") - } -} - -// --- test helpers --- - -func requireNoError(t *testing.T, err error) { - t.Helper() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func requireTrue(t *testing.T, cond bool, format string, args ...any) { - t.Helper() - if !cond { - t.Fatalf(format, args...) 
- } -} - -func requireBytesEqual(t *testing.T, expected, actual []byte, label string) { - t.Helper() - if !bytes.Equal(expected, actual) { - t.Fatalf("%s mismatch: expected %q, got %q", label, expected, actual) - } -} - -func requireBlockEqual(t *testing.T, expected, actual *BinaryBlock) { - t.Helper() - if expected.Height != actual.Height { - t.Fatalf("height mismatch: expected %d, got %d", expected.Height, actual.Height) - } - requireBytesEqual(t, expected.Hash, actual.Hash, "block hash") - requireBytesEqual(t, expected.BlockData, actual.BlockData, "block data") - if len(expected.Transactions) != len(actual.Transactions) { - t.Fatalf("transaction count mismatch: expected %d, got %d", - len(expected.Transactions), len(actual.Transactions)) - } - for i, tx := range expected.Transactions { - requireBytesEqual(t, tx.Hash, actual.Transactions[i].Hash, fmt.Sprintf("tx[%d] hash", i)) - requireBytesEqual(t, tx.Transaction, actual.Transactions[i].Transaction, fmt.Sprintf("tx[%d] data", i)) - } -} diff --git a/sei-db/block_db/blocksim/block_generator.go b/sei-db/block_db/blocksim/block_generator.go deleted file mode 100644 index 6ffbb168c3..0000000000 --- a/sei-db/block_db/blocksim/block_generator.go +++ /dev/null @@ -1,91 +0,0 @@ -package blocksim - -import ( - "context" - - blockdb "github.com/sei-protocol/sei-chain/sei-db/block_db" - "github.com/sei-protocol/sei-chain/sei-db/common/rand" -) - -const ( - blockHashType = 'b' - txHashType = 't' -) - -// Asynchronously generates random blocks and feeds them into a channel. -type BlockGenerator struct { - ctx context.Context - config *BlocksimConfig - rand *rand.CannedRandom - - // The next block height to be assigned. - nextHeight uint64 - - // Generated blocks are sent to this channel. - blocksChan chan *blockdb.BinaryBlock -} - -// Creates a new BlockGenerator and immediately starts its background goroutine. -// The generator stops when the context is cancelled. 
-func NewBlockGenerator( - ctx context.Context, - config *BlocksimConfig, - rng *rand.CannedRandom, - startHeight uint64, -) *BlockGenerator { - g := &BlockGenerator{ - ctx: ctx, - config: config, - rand: rng, - nextHeight: startHeight, - blocksChan: make(chan *blockdb.BinaryBlock, config.StagedBlockQueueSize), - } - go g.mainLoop() - return g -} - -// NextBlock blocks until the next generated block is available and returns it. -// Returns nil if the context has been cancelled and no more blocks will be produced. -func (g *BlockGenerator) NextBlock() *blockdb.BinaryBlock { - select { - case <-g.ctx.Done(): - return nil - case blk := <-g.blocksChan: - return blk - } -} - -func (g *BlockGenerator) mainLoop() { - for { - blk := g.buildBlock() - select { - case <-g.ctx.Done(): - return - case g.blocksChan <- blk: - } - } -} - -func (g *BlockGenerator) buildBlock() *blockdb.BinaryBlock { - height := g.nextHeight - g.nextHeight++ - - txs := make([]*blockdb.BinaryTransaction, g.config.TransactionsPerBlock) - for i := uint64(0); i < g.config.TransactionsPerBlock; i++ { - txID := int64(height)*int64(g.config.TransactionsPerBlock) + int64(i) //nolint:gosec - txs[i] = &blockdb.BinaryTransaction{ - Hash: g.rand.Address(txHashType, txID, int(g.config.TransactionHashSize)), - Transaction: g.rand.Bytes(int(g.config.BytesPerTransaction)), - } - } - - blockHash := g.rand.Address(blockHashType, int64(height), int(g.config.BlockHashSize)) //nolint:gosec - blockData := g.rand.Bytes(int(g.config.ExtraBytesPerBlock)) - - return &blockdb.BinaryBlock{ - Height: height, - Hash: blockHash, - BlockData: blockData, - Transactions: txs, - } -} diff --git a/sei-db/block_db/blocksim/blocksim_config.go b/sei-db/block_db/blocksim/blocksim_config.go deleted file mode 100644 index 5ca218ef17..0000000000 --- a/sei-db/block_db/blocksim/blocksim_config.go +++ /dev/null @@ -1,125 +0,0 @@ -package blocksim - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - - 
"github.com/sei-protocol/sei-chain/sei-db/common/unit" -) - -const ( - minHashSize = 20 - minCannedRandomSize = unit.MB -) - -// Configuration for the blocksim benchmark. -type BlocksimConfig struct { - - // The size of each simulated transaction, in bytes. Each transaction in a block will contain - // this many bytes of random data. - BytesPerTransaction uint64 - - // The number of transactions included in each generated block. - TransactionsPerBlock uint64 - - // Additional bytes of random data added to the block itself, beyond the transaction data. This - // simulates block-level metadata or other non-transaction payload. - ExtraBytesPerBlock uint64 - - // The size of each block hash, in bytes. - BlockHashSize uint64 - - // The size of each transaction hash, in bytes. - TransactionHashSize uint64 - - // The capacity of the queue that holds generated blocks before they are consumed by the - // benchmark. A larger queue allows the block generator to run further ahead of the consumer. - StagedBlockQueueSize uint64 - - // The size of the CannedRandom buffer, in bytes. Altering this value for a pre-existing run - // will change the random data generated, don't change it unless you are starting a new run - // from scratch. - CannedRandomSize uint64 - - // The number of blocks to keep in the database after pruning. - UnprunedBlocks uint64 -} - -// Returns the default configuration for the blocksim benchmark. -func DefaultBlocksimConfig() *BlocksimConfig { - return &BlocksimConfig{ - BytesPerTransaction: 512, - TransactionsPerBlock: 1024, - ExtraBytesPerBlock: 256, - BlockHashSize: 32, - TransactionHashSize: 32, - StagedBlockQueueSize: 8, - CannedRandomSize: unit.GB, - UnprunedBlocks: 100_000, - } -} - -// StringifiedConfig returns the config as human-readable, multi-line JSON. 
-func (c *BlocksimConfig) StringifiedConfig() (string, error) { - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return "", err - } - return string(b), nil -} - -// Validate checks that the configuration is sane and returns an error if not. -func (c *BlocksimConfig) Validate() error { - if c.BytesPerTransaction < 1 { - return fmt.Errorf("BytesPerTransaction must be at least 1 (got %d)", c.BytesPerTransaction) - } - if c.TransactionsPerBlock < 1 { - return fmt.Errorf("TransactionsPerBlock must be at least 1 (got %d)", c.TransactionsPerBlock) - } - if c.BlockHashSize < minHashSize { - return fmt.Errorf("BlockHashSize must be at least %d (got %d)", minHashSize, c.BlockHashSize) - } - if c.TransactionHashSize < minHashSize { - return fmt.Errorf("TransactionHashSize must be at least %d (got %d)", minHashSize, c.TransactionHashSize) - } - if c.StagedBlockQueueSize < 1 { - return fmt.Errorf("StagedBlockQueueSize must be at least 1 (got %d)", c.StagedBlockQueueSize) - } - if c.CannedRandomSize < minCannedRandomSize { - return fmt.Errorf("CannedRandomSize must be at least %d (got %d)", - minCannedRandomSize, c.CannedRandomSize) - } - if c.UnprunedBlocks < 1 { - return fmt.Errorf("UnprunedBlocks must be at least 1 (got %d)", c.UnprunedBlocks) - } - return nil -} - -// LoadConfigFromFile parses a JSON config file at the given path. -// Returns defaults with file values overlaid. Fails if the file contains -// unrecognized configuration keys. 
-func LoadConfigFromFile(path string) (*BlocksimConfig, error) { - cfg := DefaultBlocksimConfig() - //nolint:gosec // G304 - path comes from CLI arg, filepath.Clean used to mitigate traversal - f, err := os.Open(filepath.Clean(path)) - if err != nil { - return nil, fmt.Errorf("open config file: %w", err) - } - defer func() { - if err := f.Close(); err != nil { - fmt.Printf("failed to close config file: %v\n", err) - } - }() - - dec := json.NewDecoder(f) - dec.DisallowUnknownFields() - if err := dec.Decode(cfg); err != nil { - return nil, fmt.Errorf("decode config: %w", err) - } - if err := cfg.Validate(); err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return cfg, nil -} diff --git a/sei-db/block_db/mem_block_db.go b/sei-db/block_db/mem_block_db.go deleted file mode 100644 index 68eef13c5f..0000000000 --- a/sei-db/block_db/mem_block_db.go +++ /dev/null @@ -1,123 +0,0 @@ -package blockdb - -import ( - "context" - "sync" -) - -// Shared backing store, keyed by path in test builders to simulate restarts. -type memBlockDBData struct { - mu sync.RWMutex - blocksByHash map[string]*BinaryBlock - blocksByHeight map[uint64]*BinaryBlock - txByHash map[string]*BinaryTransaction - lowestHeight uint64 - highestHeight uint64 - hasBlocks bool -} - -// An in-memory implementation of the BlockDB interface. Useful as a test fixture to sanity check -// test flows. 
-type memBlockDB struct { - data *memBlockDBData -} - -func newMemBlockDB() BlockDB { - return &memBlockDB{ - data: &memBlockDBData{ - blocksByHash: make(map[string]*BinaryBlock), - blocksByHeight: make(map[uint64]*BinaryBlock), - txByHash: make(map[string]*BinaryTransaction), - }, - } -} - -func (m *memBlockDB) WriteBlock(_ context.Context, block *BinaryBlock) error { - d := m.data - d.mu.Lock() - defer d.mu.Unlock() - - d.blocksByHash[string(block.Hash)] = block - d.blocksByHeight[block.Height] = block - for _, tx := range block.Transactions { - d.txByHash[string(tx.Hash)] = tx - } - - if !d.hasBlocks { - d.lowestHeight = block.Height - d.highestHeight = block.Height - d.hasBlocks = true - } else { - if block.Height < d.lowestHeight { - d.lowestHeight = block.Height - } - if block.Height > d.highestHeight { - d.highestHeight = block.Height - } - } - return nil -} - -func (m *memBlockDB) Flush(_ context.Context) error { - return nil -} - -func (m *memBlockDB) GetBlockByHash(_ context.Context, hash []byte) (*BinaryBlock, bool, error) { - d := m.data - d.mu.RLock() - defer d.mu.RUnlock() - - block, ok := d.blocksByHash[string(hash)] - return block, ok, nil -} - -func (m *memBlockDB) GetBlockByHeight(_ context.Context, height uint64) (*BinaryBlock, bool, error) { - d := m.data - d.mu.RLock() - defer d.mu.RUnlock() - - block, ok := d.blocksByHeight[height] - return block, ok, nil -} - -func (m *memBlockDB) GetTransactionByHash(_ context.Context, hash []byte) (*BinaryTransaction, bool, error) { - d := m.data - d.mu.RLock() - defer d.mu.RUnlock() - - tx, ok := d.txByHash[string(hash)] - return tx, ok, nil -} - -func (m *memBlockDB) Prune(_ context.Context, lowestHeightToKeep uint64) error { - d := m.data - d.mu.Lock() - defer d.mu.Unlock() - - if !d.hasBlocks || lowestHeightToKeep <= d.lowestHeight { - return nil - } - - for h := d.lowestHeight; h < lowestHeightToKeep && h <= d.highestHeight; h++ { - block, ok := d.blocksByHeight[h] - if !ok { - continue - } - 
delete(d.blocksByHeight, h) - delete(d.blocksByHash, string(block.Hash)) - for _, tx := range block.Transactions { - delete(d.txByHash, string(tx.Hash)) - } - } - - if lowestHeightToKeep > d.highestHeight { - d.hasBlocks = false - } else { - d.lowestHeight = lowestHeightToKeep - } - return nil -} - -func (m *memBlockDB) Close(_ context.Context) error { - return nil -} diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 23cd733c5e..35e102bea7 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -65,68 +65,9 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { case evm.EVMKeyStorage: storagePairs = s.applyEvmStorageChange(keyBytes, pair, storageOld, storagePairs) case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - // Account data: keyBytes = addr(20) - addr, ok := AddressFromBytes(keyBytes) - if !ok { - return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) - } - addrStr := string(addr[:]) - addrKey := string(AccountKey(addr)) - - if _, seen := oldAccountRawValues[addrStr]; !seen { - if paw, ok := s.accountWrites[addrStr]; ok { - if paw.isDelete { - oldAccountRawValues[addrStr] = nil - } else { - oldAccountRawValues[addrStr] = paw.value.Encode() - } - } else if result, ok := accountOld[addrKey]; ok { - oldAccountRawValues[addrStr] = result.Value - } else { - oldAccountRawValues[addrStr] = nil - } - } - - paw := s.accountWrites[addrStr] - if paw == nil { - var existingValue AccountValue - result := accountOld[addrKey] - if result.IsFound() && result.Value != nil { - av, err := DecodeAccountValue(result.Value) - if err != nil { - return fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) - } - existingValue = av - } - paw = &pendingAccountWrite{ - addr: addr, - value: existingValue, - } - s.accountWrites[addrStr] = paw - } - - if pair.Delete { - if kind == evm.EVMKeyNonce { - paw.value.Nonce = 0 - } else { - 
paw.value.CodeHash = CodeHash{} - } - paw.isDelete = paw.value.IsEmpty() - } else { - if kind == evm.EVMKeyNonce { - if len(pair.Value) != NonceLen { - return fmt.Errorf("invalid nonce value length: got %d, expected %d", - len(pair.Value), NonceLen) - } - paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) - } else { - if len(pair.Value) != CodeHashLen { - return fmt.Errorf("invalid codehash value length: got %d, expected %d", - len(pair.Value), CodeHashLen) - } - copy(paw.value.CodeHash[:], pair.Value) - } - paw.isDelete = paw.value.IsEmpty() + err := s.applyEvmAccountFieldChange(kind, keyBytes, pair, accountOld, oldAccountRawValues) + if err != nil { + return fmt.Errorf("failed to apply EVM account field change: %w", err) } case evm.EVMKeyCode: @@ -275,6 +216,85 @@ func (s *CommitStore) applyEvmStorageChange( }) } +// Apply a single nonce or codehash change to the account db. +func (s *CommitStore) applyEvmAccountFieldChange( + // Whether this is a nonce or codehash change. + kind evm.EVMKeyKind, + // The key with the prefix stripped (addr, 20 bytes). + keyBytes []byte, + // The change to apply. + pair *iavl.KVPair, + // Old account values. + accountOld map[string]types.BatchGetResult, + // Snapshots of old encoded account bytes for LtHash delta computation. + // This function populates entries the first time each address is seen. + oldAccountRawValues map[string][]byte, +) error { + addr, ok := AddressFromBytes(keyBytes) + if !ok { + return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) + } + addrStr := string(addr[:]) + addrKey := string(AccountKey(addr)) + + // Snapshot the old encoded bytes the first time we touch this address, + // so the LtHash delta uses the correct baseline across multiple + // ApplyChangeSets calls before Commit. 
+ if _, seen := oldAccountRawValues[addrStr]; !seen { + if paw, ok := s.accountWrites[addrStr]; ok { + if paw.isDelete { + oldAccountRawValues[addrStr] = nil + } else { + oldAccountRawValues[addrStr] = paw.value.Encode() + } + } else if result, ok := accountOld[addrKey]; ok { + oldAccountRawValues[addrStr] = result.Value + } else { + oldAccountRawValues[addrStr] = nil + } + } + + paw := s.accountWrites[addrStr] + if paw == nil { + var existingValue AccountValue + result := accountOld[addrKey] + if result.IsFound() && result.Value != nil { + av, err := DecodeAccountValue(result.Value) + if err != nil { + return fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) + } + existingValue = av + } + paw = &pendingAccountWrite{addr: addr, value: existingValue} + s.accountWrites[addrStr] = paw + } + + if pair.Delete { + if kind == evm.EVMKeyNonce { + paw.value.Nonce = 0 + } else { + paw.value.CodeHash = CodeHash{} + } + paw.isDelete = paw.value.IsEmpty() + } else { + if kind == evm.EVMKeyNonce { + if len(pair.Value) != NonceLen { + return fmt.Errorf("invalid nonce value length: got %d, expected %d", + len(pair.Value), NonceLen) + } + paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) + } else { + if len(pair.Value) != CodeHashLen { + return fmt.Errorf("invalid codehash value length: got %d, expected %d", + len(pair.Value), CodeHashLen) + } + copy(paw.value.CodeHash[:], pair.Value) + } + paw.isDelete = paw.value.IsEmpty() + } + return nil +} + // Commit persists buffered writes and advances the version. // Protocol: WAL → per-DB batch (with LocalMeta) → flush → update metaDB. // On crash, catchup replays WAL to recover incomplete commits. 
diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go index c6f0822639..15a3f46cfc 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go @@ -55,12 +55,12 @@ func TestCodeNewEmpty(t *testing.T) { func TestCodeSerializeLength(t *testing.T) { bytecode := []byte{0x01, 0x02, 0x03} cd := NewCodeData(bytecode) - require.Len(t, cd.Serialize(), codeHeaderLength+len(bytecode)) + require.Len(t, cd.Serialize(), codeBytecodeStart+len(bytecode)) } func TestCodeSerializeLength_Empty(t *testing.T) { cd := NewCodeData(nil) - require.Len(t, cd.Serialize(), codeHeaderLength) + require.Len(t, cd.Serialize(), codeBytecodeStart) } func TestCodeRoundTrip_WithBytecode(t *testing.T) { @@ -132,7 +132,7 @@ func TestCodeDeserialize_HeaderOnly(t *testing.T) { } func TestCodeDeserialize_UnsupportedVersion(t *testing.T) { - data := make([]byte, codeHeaderLength+1) + data := make([]byte, codeBytecodeStart+1) data[0] = 0xff _, err := DeserializeCodeData(data) require.Error(t, err) @@ -147,7 +147,7 @@ func TestCodeSetterChaining(t *testing.T) { } func TestCodeConstantLayout_V0(t *testing.T) { - require.Equal(t, 9, codeHeaderLength) + require.Equal(t, 9, codeBytecodeStart) } func TestCodeNewCopiesBytecode(t *testing.T) { From cd5931984fec456289b2113e5aa7996c15911dfe Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 14:26:13 -0500 Subject: [PATCH 094/119] incremental progress --- sei-db/state_db/sc/flatkv/store_write.go | 127 ++++++++++++++--------- 1 file changed, 76 insertions(+), 51 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 35e102bea7..8b701b8d7f 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -69,58 +69,10 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { if err != nil { return fmt.Errorf("failed to 
apply EVM account field change: %w", err) } - case evm.EVMKeyCode: - // Code: keyBytes = addr(20) - per x/evm/types/keys.go - keyStr := string(keyBytes) - oldValue := codeOld[keyStr].Value - - if pair.Delete { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } - codeOld[keyStr] = types.BatchGetResult{Value: nil} - } else { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - codeOld[keyStr] = types.BatchGetResult{Value: pair.Value} - } - - // LtHash pair: internal key directly - codePairs = append(codePairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: pair.Value, - LastValue: oldValue, - Delete: pair.Delete, - }) - + codePairs = s.applyEvmCodeChange(keyBytes, pair, codeOld, codePairs) case evm.EVMKeyLegacy: - keyStr := string(keyBytes) - oldValue := legacyOld[keyStr].Value - - if pair.Delete { - s.legacyWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } - legacyOld[keyStr] = types.BatchGetResult{Value: nil} - } else { - s.legacyWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - legacyOld[keyStr] = types.BatchGetResult{Value: pair.Value} - } - - legacyPairs = append(legacyPairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: pair.Value, - LastValue: oldValue, - Delete: pair.Delete, - }) + legacyPairs = s.applyEvmLegacyChange(keyBytes, pair, legacyOld, legacyPairs) } } } @@ -207,7 +159,6 @@ func (s *CommitStore) applyEvmStorageChange( storageOld[keyStr] = types.BatchGetResult{Value: pair.Value} } - // LtHash pair: internal key directly return append(storagePairs, lthash.KVPairWithLastValue{ Key: keyBytes, Value: pair.Value, @@ -216,6 +167,80 @@ func (s *CommitStore) applyEvmStorageChange( }) } +// Apply a single change to the evm code db. +func (s *CommitStore) applyEvmCodeChange( + // The key with the prefix stripped (addr, 20 bytes). + keyBytes []byte, + // The change to apply. 
+ pair *iavl.KVPair, + // This map stores the old value to the key prior to this change. This function updates it + // with the new value, so that the next change will see this value as the previous value. + codeOld map[string]types.BatchGetResult, + // This slice stores both the new and old values for each key modified in this block. + codePairs []lthash.KVPairWithLastValue, +) []lthash.KVPairWithLastValue { + keyStr := string(keyBytes) + oldValue := codeOld[keyStr].Value + + if pair.Delete { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + codeOld[keyStr] = types.BatchGetResult{Value: nil} + } else { + s.codeWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + codeOld[keyStr] = types.BatchGetResult{Value: pair.Value} + } + + return append(codePairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) +} + +// Apply a single change to the evm legacy db. +func (s *CommitStore) applyEvmLegacyChange( + // The key with the prefix stripped. + keyBytes []byte, + // The change to apply. + pair *iavl.KVPair, + // This map stores the old value to the key prior to this change. This function updates it + // with the new value, so that the next change will see this value as the previous value. + legacyOld map[string]types.BatchGetResult, + // This slice stores both the new and old values for each key modified in this block. 
+ legacyPairs []lthash.KVPairWithLastValue, +) []lthash.KVPairWithLastValue { + keyStr := string(keyBytes) + oldValue := legacyOld[keyStr].Value + + if pair.Delete { + s.legacyWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + isDelete: true, + } + legacyOld[keyStr] = types.BatchGetResult{Value: nil} + } else { + s.legacyWrites[keyStr] = &pendingKVWrite{ + key: keyBytes, + value: pair.Value, + } + legacyOld[keyStr] = types.BatchGetResult{Value: pair.Value} + } + + return append(legacyPairs, lthash.KVPairWithLastValue{ + Key: keyBytes, + Value: pair.Value, + LastValue: oldValue, + Delete: pair.Delete, + }) +} + // Apply a single nonce or codehash change to the account db. func (s *CommitStore) applyEvmAccountFieldChange( // Whether this is a nonce or codehash change. From 8ce84b1b02ba91bec994a332883d472223c166e0 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Mon, 30 Mar 2026 15:20:17 -0500 Subject: [PATCH 095/119] convert code data to new format --- sei-db/state_db/sc/flatkv/store.go | 5 +- sei-db/state_db/sc/flatkv/store_read.go | 20 +++++- sei-db/state_db/sc/flatkv/store_write.go | 66 +++++++++++++++---- .../state_db/sc/flatkv/vtype/account_data.go | 8 +-- .../sc/flatkv/vtype/account_data_test.go | 13 ++-- sei-db/state_db/sc/flatkv/vtype/code_data.go | 23 +++++-- .../sc/flatkv/vtype/code_data_test.go | 45 ++++++------- .../state_db/sc/flatkv/vtype/legacy_data.go | 8 +-- .../sc/flatkv/vtype/legacy_data_test.go | 17 ++--- .../sc/flatkv/vtype/pending_account_write.go | 2 +- .../vtype/pending_account_write_test.go | 12 ++-- .../state_db/sc/flatkv/vtype/storage_data.go | 8 +-- .../sc/flatkv/vtype/storage_data_test.go | 13 ++-- 13 files changed, 158 insertions(+), 82 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 4a9d4185ce..0db8d49111 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -17,6 +17,7 @@ import ( seidbtypes 
"github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" "github.com/sei-protocol/sei-chain/sei-db/wal" "github.com/sei-protocol/seilog" @@ -104,7 +105,7 @@ type CommitStore struct { // accountWrites: key = address string (20 bytes), value = AccountValue // codeWrites/storageWrites/legacyWrites: key = internal DB key string, value = raw bytes accountWrites map[string]*pendingAccountWrite - codeWrites map[string]*pendingKVWrite + codeWrites map[string]*vtype.CodeData storageWrites map[string]*pendingKVWrite legacyWrites map[string]*pendingKVWrite @@ -168,7 +169,7 @@ func NewCommitStore( config: *cfg, localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), - codeWrites: make(map[string]*pendingKVWrite), + codeWrites: make(map[string]*vtype.CodeData), storageWrites: make(map[string]*pendingKVWrite), legacyWrites: make(map[string]*pendingKVWrite), pendingChangeSets: make([]*proto.NamedChangeSet, 0), diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index fcf6bfc052..19d4a5480d 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -8,6 +8,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) // Get returns the value for the given memiavl key. 
@@ -235,7 +236,24 @@ func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { } func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { - return s.getKVValue(key, s.codeWrites, s.codeDB, "codeDB") + pendingWrite, hasPending := s.codeWrites[string(key)] + if hasPending { + return pendingWrite.GetBytecode(), nil + } + + value, err := s.codeDB.Get(key) + if err != nil { + if errorutils.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("codeDB I/O error for key %x: %w", key, err) + } + + codeData, err := vtype.DeserializeCodeData(value) + if err != nil { + return nil, fmt.Errorf("failed to deserialize code data: %w", err) + } + return codeData.GetBytecode(), nil } func (s *CommitStore) getLegacyValue(key []byte) ([]byte, error) { diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 8b701b8d7f..df85a08ee9 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) @@ -47,6 +48,9 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { // nil means the account didn't exist (no phantom MixOut for new accounts). oldAccountRawValues := make(map[string][]byte) + s.phaseTimer.SetPhase("apply_change_sets_collect_storage_pairs") + + // For each entry in the change set, accumulate changes for the appropriate DB. 
for _, namedCS := range cs { if namedCS.Changeset.Pairs == nil { continue @@ -182,20 +186,18 @@ func (s *CommitStore) applyEvmCodeChange( keyStr := string(keyBytes) oldValue := codeOld[keyStr].Value + newCodeData := vtype.NewCodeData().SetBlockHeight(s.committedVersion + 1) + if pair.Delete { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } + newCodeData.SetBytecode([]byte{}) codeOld[keyStr] = types.BatchGetResult{Value: nil} } else { - s.codeWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } + newCodeData.SetBytecode(pair.Value) codeOld[keyStr] = types.BatchGetResult{Value: pair.Value} } + s.codeWrites[keyStr] = newCodeData + return append(codePairs, lthash.KVPairWithLastValue{ Key: keyBytes, Value: pair.Value, @@ -405,7 +407,7 @@ func (s *CommitStore) flushAllDBs() error { // clearPendingWrites clears all pending write buffers func (s *CommitStore) clearPendingWrites() { s.accountWrites = make(map[string]*pendingAccountWrite) - s.codeWrites = make(map[string]*pendingKVWrite) + s.codeWrites = make(map[string]*vtype.CodeData) s.storageWrites = make(map[string]*pendingKVWrite) s.legacyWrites = make(map[string]*pendingKVWrite) s.pendingChangeSets = make([]*proto.NamedChangeSet, 0) @@ -451,6 +453,12 @@ func (s *CommitStore) commitBatches(version int64) error { pending = append(pending, pendingCommit{accountDBDir, batch}) } + batch, err := s.prepareBatchCodeDB(version) + if err != nil { + return fmt.Errorf("codeDB commit: %w", err) + } + pending = append(pending, pendingCommit{codeDBDir, batch}) + // Commit to codeDB, storageDB, legacyDB (identical logic per KV DB). 
kvDBs := [...]struct { dir string @@ -458,7 +466,6 @@ func (s *CommitStore) commitBatches(version int64) error { writes map[string]*pendingKVWrite db types.KeyValueDB }{ - {codeDBDir, "commit_code_db_prepare", s.codeWrites, s.codeDB}, {storageDBDir, "commit_storage_db_prepare", s.storageWrites, s.storageDB}, {legacyDBDir, "commit_legacy_db_prepare", s.legacyWrites, s.legacyDB}, } @@ -524,6 +531,39 @@ func (s *CommitStore) commitBatches(version int64) error { return nil } +// Prepare a batch of writes for the codeDB. +func (s *CommitStore) prepareBatchCodeDB(version int64) (types.Batch, error) { + if len(s.codeWrites) == 0 && version <= s.localMeta[codeDBDir].CommittedVersion { + return nil, nil + } + + s.phaseTimer.SetPhase("commit_code_db_prepare") + + batch := s.codeDB.NewBatch() + + for keyStr, cw := range s.codeWrites { + key := []byte(keyStr) + if cw.IsDelete() { + if err := batch.Delete(key); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("codeDB delete: %w", err) + } + } else { + if err := batch.Set(key, cw.Serialize()); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("codeDB set: %w", err) + } + } + } + + if err := writeLocalMetaToBatch(batch, version, s.perDBWorkingLtHash[codeDBDir]); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("codeDB local meta: %w", err) + } + + return batch, nil +} + // batchReadOldValues scans all changeset pairs and returns one result map per // DB containing the "old value" for each key. 
Keys that already have uncommitted // pending writes (from a prior ApplyChangeSets call in the same block) are @@ -596,7 +636,11 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( continue } if pw, ok := s.codeWrites[k]; ok { - codeOld[k] = pendingKVResult(pw) + if pw.IsDelete() { + codeOld[k] = types.BatchGetResult{Value: nil} + } else { + codeOld[k] = types.BatchGetResult{Value: pw.Serialize()} + } } else { codeBatch[k] = types.BatchGetResult{} } diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index 0e9cc95e25..4e03004a00 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -85,8 +85,8 @@ func (a *AccountData) GetSerializationVersion() AccountDataVersion { } // Get the account's block height. -func (a *AccountData) GetBlockHeight() uint64 { - return binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart]) +func (a *AccountData) GetBlockHeight() int64 { + return int64(binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart])) //nolint:gosec // block height is always within int64 range } // Get the account's balance. @@ -123,8 +123,8 @@ func (a *AccountData) Copy() *AccountData { } // Set the account's block height when this account was last modified/touched. Returns self. 
-func (a *AccountData) SetBlockHeight(blockHeight uint64) *AccountData { - binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], blockHeight) +func (a *AccountData) SetBlockHeight(blockHeight int64) *AccountData { + binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative return a } diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go index a4a732a98b..6bb5149496 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go @@ -3,6 +3,7 @@ package vtype import ( "bytes" "encoding/hex" + "math" "os" "path/filepath" "testing" @@ -41,7 +42,7 @@ func TestSerializationGoldenFile_V0(t *testing.T) { // Verify round-trip from the golden bytes. rt, err := DeserializeAccountData(wantBytes) require.NoError(t, err) - require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, int64(100), rt.GetBlockHeight()) require.Equal(t, uint64(42), rt.GetNonce()) require.Equal(t, toArray32(leftPad32([]byte{1})), rt.GetBalance()) require.Equal(t, toArray32(bytes.Repeat([]byte{0xaa}, 32)), rt.GetCodeHash()) @@ -51,7 +52,7 @@ func TestNewAccountData_ZeroInitialized(t *testing.T) { ad := NewAccountData() var zero [32]byte require.Equal(t, AccountDataVersion0, ad.GetSerializationVersion()) - require.Equal(t, uint64(0), ad.GetBlockHeight()) + require.Equal(t, int64(0), ad.GetBlockHeight()) require.Equal(t, uint64(0), ad.GetNonce()) require.Equal(t, &zero, ad.GetBalance()) require.Equal(t, &zero, ad.GetCodeHash()) @@ -74,7 +75,7 @@ func TestRoundTrip_AllFieldsSet(t *testing.T) { rt, err := DeserializeAccountData(ad.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, int64(999), rt.GetBlockHeight()) require.Equal(t, uint64(12345), rt.GetNonce()) require.Equal(t, balance, 
rt.GetBalance()) require.Equal(t, codeHash, rt.GetCodeHash()) @@ -85,7 +86,7 @@ func TestRoundTrip_ZeroValues(t *testing.T) { rt, err := DeserializeAccountData(ad.Serialize()) require.NoError(t, err) var zero [32]byte - require.Equal(t, uint64(0), rt.GetBlockHeight()) + require.Equal(t, int64(0), rt.GetBlockHeight()) require.Equal(t, uint64(0), rt.GetNonce()) require.Equal(t, &zero, rt.GetBalance()) require.Equal(t, &zero, rt.GetCodeHash()) @@ -95,7 +96,7 @@ func TestRoundTrip_MaxValues(t *testing.T) { maxBalance := toArray32(bytes.Repeat([]byte{0xff}, 32)) maxCodeHash := toArray32(bytes.Repeat([]byte{0xff}, 32)) maxNonce := uint64(0xffffffffffffffff) - maxBlockHeight := uint64(0xffffffffffffffff) + maxBlockHeight := int64(math.MaxInt64) ad := NewAccountData(). SetBlockHeight(maxBlockHeight). @@ -165,7 +166,7 @@ func TestSetterChaining(t *testing.T) { SetNonce(3). SetCodeHash(toArray32(leftPad32([]byte{4}))) - require.Equal(t, uint64(1), ad.GetBlockHeight()) + require.Equal(t, int64(1), ad.GetBlockHeight()) require.Equal(t, uint64(3), ad.GetNonce()) } diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data.go b/sei-db/state_db/sc/flatkv/vtype/code_data.go index 19b3214661..5fdacc8258 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data.go @@ -38,9 +38,9 @@ type CodeData struct { } // Create a new CodeData with the given bytecode. -func NewCodeData(bytecode []byte) *CodeData { - data := make([]byte, codeBytecodeStart+len(bytecode)) - copy(data[codeBytecodeStart:], bytecode) +func NewCodeData() *CodeData { + data := make([]byte, codeBytecodeStart) + data[codeVersionStart] = byte(CodeDataVersion0) return &CodeData{data: data} } @@ -80,8 +80,8 @@ func (c *CodeData) GetSerializationVersion() CodeDataVersion { } // Get the block height when this code was last modified. 
-func (c *CodeData) GetBlockHeight() uint64 { - return binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart]) +func (c *CodeData) GetBlockHeight() int64 { + return int64(binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart])) //nolint:gosec // block height is always within int64 range } // Get the contract bytecode. @@ -89,6 +89,15 @@ func (c *CodeData) GetBytecode() []byte { return c.data[codeBytecodeStart:] } +// Set the contract bytecode. +func (c *CodeData) SetBytecode(bytecode []byte) *CodeData { + newData := make([]byte, codeBytecodeStart+len(bytecode)) + copy(newData, c.data[:codeBytecodeStart]) + copy(newData[codeBytecodeStart:], bytecode) + c.data = newData + return c +} + // Check if this code data signifies a deletion operation. A deletion operation is automatically // performed when the bytecode is empty (with the exception of the serialization version and block height). func (c *CodeData) IsDelete() bool { @@ -96,7 +105,7 @@ func (c *CodeData) IsDelete() bool { } // Set the block height when this code was last modified/touched. Returns self. 
-func (c *CodeData) SetBlockHeight(blockHeight uint64) *CodeData { - binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], blockHeight) +func (c *CodeData) SetBlockHeight(blockHeight int64) *CodeData { + binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative return c } diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go index 15a3f46cfc..4b434f9707 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go @@ -3,6 +3,7 @@ package vtype import ( "bytes" "encoding/hex" + "math" "os" "path/filepath" "testing" @@ -12,7 +13,7 @@ import ( func TestCodeSerializationGoldenFile_V0(t *testing.T) { bytecode := []byte{0x60, 0x80, 0x60, 0x40, 0x52} // PUSH1 0x80 PUSH1 0x40 MSTORE - cd := NewCodeData(bytecode). + cd := NewCodeData().SetBytecode(bytecode). SetBlockHeight(100) serialized := cd.Serialize() @@ -33,79 +34,79 @@ func TestCodeSerializationGoldenFile_V0(t *testing.T) { rt, err := DeserializeCodeData(wantBytes) require.NoError(t, err) - require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, int64(100), rt.GetBlockHeight()) require.Equal(t, bytecode, rt.GetBytecode()) } func TestCodeNewWithBytecode(t *testing.T) { bytecode := []byte{0x01, 0x02, 0x03} - cd := NewCodeData(bytecode) + cd := NewCodeData().SetBytecode(bytecode) require.Equal(t, CodeDataVersion0, cd.GetSerializationVersion()) - require.Equal(t, uint64(0), cd.GetBlockHeight()) + require.Equal(t, int64(0), cd.GetBlockHeight()) require.Equal(t, bytecode, cd.GetBytecode()) } func TestCodeNewEmpty(t *testing.T) { - cd := NewCodeData(nil) + cd := NewCodeData() require.Equal(t, CodeDataVersion0, cd.GetSerializationVersion()) - require.Equal(t, uint64(0), cd.GetBlockHeight()) + require.Equal(t, int64(0), cd.GetBlockHeight()) require.Empty(t, cd.GetBytecode()) } func 
TestCodeSerializeLength(t *testing.T) { bytecode := []byte{0x01, 0x02, 0x03} - cd := NewCodeData(bytecode) + cd := NewCodeData().SetBytecode(bytecode) require.Len(t, cd.Serialize(), codeBytecodeStart+len(bytecode)) } func TestCodeSerializeLength_Empty(t *testing.T) { - cd := NewCodeData(nil) + cd := NewCodeData() require.Len(t, cd.Serialize(), codeBytecodeStart) } func TestCodeRoundTrip_WithBytecode(t *testing.T) { bytecode := bytes.Repeat([]byte{0xab}, 1000) - cd := NewCodeData(bytecode). + cd := NewCodeData().SetBytecode(bytecode). SetBlockHeight(999) rt, err := DeserializeCodeData(cd.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, int64(999), rt.GetBlockHeight()) require.Equal(t, bytecode, rt.GetBytecode()) } func TestCodeRoundTrip_EmptyBytecode(t *testing.T) { - cd := NewCodeData(nil). + cd := NewCodeData(). SetBlockHeight(42) rt, err := DeserializeCodeData(cd.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(42), rt.GetBlockHeight()) + require.Equal(t, int64(42), rt.GetBlockHeight()) require.Empty(t, rt.GetBytecode()) } func TestCodeRoundTrip_MaxBlockHeight(t *testing.T) { - cd := NewCodeData([]byte{0xff}). - SetBlockHeight(0xffffffffffffffff) + cd := NewCodeData().SetBytecode([]byte{0xff}). 
+ SetBlockHeight(math.MaxInt64) rt, err := DeserializeCodeData(cd.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(0xffffffffffffffff), rt.GetBlockHeight()) + require.Equal(t, int64(math.MaxInt64), rt.GetBlockHeight()) require.Equal(t, []byte{0xff}, rt.GetBytecode()) } func TestCodeIsDelete_EmptyBytecode(t *testing.T) { - cd := NewCodeData(nil).SetBlockHeight(500) + cd := NewCodeData().SetBlockHeight(500) require.True(t, cd.IsDelete()) } func TestCodeIsDelete_EmptySlice(t *testing.T) { - cd := NewCodeData([]byte{}) + cd := NewCodeData().SetBytecode([]byte{}) require.True(t, cd.IsDelete()) } func TestCodeIsDelete_NonEmptyBytecode(t *testing.T) { - cd := NewCodeData([]byte{0x01}) + cd := NewCodeData().SetBytecode([]byte{0x01}) require.False(t, cd.IsDelete()) } @@ -125,7 +126,7 @@ func TestCodeDeserialize_TooShort(t *testing.T) { } func TestCodeDeserialize_HeaderOnly(t *testing.T) { - cd := NewCodeData(nil) + cd := NewCodeData() rt, err := DeserializeCodeData(cd.Serialize()) require.NoError(t, err) require.Empty(t, rt.GetBytecode()) @@ -139,10 +140,10 @@ func TestCodeDeserialize_UnsupportedVersion(t *testing.T) { } func TestCodeSetterChaining(t *testing.T) { - cd := NewCodeData([]byte{0x01}). + cd := NewCodeData().SetBytecode([]byte{0x01}). SetBlockHeight(42) - require.Equal(t, uint64(42), cd.GetBlockHeight()) + require.Equal(t, int64(42), cd.GetBlockHeight()) require.Equal(t, []byte{0x01}, cd.GetBytecode()) } @@ -152,7 +153,7 @@ func TestCodeConstantLayout_V0(t *testing.T) { func TestCodeNewCopiesBytecode(t *testing.T) { bytecode := []byte{0x01, 0x02, 0x03} - cd := NewCodeData(bytecode) + cd := NewCodeData().SetBytecode(bytecode) // Mutating the original should not affect the CodeData. 
bytecode[0] = 0xff require.Equal(t, byte(0x01), cd.GetBytecode()[0]) diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index f6278d483e..0762336eb4 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -81,8 +81,8 @@ func (l *LegacyData) GetSerializationVersion() LegacyDataVersion { } // Get the block height when this legacy data was last modified. -func (l *LegacyData) GetBlockHeight() uint64 { - return binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart]) +func (l *LegacyData) GetBlockHeight() int64 { + return int64(binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart])) //nolint:gosec // block height is always within int64 range } // Get the legacy value. @@ -97,7 +97,7 @@ func (l *LegacyData) IsDelete() bool { } // Set the block height when this legacy data was last modified/touched. Returns self. -func (l *LegacyData) SetBlockHeight(blockHeight uint64) *LegacyData { - binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], blockHeight) +func (l *LegacyData) SetBlockHeight(blockHeight int64) *LegacyData { + binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative return l } diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go index 299e98a85a..6a8cfa96ce 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go @@ -3,6 +3,7 @@ package vtype import ( "bytes" "encoding/hex" + "math" "os" "path/filepath" "testing" @@ -33,7 +34,7 @@ func TestLegacySerializationGoldenFile_V0(t *testing.T) { rt, err := DeserializeLegacyData(wantBytes) require.NoError(t, err) - require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, int64(100), rt.GetBlockHeight()) 
require.Equal(t, value, rt.GetValue()) } @@ -41,14 +42,14 @@ func TestLegacyNewWithValue(t *testing.T) { value := []byte{0x01, 0x02, 0x03} ld := NewLegacyData(value) require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) - require.Equal(t, uint64(0), ld.GetBlockHeight()) + require.Equal(t, int64(0), ld.GetBlockHeight()) require.Equal(t, value, ld.GetValue()) } func TestLegacyNewEmpty(t *testing.T) { ld := NewLegacyData(nil) require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) - require.Equal(t, uint64(0), ld.GetBlockHeight()) + require.Equal(t, int64(0), ld.GetBlockHeight()) require.Empty(t, ld.GetValue()) } @@ -70,7 +71,7 @@ func TestLegacyRoundTrip_WithValue(t *testing.T) { rt, err := DeserializeLegacyData(ld.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, int64(999), rt.GetBlockHeight()) require.Equal(t, value, rt.GetValue()) } @@ -80,17 +81,17 @@ func TestLegacyRoundTrip_EmptyValue(t *testing.T) { rt, err := DeserializeLegacyData(ld.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(42), rt.GetBlockHeight()) + require.Equal(t, int64(42), rt.GetBlockHeight()) require.Empty(t, rt.GetValue()) } func TestLegacyRoundTrip_MaxBlockHeight(t *testing.T) { ld := NewLegacyData([]byte{0xff}). - SetBlockHeight(0xffffffffffffffff) + SetBlockHeight(math.MaxInt64) rt, err := DeserializeLegacyData(ld.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(0xffffffffffffffff), rt.GetBlockHeight()) + require.Equal(t, int64(math.MaxInt64), rt.GetBlockHeight()) require.Equal(t, []byte{0xff}, rt.GetValue()) } @@ -142,7 +143,7 @@ func TestLegacySetterChaining(t *testing.T) { ld := NewLegacyData([]byte{0x01}). 
SetBlockHeight(42) - require.Equal(t, uint64(42), ld.GetBlockHeight()) + require.Equal(t, int64(42), ld.GetBlockHeight()) require.Equal(t, []byte{0x01}, ld.GetValue()) } diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go index 870421a839..f4d96ab6de 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go @@ -59,7 +59,7 @@ func (p *PendingAccountWrite) SetCodeHash(codeHash *[32]byte) *PendingAccountWri // Merge applies the pending field changes onto a copy of the base AccountData, updating the // block height. Only fields that have been set via Set* methods are overwritten; all other // fields are carried over from the base. The base is not modified. -func (p *PendingAccountWrite) Merge(base *AccountData, blockHeight uint64) *AccountData { +func (p *PendingAccountWrite) Merge(base *AccountData, blockHeight int64) *AccountData { result := base.Copy().SetBlockHeight(blockHeight) if p.balance != nil { diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go index d21a863884..6c9832b67c 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go @@ -14,7 +14,7 @@ func TestPAW_SetNonce_MergeOntoZeroBase(t *testing.T) { result := paw.Merge(base, 100) require.Equal(t, uint64(42), result.GetNonce()) - require.Equal(t, uint64(100), result.GetBlockHeight()) + require.Equal(t, int64(100), result.GetBlockHeight()) var zero [32]byte require.Equal(t, &zero, result.GetBalance()) require.Equal(t, &zero, result.GetCodeHash()) @@ -38,7 +38,7 @@ func TestPAW_SetCodeHash_MergeOntoExistingAccount(t *testing.T) { require.Equal(t, toArray32(leftPad32([]byte{0xff})), result.GetBalance()) require.Equal(t, uint64(10), result.GetNonce()) // Block height updated - 
require.Equal(t, uint64(100), result.GetBlockHeight()) + require.Equal(t, int64(100), result.GetBlockHeight()) } func TestPAW_SetBalance_MergeOntoExistingAccount(t *testing.T) { @@ -54,7 +54,7 @@ func TestPAW_SetBalance_MergeOntoExistingAccount(t *testing.T) { require.Equal(t, newBalance, result.GetBalance()) require.Equal(t, uint64(5), result.GetNonce()) - require.Equal(t, uint64(60), result.GetBlockHeight()) + require.Equal(t, int64(60), result.GetBlockHeight()) } func TestPAW_MultipleFields(t *testing.T) { @@ -76,7 +76,7 @@ func TestPAW_MultipleFields(t *testing.T) { require.Equal(t, newBalance, result.GetBalance()) require.Equal(t, uint64(99), result.GetNonce()) require.Equal(t, newCodeHash, result.GetCodeHash()) - require.Equal(t, uint64(200), result.GetBlockHeight()) + require.Equal(t, int64(200), result.GetBlockHeight()) } func TestPAW_ZeroNonce(t *testing.T) { @@ -86,7 +86,7 @@ func TestPAW_ZeroNonce(t *testing.T) { result := paw.Merge(base, 10) require.Equal(t, uint64(0), result.GetNonce()) - require.Equal(t, uint64(10), result.GetBlockHeight()) + require.Equal(t, int64(10), result.GetBlockHeight()) } func TestPAW_ZeroBalance(t *testing.T) { @@ -134,7 +134,7 @@ func TestPAW_MergeDoesNotModifyBase(t *testing.T) { _ = paw.Merge(base, 100) // Base must be unchanged - require.Equal(t, uint64(50), base.GetBlockHeight()) + require.Equal(t, int64(50), base.GetBlockHeight()) require.Equal(t, uint64(10), base.GetNonce()) } diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data.go b/sei-db/state_db/sc/flatkv/vtype/storage_data.go index e8b6f66b23..aab7b7e440 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data.go @@ -81,8 +81,8 @@ func (s *StorageData) GetSerializationVersion() StorageDataVersion { } // Get the block height when this storage slot was last modified. 
-func (s *StorageData) GetBlockHeight() uint64 { - return binary.BigEndian.Uint64(s.data[storageBlockHeightStart:storageValueStart]) +func (s *StorageData) GetBlockHeight() int64 { + return int64(binary.BigEndian.Uint64(s.data[storageBlockHeightStart:storageValueStart])) //nolint:gosec // block height is always within int64 range } // Get the storage slot value. @@ -102,8 +102,8 @@ func (s *StorageData) IsDelete() bool { } // Set the block height when this storage slot was last modified/touched. Returns self. -func (s *StorageData) SetBlockHeight(blockHeight uint64) *StorageData { - binary.BigEndian.PutUint64(s.data[storageBlockHeightStart:storageValueStart], blockHeight) +func (s *StorageData) SetBlockHeight(blockHeight int64) *StorageData { + binary.BigEndian.PutUint64(s.data[storageBlockHeightStart:storageValueStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative return s } diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go index 397f96fe8f..80ac966aae 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go @@ -3,6 +3,7 @@ package vtype import ( "bytes" "encoding/hex" + "math" "os" "path/filepath" "testing" @@ -34,7 +35,7 @@ func TestStorageSerializationGoldenFile_V0(t *testing.T) { rt, err := DeserializeStorageData(wantBytes) require.NoError(t, err) - require.Equal(t, uint64(100), rt.GetBlockHeight()) + require.Equal(t, int64(100), rt.GetBlockHeight()) require.Equal(t, val, rt.GetValue()) } @@ -42,7 +43,7 @@ func TestStorageNewZeroInitialized(t *testing.T) { sd := NewStorageData() var zero [32]byte require.Equal(t, StorageDataVersion0, sd.GetSerializationVersion()) - require.Equal(t, uint64(0), sd.GetBlockHeight()) + require.Equal(t, int64(0), sd.GetBlockHeight()) require.Equal(t, &zero, sd.GetValue()) } @@ -59,7 +60,7 @@ func TestStorageRoundTrip_AllFieldsSet(t *testing.T) { rt, err := 
DeserializeStorageData(sd.Serialize()) require.NoError(t, err) - require.Equal(t, uint64(999), rt.GetBlockHeight()) + require.Equal(t, int64(999), rt.GetBlockHeight()) require.Equal(t, val, rt.GetValue()) } @@ -68,13 +69,13 @@ func TestStorageRoundTrip_ZeroValues(t *testing.T) { rt, err := DeserializeStorageData(sd.Serialize()) require.NoError(t, err) var zero [32]byte - require.Equal(t, uint64(0), rt.GetBlockHeight()) + require.Equal(t, int64(0), rt.GetBlockHeight()) require.Equal(t, &zero, rt.GetValue()) } func TestStorageRoundTrip_MaxValues(t *testing.T) { maxVal := toArray32(bytes.Repeat([]byte{0xff}, 32)) - maxBlockHeight := uint64(0xffffffffffffffff) + maxBlockHeight := int64(math.MaxInt64) sd := NewStorageData(). SetBlockHeight(maxBlockHeight). @@ -128,7 +129,7 @@ func TestStorageSetterChaining(t *testing.T) { SetBlockHeight(1). SetValue(toArray32(leftPad32([]byte{2}))) - require.Equal(t, uint64(1), sd.GetBlockHeight()) + require.Equal(t, int64(1), sd.GetBlockHeight()) } func TestStorageConstantLayout_V0(t *testing.T) { From ee30ca9ab6c8553726a7be79e6f07ae6e74854f3 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 31 Mar 2026 09:13:16 -0500 Subject: [PATCH 096/119] made suggested changes --- .../cryptosim/cmd/configure-logger/main.go | 2 +- .../bench/cryptosim/cmd/cryptosim/main.go | 37 +++++++++------ .../bench/cryptosim/config/basic-config.json | 2 + .../bench/cryptosim/config/debug.json | 4 +- .../bench/cryptosim/cryptosim_config.go | 12 ++++- sei-db/state_db/sc/flatkv/config.go | 22 ++++++++- sei-db/state_db/sc/flatkv/store_write.go | 45 ++++++++----------- 7 files changed, 79 insertions(+), 45 deletions(-) diff --git a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go index 9a48afc410..262910d53e 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go +++ b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go @@ -42,7 +42,7 @@ func run() error { 
return fmt.Errorf("LogDir is empty, refusing to proceed") } - if cfg.DeleteDataDirOnStartup { + if cfg.DeleteLogDirOnStartup { resolved, err := filepath.Abs(cfg.LogDir) if err != nil { return fmt.Errorf("failed to resolve log directory: %w", err) diff --git a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go index 7847630a07..c1b7385cbb 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go +++ b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go @@ -151,19 +151,30 @@ func run() error { cs.BlockUntilHalted() if config.DeleteDataDirOnShutdown { - for _, dir := range []string{config.DataDir, config.LogDir} { - if dir == "" { - return fmt.Errorf("directory path is empty, refusing to delete") - } - resolved, err := filepath.Abs(dir) - if err != nil { - return fmt.Errorf("failed to resolve directory: %w", err) - } - fmt.Printf("Deleting directory: %s\n", resolved) - err = os.RemoveAll(resolved) - if err != nil { - return fmt.Errorf("failed to delete directory %s: %w", resolved, err) - } + if config.DataDir == "" { + return fmt.Errorf("DataDir is empty, refusing to delete") + } + resolved, err := filepath.Abs(config.DataDir) + if err != nil { + return fmt.Errorf("failed to resolve data directory: %w", err) + } + fmt.Printf("Deleting data directory: %s\n", resolved) + if err := os.RemoveAll(resolved); err != nil { + return fmt.Errorf("failed to delete data directory %s: %w", resolved, err) + } + } + + if config.DeleteLogDirOnShutdown { + if config.LogDir == "" { + return fmt.Errorf("LogDir is empty, refusing to delete") + } + resolved, err := filepath.Abs(config.LogDir) + if err != nil { + return fmt.Errorf("failed to resolve log directory: %w", err) + } + fmt.Printf("Deleting log directory: %s\n", resolved) + if err := os.RemoveAll(resolved); err != nil { + return fmt.Errorf("failed to delete log directory %s: %w", resolved, err) } } diff --git 
a/sei-db/state_db/bench/cryptosim/config/basic-config.json b/sei-db/state_db/bench/cryptosim/config/basic-config.json index 283016de21..397d8b4a0b 100644 --- a/sei-db/state_db/bench/cryptosim/config/basic-config.json +++ b/sei-db/state_db/bench/cryptosim/config/basic-config.json @@ -12,7 +12,9 @@ "Erc20InteractionsPerAccount": 10, "Erc20StorageSlotSize": 32, "DeleteDataDirOnStartup": false, + "DeleteLogDirOnStartup": false, "DeleteDataDirOnShutdown": false, + "DeleteLogDirOnShutdown": false, "ExecutorQueueSize": 1024, "HotAccountProbability": 0.1, "HotErc20ContractProbability": 0.5, diff --git a/sei-db/state_db/bench/cryptosim/config/debug.json b/sei-db/state_db/bench/cryptosim/config/debug.json index a8e5666931..35f4bfa4f7 100644 --- a/sei-db/state_db/bench/cryptosim/config/debug.json +++ b/sei-db/state_db/bench/cryptosim/config/debug.json @@ -3,6 +3,8 @@ "DataDir": "data", "LogDir": "logs", "DeleteDataDirOnStartup": true, - "DeleteDataDirOnShutdown": true + "DeleteLogDirOnStartup": true, + "DeleteDataDirOnShutdown": true, + "DeleteLogDirOnShutdown": true } diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index 9eb7ebb037..5aaeb40145 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -138,12 +138,18 @@ type CryptoSimConfig struct { // If false, Enter has no effect. EnableSuspension bool - // If true, the data directory and log directory will be deleted on startup if they exist. + // If true, the data directory will be deleted on startup if it exists. DeleteDataDirOnStartup bool - // If true, the data directory and log directory will be deleted on a clean shutdown. + // If true, the log directory will be deleted on startup if it exists. + DeleteLogDirOnStartup bool + + // If true, the data directory will be deleted on a clean shutdown. 
DeleteDataDirOnShutdown bool + // If true, the log directory will be deleted on a clean shutdown. + DeleteLogDirOnShutdown bool + // Configures the FlatKV database. Ignored if Backend is not "FlatKV". FlatKVConfig *flatkv.Config @@ -221,7 +227,9 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { BackgroundMetricsScrapeInterval: 60, EnableSuspension: true, DeleteDataDirOnStartup: false, + DeleteLogDirOnStartup: false, DeleteDataDirOnShutdown: false, + DeleteLogDirOnShutdown: false, FlatKVConfig: flatkv.DefaultConfig(), BlockChannelCapacity: 8, GenerateReceipts: false, diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 9959dcbde4..1da9f1b6e0 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -49,26 +49,31 @@ type Config struct { // AccountDBConfig defines the PebbleDB configuration for the account database. AccountDBConfig pebbledb.PebbleDBConfig + // AccountCacheConfig defines the cache configuration for the account database. AccountCacheConfig dbcache.CacheConfig // CodeDBConfig defines the PebbleDB configuration for the code database. CodeDBConfig pebbledb.PebbleDBConfig + // CodeCacheConfig defines the cache configuration for the code database. CodeCacheConfig dbcache.CacheConfig // StorageDBConfig defines the PebbleDB configuration for the storage database. StorageDBConfig pebbledb.PebbleDBConfig + // StorageCacheConfig defines the cache configuration for the storage database. StorageCacheConfig dbcache.CacheConfig // LegacyDBConfig defines the PebbleDB configuration for the legacy database. LegacyDBConfig pebbledb.PebbleDBConfig + // LegacyCacheConfig defines the cache configuration for the legacy database. LegacyCacheConfig dbcache.CacheConfig // MetadataDBConfig defines the PebbleDB configuration for the metadata database. MetadataDBConfig pebbledb.PebbleDBConfig + // MetadataCacheConfig defines the cache configuration for the metadata database. 
MetadataCacheConfig dbcache.CacheConfig @@ -154,6 +159,21 @@ func (c *Config) InitializeDataDirectories() { // Validate checks that the configuration is sane and returns an error if it is not. func (c *Config) Validate() error { + if err := c.AccountCacheConfig.Validate(); err != nil { + return fmt.Errorf("account cache config is invalid: %w", err) + } + if err := c.CodeCacheConfig.Validate(); err != nil { + return fmt.Errorf("code cache config is invalid: %w", err) + } + if err := c.StorageCacheConfig.Validate(); err != nil { + return fmt.Errorf("storage cache config is invalid: %w", err) + } + if err := c.LegacyCacheConfig.Validate(); err != nil { + return fmt.Errorf("legacy cache config is invalid: %w", err) + } + if err := c.MetadataCacheConfig.Validate(); err != nil { + return fmt.Errorf("metadata cache config is invalid: %w", err) + } if c.DataDir == "" { return fmt.Errorf("data dir is required") } @@ -173,7 +193,7 @@ func (c *Config) Validate() error { return fmt.Errorf("metadata db config is invalid: %w", err) } - if c.ReaderThreadsPerCore < 0 { + if c.ReaderThreadsPerCore <= 0 { return fmt.Errorf("reader threads per core must be greater than 0") } if c.ReaderConstantThreadCount < 0 { diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 578c3eb3ab..d1b62fbee8 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -94,24 +94,23 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { if !ok { return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) } - addrStr := string(addr[:]) addrKey := string(AccountKey(addr)) - if _, seen := oldAccountRawValues[addrStr]; !seen { - if paw, ok := s.accountWrites[addrStr]; ok { + if _, seen := oldAccountRawValues[addrKey]; !seen { + if paw, ok := s.accountWrites[addrKey]; ok { if paw.isDelete { - oldAccountRawValues[addrStr] = nil + oldAccountRawValues[addrKey] = nil } else { - 
oldAccountRawValues[addrStr] = paw.value.Encode() + oldAccountRawValues[addrKey] = paw.value.Encode() } } else if result, ok := accountOld[addrKey]; ok { - oldAccountRawValues[addrStr] = result.Value + oldAccountRawValues[addrKey] = result.Value } else { - oldAccountRawValues[addrStr] = nil + oldAccountRawValues[addrKey] = nil } } - paw := s.accountWrites[addrStr] + paw := s.accountWrites[addrKey] if paw == nil { var existingValue AccountValue result := accountOld[addrKey] @@ -126,7 +125,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { addr: addr, value: existingValue, } - s.accountWrites[addrStr] = paw + s.accountWrites[addrKey] = paw } if pair.Delete { @@ -326,8 +325,8 @@ func (s *CommitStore) flushAllDBs() error { wg.Add(4) for i, db := range []types.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} { err := s.miscPool.Submit(s.ctx, func() { + defer wg.Done() errs[i] = db.Flush() - wg.Done() }) if err != nil { return fmt.Errorf("failed to submit flush: %w", err) @@ -521,7 +520,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( if !ok { continue } - k := string(AccountKey(addr)) + k := string(addr[:]) if _, done := accountOld[k]; done { continue } @@ -567,8 +566,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( storageErr = s.storageDB.BatchGet(storageBatch) }) if err != nil { - err = fmt.Errorf("failed to submit batch get: %w", err) - return + return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) } } @@ -579,8 +577,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( accountErr = s.accountDB.BatchGet(accountBatch) }) if err != nil { - err = fmt.Errorf("failed to submit batch get: %w", err) - return + return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) } } @@ -591,8 +588,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( codeErr = s.codeDB.BatchGet(codeBatch) }) if err != nil { - 
err = fmt.Errorf("failed to submit batch get: %w", err) - return + return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) } } @@ -603,8 +599,7 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( legacyErr = s.legacyDB.BatchGet(legacyBatch) }) if err != nil { - err = fmt.Errorf("failed to submit batch get: %w", err) - return + return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) } } @@ -618,29 +613,25 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( // real read errors. for k, v := range storageBatch { if v.Error != nil { - err = fmt.Errorf("storageDB batch read error for key %x: %w", k, v.Error) - return + return nil, nil, nil, nil, fmt.Errorf("storageDB batch read error for key %x: %w", k, v.Error) } storageOld[k] = v } for k, v := range accountBatch { if v.Error != nil { - err = fmt.Errorf("accountDB batch read error for key %x: %w", k, v.Error) - return + return nil, nil, nil, nil, fmt.Errorf("accountDB batch read error for key %x: %w", k, v.Error) } accountOld[k] = v } for k, v := range codeBatch { if v.Error != nil { - err = fmt.Errorf("codeDB batch read error for key %x: %w", k, v.Error) - return + return nil, nil, nil, nil, fmt.Errorf("codeDB batch read error for key %x: %w", k, v.Error) } codeOld[k] = v } for k, v := range legacyBatch { if v.Error != nil { - err = fmt.Errorf("legacyDB batch read error for key %x: %w", k, v.Error) - return + return nil, nil, nil, nil, fmt.Errorf("legacyDB batch read error for key %x: %w", k, v.Error) } legacyOld[k] = v } From 51560a39a0de4ed393fe976e4c1ca08da54685c6 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Tue, 31 Mar 2026 14:52:56 -0500 Subject: [PATCH 097/119] incremental progress --- sei-db/state_db/sc/composite/store_test.go | 20 +- sei-db/state_db/sc/flatkv/exporter.go | 37 +++- sei-db/state_db/sc/flatkv/exporter_test.go | 14 +- sei-db/state_db/sc/flatkv/iterator.go | 14 +- .../sc/flatkv/lthash_correctness_test.go | 8 
+- sei-db/state_db/sc/flatkv/snapshot_test.go | 34 +-- sei-db/state_db/sc/flatkv/store.go | 17 +- sei-db/state_db/sc/flatkv/store_meta_test.go | 2 +- sei-db/state_db/sc/flatkv/store_read.go | 53 +++-- sei-db/state_db/sc/flatkv/store_read_test.go | 20 +- sei-db/state_db/sc/flatkv/store_test.go | 67 +++--- sei-db/state_db/sc/flatkv/store_write.go | 208 +++++++++++------- sei-db/state_db/sc/flatkv/store_write_test.go | 69 +++--- 13 files changed, 333 insertions(+), 230 deletions(-) diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index 4152eb1bd4..86a39a60a2 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -41,6 +41,12 @@ func (f *failingEVMStore) GetPhaseTimer() *metrics.PhaseTimer { retur func (f *failingEVMStore) CommittedRootHash() []byte { return nil } func (f *failingEVMStore) Close() error { return nil } +func padLeft32(val ...byte) []byte { + var b [32]byte + copy(b[32-len(val):], val) + return b[:] +} + func TestCompositeStoreBasicOperations(t *testing.T) { dir := t.TempDir() cfg := config.DefaultStateCommitConfig() @@ -202,7 +208,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { Name: EVMStoreName, Changeset: iavl.ChangeSet{ Pairs: []*iavl.KVPair{ - {Key: evmStorageKey, Value: []byte{round}}, + {Key: evmStorageKey, Value: padLeft32(round)}, }, }, }, @@ -510,7 +516,7 @@ func TestExportImportSplitWrite(t *testing.T) { slot := flatkv.Slot{0xBB} storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, flatkv.StorageKey(addr, slot)) - storageVal := []byte{0x42} + storageVal := padLeft32(0x42) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 10} @@ -700,7 +706,7 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { Name: EVMStoreName, Changeset: iavl.ChangeSet{ Pairs: []*iavl.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }, }, }, @@ -767,7 +773,7 @@ func 
TestReconcileVersionsThenContinueCommitting(t *testing.T) { {Key: []byte("bal"), Value: []byte{i}}, }}}, {Name: EVMStoreName, Changeset: iavl.ChangeSet{Pairs: []*iavl.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }}}, })) _, err = cs.Commit() @@ -803,7 +809,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { {Key: []byte("bal"), Value: v}, }}}, {Name: EVMStoreName, Changeset: iavl.ChangeSet{Pairs: []*iavl.KVPair{ - {Key: storageKey, Value: v}, + {Key: storageKey, Value: padLeft32(v...)}, }}}, })) ver, err := cs2.Commit() @@ -830,7 +836,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { got, found := cs3.evmCommitter.Get(storageKey) require.True(t, found) - require.Equal(t, []byte{0xA5}, got) + require.Equal(t, padLeft32(0xA5), got) } func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { @@ -861,7 +867,7 @@ func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { Name: EVMStoreName, Changeset: iavl.ChangeSet{ Pairs: []*iavl.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }, }, }, diff --git a/sei-db/state_db/sc/flatkv/exporter.go b/sei-db/state_db/sc/flatkv/exporter.go index 60413672e6..4252a43358 100644 --- a/sei-db/state_db/sc/flatkv/exporter.go +++ b/sei-db/state_db/sc/flatkv/exporter.go @@ -8,6 +8,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" dbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" ) @@ -154,11 +155,11 @@ func (e *KVExporter) convertToNodes(db exportDBKind, key, value []byte) ([]*type case exportDBAccount: return e.accountToNodes(key, value) case exportDBCode: - return e.codeToNodes(key, value), nil + return e.codeToNodes(key, value) case exportDBStorage: - return 
e.storageToNodes(key, value), nil + return e.storageToNodes(key, value) case exportDBLegacy: - return e.legacyToNodes(key, value), nil + return e.legacyToNodes(key, value) default: return nil, nil } @@ -197,31 +198,43 @@ func (e *KVExporter) accountToNodes(key, value []byte) ([]*types.SnapshotNode, e return nodes, nil } -func (e *KVExporter) codeToNodes(key, value []byte) []*types.SnapshotNode { +func (e *KVExporter) codeToNodes(key, value []byte) ([]*types.SnapshotNode, error) { + codeData, err := vtype.DeserializeCodeData(value) + if err != nil { + return nil, fmt.Errorf("corrupt code entry key=%x: %w", key, err) + } memiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, key) return []*types.SnapshotNode{{ Key: memiavlKey, - Value: value, + Value: codeData.GetBytecode(), Version: e.version, Height: 0, - }} + }}, nil } -func (e *KVExporter) storageToNodes(key, value []byte) []*types.SnapshotNode { +func (e *KVExporter) storageToNodes(key, value []byte) ([]*types.SnapshotNode, error) { + storageData, err := vtype.DeserializeStorageData(value) + if err != nil { + return nil, fmt.Errorf("corrupt storage entry key=%x: %w", key, err) + } memiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, key) return []*types.SnapshotNode{{ Key: memiavlKey, - Value: value, + Value: storageData.GetValue()[:], Version: e.version, Height: 0, - }} + }}, nil } -func (e *KVExporter) legacyToNodes(key, value []byte) []*types.SnapshotNode { +func (e *KVExporter) legacyToNodes(key, value []byte) ([]*types.SnapshotNode, error) { + legacyData, err := vtype.DeserializeLegacyData(value) + if err != nil { + return nil, fmt.Errorf("corrupt legacy entry key=%x: %w", key, err) + } return []*types.SnapshotNode{{ Key: key, - Value: value, + Value: legacyData.GetValue(), Version: e.version, Height: 0, - }} + }}, nil } diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index 55475959cc..915cdd6d4d 100644 --- a/sei-db/state_db/sc/flatkv/exporter_test.go 
+++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -51,8 +51,8 @@ func TestExporterStorageKeys(t *testing.T) { addr := Address{0xAA} slot1 := Slot{0x01} slot2 := Slot{0x02} - val1 := []byte{0x11} - val2 := []byte{0x22} + val1 := padLeft32(0x11) + val2 := padLeft32(0x22) key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot1)) key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot2)) @@ -159,7 +159,7 @@ func TestExporterRoundTrip(t *testing.T) { slot := Slot{0xEE} storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageVal := []byte{0xFF} + storageVal := padLeft32(0xFF) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 7} codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) @@ -273,7 +273,7 @@ func TestImportSurvivesReopen(t *testing.T) { slot := Slot{0xEE} storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageVal := []byte{0xFF} + storageVal := padLeft32(0xFF) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 7} @@ -377,8 +377,8 @@ func TestImportPurgesStaleData(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ {Name: "evm", Changeset: iavl.ChangeSet{Pairs: []*iavl.KVPair{ - {Key: storageA, Value: []byte{0x0A}}, - {Key: storageStale, Value: []byte{0x0C}}, + {Key: storageA, Value: padLeft32(0x0A)}, + {Key: storageStale, Value: padLeft32(0x0C)}, {Key: nonceA, Value: nonceVal}, {Key: nonceStale, Value: nonceVal}, {Key: codeHashB, Value: codeHashVal}, @@ -400,7 +400,7 @@ func TestImportPurgesStaleData(t *testing.T) { src := setupTestStore(t) defer src.Close() - newStorageVal := []byte{0xA1} + newStorageVal := padLeft32(0xA1) newNonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 5} newCodeHashVal := make([]byte, CodeHashLen) newCodeHashVal[31] = 0xCD diff --git a/sei-db/state_db/sc/flatkv/iterator.go b/sei-db/state_db/sc/flatkv/iterator.go index 
6ea8557c86..44a90e6fc0 100644 --- a/sei-db/state_db/sc/flatkv/iterator.go +++ b/sei-db/state_db/sc/flatkv/iterator.go @@ -5,6 +5,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) // dbIterator is a generic iterator that wraps a PebbleDB iterator @@ -226,7 +227,18 @@ func (it *dbIterator) Value() []byte { if !it.Valid() { return nil } - return it.iter.Value() + raw := it.iter.Value() + switch it.kind { + case evm.EVMKeyStorage: + sd, err := vtype.DeserializeStorageData(raw) + if err != nil { + it.err = fmt.Errorf("deserialize storage value: %w", err) + return nil + } + return sd.GetValue()[:] + default: + return raw + } } // CommitStore factory methods for creating iterators diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 3a408cfc3a..d6c58b4286 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -105,9 +105,11 @@ func codeDeletePair(addr Address) *iavl.KVPair { } func storagePair(addr Address, slot Slot, val []byte) *iavl.KVPair { + padded := make([]byte, 32) + copy(padded[32-len(val):], val) return &iavl.KVPair{ Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), - Value: val, + Value: padded, } } @@ -767,7 +769,7 @@ func TestLtHashCrossApplyStorageOverwrite(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) val, found := s.Get(key) require.True(t, found) - require.Equal(t, []byte{0x33}, val) + require.Equal(t, padLeft32(0x33), val) } // TestLtHashCrossApplyCodeOverwrite verifies that overwriting the same code @@ -912,7 +914,7 @@ func TestLtHashCrossApplyMixedOverwrite(t *testing.T) { storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) storageVal, found := s.Get(storageKey) 
require.True(t, found) - require.Equal(t, []byte{0x33}, storageVal) + require.Equal(t, padLeft32(0x33), storageVal) legacyVal, found := s.Get(legacyKey) require.True(t, found) diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 2bc48bd75a..11d9560386 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -17,11 +17,13 @@ import ( func commitStorageEntry(t *testing.T, s *CommitStore, addr Address, slot Slot, value []byte) int64 { t.Helper() + padded := make([]byte, 32) + copy(padded[32-len(value):], value) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) cs := &proto.NamedChangeSet{ Name: "evm", Changeset: iavl.ChangeSet{ - Pairs: []*iavl.KVPair{{Key: key, Value: value}}, + Pairs: []*iavl.KVPair{{Key: key, Value: padded}}, }, } require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) @@ -121,10 +123,10 @@ func TestOpenFromSnapshot(t *testing.T) { key3 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x10}, Slot{0x03})) v, ok := s2.Get(key1) require.True(t, ok) - require.Equal(t, []byte{0x01}, v) + require.Equal(t, padLeft32(0x01), v) v, ok = s2.Get(key3) require.True(t, ok) - require.Equal(t, []byte{0x03}, v) + require.Equal(t, padLeft32(0x03), v) } func TestCatchupUpdatesLtHash(t *testing.T) { @@ -197,7 +199,7 @@ func TestRollbackRewindsState(t *testing.T) { key4 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x30}, Slot{0x04})) v, ok := s.Get(key4) require.True(t, ok) - require.Equal(t, []byte{0x04}, v) + require.Equal(t, padLeft32(0x04), v) require.NoError(t, s.Close()) } @@ -474,7 +476,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { // Record baseline value at v2 for the same key. vAtV2, ok := s1.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x01}, vAtV2) + require.Equal(t, padLeft32(0x01), vAtV2) // Phase 2: advance state beyond the snapshot (v3..v4). 
commitStorageEntry(t, s1, addr, slot, []byte{0x03}) // v3 @@ -492,7 +494,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, err) gotV2, ok := s2.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x01}, gotV2, "snapshot baseline should remain stable") + require.Equal(t, padLeft32(0x01), gotV2, "snapshot baseline should remain stable") require.NoError(t, s2.Close()) // Phase 4: reopen latest again to ensure catchup/replay still reaches v4. @@ -507,7 +509,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.Equal(t, int64(4), s3.Version()) gotLatest, ok := s3.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x04}, gotLatest) + require.Equal(t, padLeft32(0x04), gotLatest) } // TestLoadVersionMixedSequence: load-old -> load-latest -> load-old-again. @@ -547,7 +549,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.Equal(t, hashAtV2, s1.RootHash()) v, ok := s1.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x02}, v) + require.Equal(t, padLeft32(0x02), v) require.NoError(t, s1.Close()) // Round 2: load latest (catches up through v3, v4) @@ -561,7 +563,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.Equal(t, hashAtV4, s2.RootHash()) v, ok = s2.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x04}, v) + require.Equal(t, padLeft32(0x04), v) require.NoError(t, s2.Close()) // Round 3: load v2 AGAIN — snapshot must still be clean. 
@@ -575,7 +577,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.Equal(t, hashAtV2, s3.RootHash()) v, ok = s3.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x02}, v) + require.Equal(t, padLeft32(0x02), v) require.NoError(t, s3.Close()) } @@ -1231,7 +1233,7 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { slot := Slot{0xCD} pairs := []*iavl.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: []byte{0x11}}, + {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: padLeft32(0x11)}, {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 7}}, {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, } @@ -1258,7 +1260,7 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) v, ok := s2.Get(storageKey) require.True(t, ok) - require.Equal(t, []byte{0x11}, v) + require.Equal(t, padLeft32(0x11), v) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) v, ok = s2.Get(nonceKey) @@ -1330,7 +1332,7 @@ func TestReopenAfterDeletes(t *testing.T) { cs := &proto.NamedChangeSet{ Name: "evm", Changeset: iavl.ChangeSet{Pairs: []*iavl.KVPair{ - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: []byte{0x11}}, + {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), Value: padLeft32(0x11)}, {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 42}}, {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: ch[:]}, {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), Value: []byte{0x60, 0x80}}, @@ -1412,7 +1414,7 @@ func TestWALTruncationThenRollback(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) val, found := s.Get(key) require.True(t, found, "key at block %d should exist after rollback to v5", 
i) - require.Equal(t, []byte{byte(i)}, val) + require.Equal(t, padLeft32(byte(i)), val) } for i := 6; i <= 10; i++ { @@ -1459,7 +1461,7 @@ func TestReopenAfterSnapshotAndTruncation(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) val, found := s2.Get(key) require.True(t, found, "key at block %d should exist after reopen", i) - require.Equal(t, []byte{byte(i)}, val) + require.Equal(t, padLeft32(byte(i)), val) } } @@ -1585,7 +1587,7 @@ func TestWALDirectoryDeleted(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x03}, Slot{0x03})) val, found := s2.Get(key) require.True(t, found) - require.Equal(t, []byte{0xCC}, val) + require.Equal(t, padLeft32(0xCC), val) } func TestLocalMetaCorruption(t *testing.T) { diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 9476c1dfe2..28316ddcd2 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -53,13 +53,6 @@ const ( // dataDBDirs lists all data DB directory names (used for per-DB LtHash iteration). var dataDBDirs = []string{accountDBDir, codeDBDir, storageDBDir, legacyDBDir} -// pendingKVWrite tracks a buffered key-value write for code/storage DBs. -type pendingKVWrite struct { - key []byte // Internal DB key - value []byte - isDelete bool -} - // pendingAccountWrite tracks a buffered account write. 
// Uses AccountValue structure: balance(32) || nonce(8) || codehash(32) // @@ -103,12 +96,10 @@ type CommitStore struct { perDBWorkingLtHash map[string]*lthash.LtHash // Pending writes buffer - // accountWrites: key = address string (20 bytes), value = AccountValue - // codeWrites/storageWrites/legacyWrites: key = internal DB key string, value = raw bytes accountWrites map[string]*pendingAccountWrite codeWrites map[string]*vtype.CodeData - storageWrites map[string]*pendingKVWrite - legacyWrites map[string]*pendingKVWrite + storageWrites map[string]*vtype.StorageData + legacyWrites map[string]*vtype.LegacyData changelog wal.ChangelogWAL pendingChangeSets []*proto.NamedChangeSet @@ -171,8 +162,8 @@ func NewCommitStore( localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), codeWrites: make(map[string]*vtype.CodeData), - storageWrites: make(map[string]*pendingKVWrite), - legacyWrites: make(map[string]*pendingKVWrite), + storageWrites: make(map[string]*vtype.StorageData), + legacyWrites: make(map[string]*vtype.LegacyData), pendingChangeSets: make([]*proto.NamedChangeSet, 0), committedLtHash: lthash.New(), workingLtHash: lthash.New(), diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go index 840f9bc968..c51db83b12 100644 --- a/sei-db/state_db/sc/flatkv/store_meta_test.go +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -61,7 +61,7 @@ func TestStoreCommitBatchesUpdatesLocalMeta(t *testing.T) { slot := Slot{0x34} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0x56}, false) + cs := makeChangeSet(key, padLeft32(0x56), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) v := commitAndCheck(t, s) require.Equal(t, int64(1), v) diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 19d4a5480d..7c7ce4503d 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ 
b/sei-db/state_db/sc/flatkv/store_read.go @@ -7,7 +7,6 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) @@ -207,32 +206,28 @@ func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { return av, nil } -// getKVValue returns the value from pending writes or the backing DB. -// Returns (nil, nil) if not found. Returns (nil, error) on I/O error. -func (s *CommitStore) getKVValue( - key []byte, - writes map[string]*pendingKVWrite, - db seidbtypes.KeyValueDB, - dbName string, -) ([]byte, error) { - if pw, ok := writes[string(key)]; ok { - if pw.isDelete { +func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { + pendingWrite, hasPending := s.storageWrites[string(key)] + if hasPending { + if pendingWrite.IsDelete() { return nil, nil } - return pw.value, nil + return pendingWrite.GetValue()[:], nil } - value, err := db.Get(key) + + value, err := s.storageDB.Get(key) if err != nil { if errorutils.IsNotFound(err) { return nil, nil } - return nil, fmt.Errorf("%s I/O error for key %x: %w", dbName, key, err) + return nil, fmt.Errorf("storageDB I/O error for key %x: %w", key, err) } - return value, nil -} -func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { - return s.getKVValue(key, s.storageWrites, s.storageDB, "storageDB") + storageData, err := vtype.DeserializeStorageData(value) + if err != nil { + return nil, fmt.Errorf("failed to deserialize storage data: %w", err) + } + return storageData.GetValue()[:], nil } func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { @@ -257,5 +252,25 @@ func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { } func (s *CommitStore) getLegacyValue(key []byte) ([]byte, error) { - return s.getKVValue(key, s.legacyWrites, s.legacyDB, "legacyDB") + 
pendingWrite, hasPending := s.legacyWrites[string(key)] + if hasPending { + if pendingWrite.IsDelete() { + return nil, nil + } + return pendingWrite.GetValue(), nil + } + + value, err := s.legacyDB.Get(key) + if err != nil { + if errorutils.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("legacyDB I/O error for key %x: %w", key, err) + } + + legacyData, err := vtype.DeserializeLegacyData(value) + if err != nil { + return nil, fmt.Errorf("failed to deserialize legacy data: %w", err) + } + return legacyData.GetValue(), nil } diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 5a370c4a2f..dc19f481a6 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -19,7 +19,7 @@ func TestStoreGetPendingWrites(t *testing.T) { addr := Address{0x11} slot := Slot{0x22} - value := []byte{0x33} + value := padLeft32(0x33) key := memiavlStorageKey(addr, slot) // No data initially @@ -53,7 +53,7 @@ func TestStoreGetPendingDelete(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and commit - cs1 := makeChangeSet(key, []byte{0x66}, false) + cs1 := makeChangeSet(key, padLeft32(0x66), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) @@ -108,7 +108,7 @@ func TestStoreHas(t *testing.T) { require.False(t, s.Has(key)) // Write and commit - cs := makeChangeSet(key, []byte{0xAA}, false) + cs := makeChangeSet(key, padLeft32(0xAA), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -189,14 +189,14 @@ func TestStoreDelete(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write - cs1 := makeChangeSet(key, []byte{0x77}, false) + cs1 := makeChangeSet(key, padLeft32(0x77), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) // Verify exists got, found := s.Get(key) require.True(t, found) - require.Equal(t, []byte{0x77}, 
got) + require.Equal(t, padLeft32(0x77), got) // Delete cs2 := makeChangeSet(key, nil, true) @@ -229,7 +229,7 @@ func TestStoreIteratorSingleKey(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - value := []byte{0xCC} + value := padLeft32(0xCC) memiavlKey := memiavlStorageKey(addr, slot) internalKey := StorageKey(addr, slot) // addr(20) || slot(32) @@ -270,7 +270,7 @@ func TestStoreIteratorMultipleKeys(t *testing.T) { pairs := make([]*iavl.KVPair, len(entries)) for i, e := range entries { key := memiavlStorageKey(addr, e.slot) - pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} + pairs[i] = &iavl.KVPair{Key: key, Value: padLeft32(e.value)} } cs := &proto.NamedChangeSet{ @@ -324,7 +324,7 @@ func TestStoreStoragePrefixIteration(t *testing.T) { for i := byte(1); i <= 3; i++ { slot := Slot{i} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{i * 10}, false) + cs := makeChangeSet(key, padLeft32(i*10), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } commitAndCheck(t, s) @@ -354,7 +354,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { for i := byte(1); i <= 3; i++ { slot := Slot{i} key := memiavlStorageKey(addr1, slot) - cs := makeChangeSet(key, []byte{i * 10}, false) + cs := makeChangeSet(key, padLeft32(i*10), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } @@ -362,7 +362,7 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { for i := byte(1); i <= 2; i++ { slot := Slot{i} key := memiavlStorageKey(addr2, slot) - cs := makeChangeSet(key, []byte{i * 20}, false) + cs := makeChangeSet(key, padLeft32(i*20), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 2034dd616f..1b939567aa 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -44,6 +44,13 @@ func memiavlStorageKey(addr Address, slot Slot) 
[]byte { return evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, internal) } +// padLeft32 returns a 32-byte big-endian value with the given bytes right-aligned. +func padLeft32(val ...byte) []byte { + var b [32]byte + copy(b[32-len(val):], val) + return b[:] +} + // makeChangeSet creates a changeset func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { return &proto.NamedChangeSet{ @@ -137,7 +144,7 @@ func TestStoreCommitVersionAutoIncrement(t *testing.T) { slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0xCC}, false) + cs := makeChangeSet(key, padLeft32(0xCC), false) // Initial version is 0 require.Equal(t, int64(0), s.Version()) @@ -170,7 +177,7 @@ func TestStoreApplyAndCommit(t *testing.T) { addr := Address{0x11} slot := Slot{0x22} - value := []byte{0x33} + value := padLeft32(0x33) key := memiavlStorageKey(addr, slot) cs := makeChangeSet(key, value, false) @@ -208,7 +215,7 @@ func TestStoreMultipleWrites(t *testing.T) { pairs := make([]*iavl.KVPair, len(entries)) for i, e := range entries { key := memiavlStorageKey(addr, e.slot) - pairs[i] = &iavl.KVPair{Key: key, Value: []byte{e.value}} + pairs[i] = &iavl.KVPair{Key: key, Value: padLeft32(e.value)} } cs := &proto.NamedChangeSet{ @@ -226,7 +233,7 @@ func TestStoreMultipleWrites(t *testing.T) { key := memiavlStorageKey(addr, e.slot) got, found := s.Get(key) require.True(t, found) - require.Equal(t, []byte{e.value}, got) + require.Equal(t, padLeft32(e.value), got) } } @@ -254,7 +261,7 @@ func TestStoreClearsPendingAfterCommit(t *testing.T) { slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0xCC}, false) + cs := makeChangeSet(key, padLeft32(0xCC), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should have pending writes @@ -281,14 +288,14 @@ func TestStoreVersioning(t *testing.T) { key := memiavlStorageKey(addr, slot) // Version 1 - cs1 := makeChangeSet(key, []byte{0x01}, false) + cs1 := 
makeChangeSet(key, padLeft32(0x01), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) require.Equal(t, int64(1), s.Version()) // Version 2 with updated value - cs2 := makeChangeSet(key, []byte{0x02}, false) + cs2 := makeChangeSet(key, padLeft32(0x02), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) @@ -297,7 +304,7 @@ func TestStoreVersioning(t *testing.T) { // Latest value should be from version 2 got, found := s.Get(key) require.True(t, found) - require.Equal(t, []byte{0x02}, got) + require.Equal(t, padLeft32(0x02), got) } func TestStorePersistence(t *testing.T) { @@ -305,7 +312,7 @@ func TestStorePersistence(t *testing.T) { addr := Address{0xDD} slot := Slot{0xEE} - value := []byte{0xFF} + value := padLeft32(0xFF) key := memiavlStorageKey(addr, slot) // Write and close @@ -355,7 +362,7 @@ func TestStoreRootHashChanges(t *testing.T) { slot := Slot{0xCD} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0xEF}, false) + cs := makeChangeSet(key, padLeft32(0xEF), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Working hash should change @@ -383,7 +390,7 @@ func TestStoreRootHashChangesOnApply(t *testing.T) { slot := Slot{0xFF} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0x11}, false) + cs := makeChangeSet(key, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Working hash should change @@ -399,7 +406,7 @@ func TestStoreRootHashStableAfterCommit(t *testing.T) { slot := Slot{0x34} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0x56}, false) + cs := makeChangeSet(key, padLeft32(0x56), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Get working hash @@ -712,7 +719,7 @@ func TestPersistenceAllKeyTypes(t *testing.T) { nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) codeKey := 
evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - cs := makeChangeSet(storageKey, []byte{0x11}, false) + cs := makeChangeSet(storageKey, padLeft32(0x11), false) require.NoError(t, s1.ApplyChangeSets([]*proto.NamedChangeSet{cs})) cs2 := makeChangeSet(nonceKey, []byte{0, 0, 0, 0, 0, 0, 0, 5}, false) require.NoError(t, s1.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) @@ -736,7 +743,7 @@ func TestPersistenceAllKeyTypes(t *testing.T) { v, ok := s2.Get(storageKey) require.True(t, ok) - require.Equal(t, []byte{0x11}, v) + require.Equal(t, padLeft32(0x11), v) v, ok = s2.Get(nonceKey) require.True(t, ok) @@ -760,7 +767,7 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - value := []byte{0xCC} + value := padLeft32(0xCC) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, value, false)})) commitAndCheck(t, s) @@ -787,7 +794,7 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { addr := Address{0xCC} slot := Slot{0xDD} key := memiavlStorageKey(addr, slot) - value := []byte{0xEE} + value := padLeft32(0xEE) require.NoError(t, writer.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, value, false)})) commitAndCheck(t, writer) @@ -818,7 +825,7 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { for i := byte(1); i <= 5; i++ { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - makeChangeSet(key, []byte{i}, false), + makeChangeSet(key, padLeft32(i), false), })) commitAndCheck(t, s) } @@ -830,7 +837,7 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { require.Equal(t, int64(3), ro.Version()) got, found := ro.Get(key) require.True(t, found) - require.Equal(t, []byte{3}, got) + require.Equal(t, padLeft32(3), got) } func TestReadOnlyWriteGuards(t *testing.T) { @@ -843,7 +850,7 @@ func TestReadOnlyWriteGuards(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - require.NoError(t, 
s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{1}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) ro, err := s.LoadVersion(0, true) @@ -872,16 +879,16 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{1}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) ro, err := s.LoadVersion(0, true) require.NoError(t, err) defer ro.Close() - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{2}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(2), false)})) commitAndCheck(t, s) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{3}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(3), false)})) commitAndCheck(t, s) require.Equal(t, int64(3), s.Version()) @@ -889,7 +896,7 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { require.Equal(t, int64(1), ro.Version()) got, found := ro.Get(key) require.True(t, found) - require.Equal(t, []byte{1}, got) + require.Equal(t, padLeft32(1), got) } func TestReadOnlyConcurrentInstances(t *testing.T) { @@ -907,7 +914,7 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { for i := byte(1); i <= 4; i++ { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - makeChangeSet(key, []byte{i}, false), + makeChangeSet(key, padLeft32(i), false), })) commitAndCheck(t, s) } @@ -927,8 +934,8 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { g2, ok2 := ro2.Get(key) require.True(t, ok1) require.True(t, ok2) - require.Equal(t, []byte{4}, g1) - require.Equal(t, []byte{4}, g2) + 
require.Equal(t, padLeft32(4), g1) + require.Equal(t, padLeft32(4), g2) } func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { @@ -941,20 +948,20 @@ func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{1}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) _, err = s.LoadVersion(999, true) require.Error(t, err) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{2}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(2), false)})) v, err := s.Commit() require.NoError(t, err) require.Equal(t, int64(2), v) got, found := s.Get(key) require.True(t, found) - require.Equal(t, []byte{2}, got) + require.Equal(t, padLeft32(2), got) } func TestReadOnlyCloseRemovesTempDir(t *testing.T) { @@ -967,7 +974,7 @@ func TestReadOnlyCloseRemovesTempDir(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} key := memiavlStorageKey(addr, slot) - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, []byte{1}, false)})) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{makeChangeSet(key, padLeft32(1), false)})) commitAndCheck(t, s) roStore, err := s.LoadVersion(0, true) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 314bbf4be1..5ad45b45ba 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -67,16 +67,19 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { // Route to appropriate DB based on key type switch kind { case evm.EVMKeyStorage: - storagePairs = s.applyEvmStorageChange(keyBytes, pair, storageOld, storagePairs) + if !pair.Delete && len(pair.Value) != 32 { + 
return fmt.Errorf("invalid storage value length: got %d, expected 32", len(pair.Value)) + } + storagePairs = s.accumulateEvmStorageChanges(keyBytes, pair, storageOld, storagePairs) case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - err := s.applyEvmAccountFieldChange(kind, keyBytes, pair, accountOld, oldAccountRawValues) + err := s.accumulateEvmAccountFieldChanges(kind, keyBytes, pair, accountOld, oldAccountRawValues) if err != nil { return fmt.Errorf("failed to apply EVM account field change: %w", err) } case evm.EVMKeyCode: - codePairs = s.applyEvmCodeChange(keyBytes, pair, codeOld, codePairs) + codePairs = s.accumulateEvmCodeChanges(keyBytes, pair, codeOld, codePairs) case evm.EVMKeyLegacy: - legacyPairs = s.applyEvmLegacyChange(keyBytes, pair, legacyOld, legacyPairs) + legacyPairs = s.accumulateEvmLegacyChanges(keyBytes, pair, legacyOld, legacyPairs) } } } @@ -135,7 +138,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } // Apply a single change to the evm storage db. -func (s *CommitStore) applyEvmStorageChange( +func (s *CommitStore) accumulateEvmStorageChanges( // The key with the prefix stripped. keyBytes []byte, // The change to apply. @@ -149,30 +152,32 @@ func (s *CommitStore) applyEvmStorageChange( keyStr := string(keyBytes) oldValue := storageOld[keyStr].Value + newStorageData := vtype.NewStorageData().SetBlockHeight(s.committedVersion + 1) + if pair.Delete { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } + // Value stays all-zeros → IsDelete() returns true. 
storageOld[keyStr] = types.BatchGetResult{Value: nil} } else { - s.storageWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - storageOld[keyStr] = types.BatchGetResult{Value: pair.Value} + newStorageData.SetValue((*[32]byte)(pair.Value)) + storageOld[keyStr] = types.BatchGetResult{Value: newStorageData.Serialize()} } + s.storageWrites[keyStr] = newStorageData + + var serializedValue []byte + if !pair.Delete { + serializedValue = newStorageData.Serialize() + } return append(storagePairs, lthash.KVPairWithLastValue{ Key: keyBytes, - Value: pair.Value, + Value: serializedValue, LastValue: oldValue, Delete: pair.Delete, }) } // Apply a single change to the evm code db. -func (s *CommitStore) applyEvmCodeChange( +func (s *CommitStore) accumulateEvmCodeChanges( // The key with the prefix stripped (addr, 20 bytes). keyBytes []byte, // The change to apply. @@ -193,21 +198,25 @@ func (s *CommitStore) applyEvmCodeChange( codeOld[keyStr] = types.BatchGetResult{Value: nil} } else { newCodeData.SetBytecode(pair.Value) - codeOld[keyStr] = types.BatchGetResult{Value: pair.Value} + codeOld[keyStr] = types.BatchGetResult{Value: newCodeData.Serialize()} } s.codeWrites[keyStr] = newCodeData + var serializedValue []byte + if !pair.Delete { + serializedValue = newCodeData.Serialize() + } return append(codePairs, lthash.KVPairWithLastValue{ Key: keyBytes, - Value: pair.Value, + Value: serializedValue, LastValue: oldValue, Delete: pair.Delete, }) } // Apply a single change to the evm legacy db. -func (s *CommitStore) applyEvmLegacyChange( +func (s *CommitStore) accumulateEvmLegacyChanges( // The key with the prefix stripped. keyBytes []byte, // The change to apply. 
@@ -221,30 +230,32 @@ func (s *CommitStore) applyEvmLegacyChange( keyStr := string(keyBytes) oldValue := legacyOld[keyStr].Value + var newLegacyData *vtype.LegacyData if pair.Delete { - s.legacyWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - isDelete: true, - } + newLegacyData = vtype.NewLegacyData([]byte{}) legacyOld[keyStr] = types.BatchGetResult{Value: nil} } else { - s.legacyWrites[keyStr] = &pendingKVWrite{ - key: keyBytes, - value: pair.Value, - } - legacyOld[keyStr] = types.BatchGetResult{Value: pair.Value} + newLegacyData = vtype.NewLegacyData(pair.Value) + legacyOld[keyStr] = types.BatchGetResult{Value: newLegacyData.Serialize()} } + newLegacyData.SetBlockHeight(s.committedVersion + 1) + + s.legacyWrites[keyStr] = newLegacyData + var serializedValue []byte + if !pair.Delete { + serializedValue = newLegacyData.Serialize() + } return append(legacyPairs, lthash.KVPairWithLastValue{ Key: keyBytes, - Value: pair.Value, + Value: serializedValue, LastValue: oldValue, Delete: pair.Delete, }) } // Apply a single nonce or codehash change to the account db. -func (s *CommitStore) applyEvmAccountFieldChange( +func (s *CommitStore) accumulateEvmAccountFieldChanges( // Whether this is a nonce or codehash change. kind evm.EVMKeyKind, // The key with the prefix stripped (addr, 20 bytes). 
@@ -407,8 +418,8 @@ func (s *CommitStore) flushAllDBs() error { func (s *CommitStore) clearPendingWrites() { s.accountWrites = make(map[string]*pendingAccountWrite) s.codeWrites = make(map[string]*vtype.CodeData) - s.storageWrites = make(map[string]*pendingKVWrite) - s.legacyWrites = make(map[string]*pendingKVWrite) + s.storageWrites = make(map[string]*vtype.StorageData) + s.legacyWrites = make(map[string]*vtype.LegacyData) s.pendingChangeSets = make([]*proto.NamedChangeSet, 0) } @@ -456,42 +467,24 @@ func (s *CommitStore) commitBatches(version int64) error { if err != nil { return fmt.Errorf("codeDB commit: %w", err) } - pending = append(pending, pendingCommit{codeDBDir, batch}) + if batch != nil { + pending = append(pending, pendingCommit{codeDBDir, batch}) + } - // Commit to codeDB, storageDB, legacyDB (identical logic per KV DB). - kvDBs := [...]struct { - dir string - phase string - writes map[string]*pendingKVWrite - db types.KeyValueDB - }{ - {storageDBDir, "commit_storage_db_prepare", s.storageWrites, s.storageDB}, - {legacyDBDir, "commit_legacy_db_prepare", s.legacyWrites, s.legacyDB}, + batch, err = s.prepareBatchStorageDB(version) + if err != nil { + return fmt.Errorf("storageDB commit: %w", err) + } + if batch != nil { + pending = append(pending, pendingCommit{storageDBDir, batch}) } - for _, spec := range kvDBs { - if len(spec.writes) == 0 && version <= s.localMeta[spec.dir].CommittedVersion { - continue - } - s.phaseTimer.SetPhase(spec.phase) - batch := spec.db.NewBatch() - defer func(b types.Batch) { _ = b.Close() }(batch) - - for _, pw := range spec.writes { - if pw.isDelete { - if err := batch.Delete(pw.key); err != nil { - return fmt.Errorf("%s delete: %w", spec.dir, err) - } - } else { - if err := batch.Set(pw.key, pw.value); err != nil { - return fmt.Errorf("%s set: %w", spec.dir, err) - } - } - } - if err := writeLocalMetaToBatch(batch, version, s.perDBWorkingLtHash[spec.dir]); err != nil { - return fmt.Errorf("%s local meta: %w", spec.dir, 
err) - } - pending = append(pending, pendingCommit{spec.dir, batch}) + batch, err = s.prepareBatchLegacyDB(version) + if err != nil { + return fmt.Errorf("legacyDB commit: %w", err) + } + if batch != nil { + pending = append(pending, pendingCommit{legacyDBDir, batch}) } if len(pending) == 0 { @@ -563,6 +556,72 @@ func (s *CommitStore) prepareBatchCodeDB(version int64) (types.Batch, error) { return batch, nil } +// Prepare a batch of writes for the storageDB. +func (s *CommitStore) prepareBatchStorageDB(version int64) (types.Batch, error) { + if len(s.storageWrites) == 0 && version <= s.localMeta[storageDBDir].CommittedVersion { + return nil, nil + } + + s.phaseTimer.SetPhase("commit_storage_db_prepare") + + batch := s.storageDB.NewBatch() + + for keyStr, sw := range s.storageWrites { + key := []byte(keyStr) + if sw.IsDelete() { + if err := batch.Delete(key); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("storageDB delete: %w", err) + } + } else { + if err := batch.Set(key, sw.Serialize()); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("storageDB set: %w", err) + } + } + } + + if err := writeLocalMetaToBatch(batch, version, s.perDBWorkingLtHash[storageDBDir]); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("storageDB local meta: %w", err) + } + + return batch, nil +} + +// Prepare a batch of writes for the legacyDB. 
+func (s *CommitStore) prepareBatchLegacyDB(version int64) (types.Batch, error) { + if len(s.legacyWrites) == 0 && version <= s.localMeta[legacyDBDir].CommittedVersion { + return nil, nil + } + + s.phaseTimer.SetPhase("commit_legacy_db_prepare") + + batch := s.legacyDB.NewBatch() + + for keyStr, lw := range s.legacyWrites { + key := []byte(keyStr) + if lw.IsDelete() { + if err := batch.Delete(key); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("legacyDB delete: %w", err) + } + } else { + if err := batch.Set(key, lw.Serialize()); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("legacyDB set: %w", err) + } + } + } + + if err := writeLocalMetaToBatch(batch, version, s.perDBWorkingLtHash[legacyDBDir]); err != nil { + _ = batch.Close() + return nil, fmt.Errorf("legacyDB local meta: %w", err) + } + + return batch, nil +} + // batchReadOldValues scans all changeset pairs and returns one result map per // DB containing the "old value" for each key. Keys that already have uncommitted // pending writes (from a prior ApplyChangeSets call in the same block) are @@ -586,13 +645,6 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( codeBatch := make(map[string]types.BatchGetResult) legacyBatch := make(map[string]types.BatchGetResult) - pendingKVResult := func(pw *pendingKVWrite) types.BatchGetResult { - if pw.isDelete { - return types.BatchGetResult{Value: nil} - } - return types.BatchGetResult{Value: pw.value} - } - // Partition changeset keys: resolve from pending writes when available // (prior ApplyChangeSets call in the same block), otherwise queue for // a DB batch read. 
@@ -609,7 +661,11 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( continue } if pw, ok := s.storageWrites[k]; ok { - storageOld[k] = pendingKVResult(pw) + if pw.IsDelete() { + storageOld[k] = types.BatchGetResult{Value: nil} + } else { + storageOld[k] = types.BatchGetResult{Value: pw.Serialize()} + } } else { storageBatch[k] = types.BatchGetResult{} } @@ -650,7 +706,11 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( continue } if pw, ok := s.legacyWrites[k]; ok { - legacyOld[k] = pendingKVResult(pw) + if pw.IsDelete() { + legacyOld[k] = types.BatchGetResult{Value: nil} + } else { + legacyOld[k] = types.BatchGetResult{Value: pw.Serialize()} + } } else { legacyBatch[k] = types.BatchGetResult{} } diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 0b9da7700f..b1bc2db546 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -64,7 +64,7 @@ func TestStoreWriteAllDBs(t *testing.T) { // Storage key { Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), - Value: []byte{0x11, 0x22}, + Value: padLeft32(0x11, 0x22), }, // Account nonce key { @@ -105,13 +105,13 @@ func TestStoreWriteAllDBs(t *testing.T) { require.Equal(t, int64(1), int64(binary.BigEndian.Uint64(raw)), "%s persisted version", name) } - // Verify storage data was written - storageData, err := s.storageDB.Get(StorageKey(addr, slot)) - require.NoError(t, err) - require.Equal(t, []byte{0x11, 0x22}, storageData) + // Verify storage data was written (via Store.Get which deserializes) + storageMemiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) + storageValue, found := s.Get(storageMemiavlKey) + require.True(t, found, "Storage should be found") + require.Equal(t, padLeft32(0x11, 0x22), storageValue) // Verify account and code data was written - // Use Store.Get method which handles the kind prefix 
correctly nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) nonceValue, found := s.Get(nonceKey) require.True(t, found, "Nonce should be found") @@ -122,14 +122,9 @@ func TestStoreWriteAllDBs(t *testing.T) { require.True(t, found, "Code should be found") require.Equal(t, []byte{0x60, 0x60, 0x60}, codeValue) - // Verify bytecode stored directly in codeDB (raw key = addr) - codeRaw, err := s.codeDB.Get(addr[:]) - require.NoError(t, err) - require.Equal(t, []byte{0x60, 0x60, 0x60}, codeRaw) - - // Verify legacy data persisted in legacyDB (full key preserved) - legacyVal, err := s.legacyDB.Get(legacyKey) - require.NoError(t, err) + // Verify legacy data persisted (via Store.Get which deserializes) + legacyVal, found := s.Get(legacyKey) + require.True(t, found, "Legacy should be found") require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -151,7 +146,7 @@ func TestStoreWriteEmptyCommit(t *testing.T) { addr := Address{0x99} slot := Slot{0x88} key := memiavlStorageKey(addr, slot) - cs := makeChangeSet(key, []byte{0x77}, false) + cs := makeChangeSet(key, padLeft32(0x77), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) @@ -238,7 +233,7 @@ func TestStoreWriteDelete(t *testing.T) { pairs := []*iavl.KVPair{ { Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), - Value: []byte{0x11}, + Value: padLeft32(0x11), }, { Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), @@ -382,10 +377,10 @@ func TestStoreWriteLegacyKeys(t *testing.T) { // Verify legacyDB LocalMeta is updated require.Equal(t, int64(1), s.localMeta[legacyDBDir].CommittedVersion) - // Verify data persisted in legacyDB (full key preserved) - stored, err := s.legacyDB.Get(codeSizeKey) - require.NoError(t, err) - require.Equal(t, codeSizeValue, stored) + // Verify data persisted (via Store.Get which deserializes) + got, found := s.Get(codeSizeKey) + require.True(t, found) + require.Equal(t, codeSizeValue, got) } func 
TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { @@ -399,7 +394,7 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { // Storage (optimized) { Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)), - Value: []byte{0x11, 0x22}, + Value: padLeft32(0x11, 0x22), }, // Nonce (optimized) { @@ -428,11 +423,11 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { requireAllLocalMetaAt(t, s, 1) - // Verify legacy data persisted + // Verify legacy data persisted (via Store.Get which deserializes) codeSizeKey := append([]byte{0x09}, addr[:]...) - stored, err := s.legacyDB.Get(codeSizeKey) - require.NoError(t, err) - require.Equal(t, []byte{0x00, 0x03}, stored) + got, found := s.Get(codeSizeKey) + require.True(t, found) + require.Equal(t, []byte{0x00, 0x03}, got) } func TestStoreWriteDeleteLegacyKey(t *testing.T) { @@ -533,14 +528,14 @@ func TestStoreFsyncConfig(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and commit with fsync disabled - cs := makeChangeSet(key, []byte{0xCC}, false) + cs := makeChangeSet(key, padLeft32(0xCC), false) require.NoError(t, store.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, store) // Data should be readable got, found := store.Get(key) require.True(t, found) - require.Equal(t, []byte{0xCC}, got) + require.Equal(t, padLeft32(0xCC), got) // Version should be updated require.Equal(t, int64(1), store.Version()) @@ -646,21 +641,21 @@ func TestMultipleApplyChangeSetsBeforeCommit(t *testing.T) { key1 := memiavlStorageKey(addr, slot1) key2 := memiavlStorageKey(addr, slot2) - cs1 := makeChangeSet(key1, []byte{0x11}, false) + cs1 := makeChangeSet(key1, padLeft32(0x11), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) - cs2 := makeChangeSet(key2, []byte{0x22}, false) + cs2 := makeChangeSet(key2, padLeft32(0x22), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) v1, ok := s.Get(key1) require.True(t, ok) - 
require.Equal(t, []byte{0x11}, v1) + require.Equal(t, padLeft32(0x11), v1) v2, ok := s.Get(key2) require.True(t, ok) - require.Equal(t, []byte{0x22}, v2) + require.Equal(t, padLeft32(0x22), v2) } func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { @@ -726,7 +721,7 @@ func TestLtHashUpdatedByDelete(t *testing.T) { slot := Slot{0xEE} key := memiavlStorageKey(addr, slot) - cs1 := makeChangeSet(key, []byte{0xFF}, false) + cs1 := makeChangeSet(key, padLeft32(0xFF), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) hashAfterWrite := s.RootHash() @@ -783,8 +778,8 @@ func TestOverwriteSameKeyInSingleBlock(t *testing.T) { key := memiavlStorageKey(addr, slot) pairs := []*iavl.KVPair{ - {Key: key, Value: []byte{0x01}}, - {Key: key, Value: []byte{0x02}}, + {Key: key, Value: padLeft32(0x01)}, + {Key: key, Value: padLeft32(0x02)}, } cs := &proto.NamedChangeSet{ Name: "evm", @@ -795,7 +790,7 @@ func TestOverwriteSameKeyInSingleBlock(t *testing.T) { v, ok := s.Get(key) require.True(t, ok) - require.Equal(t, []byte{0x02}, v, "last write should win") + require.Equal(t, padLeft32(0x02), v, "last write should win") } // ============================================================================= @@ -837,7 +832,7 @@ func TestStoreFsyncEnabled(t *testing.T) { v, ok := s.Get(memiavlStorageKey(Address{0x01}, Slot{0x01})) require.True(t, ok) - require.Equal(t, []byte{0x01}, v) + require.Equal(t, padLeft32(0x01), v) } // ============================================================================= @@ -987,7 +982,7 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) val, found := s.Get(key) require.True(t, found, "delete-then-write: key should exist") - require.Equal(t, []byte{0xBB}, val) + require.Equal(t, padLeft32(0xBB), val) }) } From faf48713218b59abb48a41dc4d5afe72ae509956 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 08:26:18 
-0500 Subject: [PATCH 098/119] make suggested change to pool --- sei-db/common/threading/adhoc_pool.go | 30 ++- sei-db/common/threading/elastic_pool.go | 57 +++--- sei-db/common/threading/fixed_pool.go | 53 ++--- sei-db/common/threading/pool.go | 21 +- sei-db/common/threading/pool_test.go | 203 ++++++++++++------- sei-db/db_engine/dbcache/cache_impl.go | 11 +- sei-db/db_engine/dbcache/cache_impl_test.go | 31 --- sei-db/db_engine/dbcache/shard.go | 10 +- sei-db/db_engine/dbcache/shard_test.go | 27 --- sei-db/state_db/sc/flatkv/store.go | 8 +- sei-db/state_db/sc/flatkv/store_lifecycle.go | 12 +- sei-db/state_db/sc/flatkv/store_write.go | 30 +-- 12 files changed, 231 insertions(+), 262 deletions(-) diff --git a/sei-db/common/threading/adhoc_pool.go b/sei-db/common/threading/adhoc_pool.go index d7d5ebc837..57a158067e 100644 --- a/sei-db/common/threading/adhoc_pool.go +++ b/sei-db/common/threading/adhoc_pool.go @@ -1,28 +1,36 @@ package threading -import ( - "context" - "fmt" -) +import "sync" var _ Pool = (*adHocPool)(nil) // adHocPool is a Pool that runs each task in a new goroutine. // Intended for use in unit tests or where performance is not important. -type adHocPool struct{} +type adHocPool struct { + wg sync.WaitGroup + closed bool +} // NewAdHocPool creates a Pool that runs each submitted task in a one-off goroutine. 
func NewAdHocPool() Pool { return &adHocPool{} } -func (p *adHocPool) Submit(ctx context.Context, task func()) error { +func (p *adHocPool) Submit(task func()) { if task == nil { - return fmt.Errorf("adhoc pool: nil task") + return } - if ctx.Err() != nil { - return ctx.Err() + if p.closed { + panic("threading: submit on closed pool") } - go task() - return nil + p.wg.Add(1) + go func() { + defer p.wg.Done() + task() + }() +} + +func (p *adHocPool) Close() { + p.closed = true + p.wg.Wait() } diff --git a/sei-db/common/threading/elastic_pool.go b/sei-db/common/threading/elastic_pool.go index 52e07bfbde..e9aec08d03 100644 --- a/sei-db/common/threading/elastic_pool.go +++ b/sei-db/common/threading/elastic_pool.go @@ -1,9 +1,6 @@ package threading -import ( - "context" - "fmt" -) +import "sync" var _ Pool = (*elasticPool)(nil) @@ -20,60 +17,64 @@ var _ Pool = (*elasticPool)(nil) // goroutine if all workers are busy. type elasticPool struct { workQueue chan func() - ctx context.Context + wg sync.WaitGroup + closeOnce sync.Once + closed bool } // NewElasticPool creates a pool with the given number of warm workers. Submitted // tasks are handed off to an idle warm worker if one is available, otherwise a // temporary goroutine is spawned. Tasks are never queued behind other tasks. 
func NewElasticPool( - ctx context.Context, name string, warmWorkers int, ) Pool { workQueue := make(chan func()) ep := &elasticPool{ workQueue: workQueue, - ctx: ctx, } + ep.wg.Add(warmWorkers) for i := 0; i < warmWorkers; i++ { - go ep.worker() + go func() { + defer ep.wg.Done() + ep.worker() + }() } - go func() { - <-ctx.Done() - for i := 0; i < warmWorkers; i++ { - workQueue <- nil - } - }() - return ep } -func (ep *elasticPool) Submit(ctx context.Context, task func()) error { +func (ep *elasticPool) Submit(task func()) { if task == nil { - return fmt.Errorf("elastic pool: nil task") + return + } + if ep.closed { + panic("threading: submit on closed pool") } select { - case <-ctx.Done(): - return ctx.Err() - case <-ep.ctx.Done(): - return fmt.Errorf("elastic pool is shut down") case ep.workQueue <- task: - return nil default: - // All warm workers are busy; spawn a temporary goroutine. - go task() - return nil + ep.wg.Add(1) + go func() { + defer ep.wg.Done() + task() + }() } } +// Close shuts down warm workers, waits for all in-flight tasks (including +// temporary goroutines) to finish, and returns. +func (ep *elasticPool) Close() { + ep.closed = true + ep.closeOnce.Do(func() { + close(ep.workQueue) + }) + ep.wg.Wait() +} + func (ep *elasticPool) worker() { for task := range ep.workQueue { - if task == nil { - return - } task() } } diff --git a/sei-db/common/threading/fixed_pool.go b/sei-db/common/threading/fixed_pool.go index 2e393ef5c3..2b627ab91c 100644 --- a/sei-db/common/threading/fixed_pool.go +++ b/sei-db/common/threading/fixed_pool.go @@ -1,9 +1,6 @@ package threading -import ( - "context" - "fmt" -) +import "sync" var _ Pool = (*fixedPool)(nil) @@ -11,13 +8,13 @@ var _ Pool = (*fixedPool)(nil) // More efficient than spawning large numbers of short lived goroutines. type fixedPool struct { workQueue chan func() - ctx context.Context + wg sync.WaitGroup + closeOnce sync.Once + closed bool } // Create a new work pool. 
func NewFixedPool( - // The work pool shuts down when the context is done. - ctx context.Context, // The name of the work pool. Used for metrics. name string, // The number of workers to create. @@ -33,45 +30,39 @@ func NewFixedPool( workQueue := make(chan func(), queueSize) fp := &fixedPool{ workQueue: workQueue, - ctx: ctx, } + fp.wg.Add(workers) for i := 0; i < workers; i++ { - go fp.worker() + go func() { + defer fp.wg.Done() + fp.worker() + }() } - go func() { - <-ctx.Done() - // Send a nil sentinel to each worker. Because nils are enqueued behind any - // buffered tasks, every previously-submitted task is guaranteed to complete - // before workers exit. - for i := 0; i < workers; i++ { - workQueue <- nil - } - }() - return fp } -func (fp *fixedPool) Submit(ctx context.Context, task func()) error { +func (fp *fixedPool) Submit(task func()) { if task == nil { - return fmt.Errorf("fixed pool: nil task") + return } - select { - case <-ctx.Done(): - return ctx.Err() - case <-fp.ctx.Done(): - return fmt.Errorf("fixed pool is shut down") - case fp.workQueue <- task: - return nil + if fp.closed { + panic("threading: submit on closed pool") } + fp.workQueue <- task +} + +func (fp *fixedPool) Close() { + fp.closed = true + fp.closeOnce.Do(func() { + close(fp.workQueue) + }) + fp.wg.Wait() } func (fp *fixedPool) worker() { for task := range fp.workQueue { - if task == nil { - return - } task() } } diff --git a/sei-db/common/threading/pool.go b/sei-db/common/threading/pool.go index 4af9cebfa1..b0aa7fd1c0 100644 --- a/sei-db/common/threading/pool.go +++ b/sei-db/common/threading/pool.go @@ -1,20 +1,15 @@ package threading -import "context" - // Pool is a pool of workers that can be used to execute tasks concurrently. type Pool interface { // Submit submits a task to the pool. The task must not be nil. + + // Although it is thread safe to call Submit() from concurrent goroutines, it is not thread safe to call + // Submit and Close() concurrently. 
+ Submit(task func()) + + // Close shuts down the pool, draining any buffered tasks, and blocks until all workers have exited. // - // If Submit is called concurrently with or after shutdown (i.e. when ctx is done/cancelled), the task may - // be silently dropped. Callers that need a guarantee of execution must - // ensure Submit happens-before shutdown. - // - // This method is permitted to return an error only under the following conditions: - // - the pool is shutting down (i.e. its context is done/cancelled) - // - the provided ctx parameter is done/cancelled before this method returns - // - invalid input (e.g. the task is nil) - // - // If this method returns an error, the task may or may not have been executed. - Submit(ctx context.Context, task func()) error + // Safe to call multiple times (idempotent). Not safe to call concurrently with Submit(). + Close() } diff --git a/sei-db/common/threading/pool_test.go b/sei-db/common/threading/pool_test.go index bd2844e597..de0dabf57b 100644 --- a/sei-db/common/threading/pool_test.go +++ b/sei-db/common/threading/pool_test.go @@ -1,7 +1,6 @@ package threading import ( - "context" "sync" "sync/atomic" "testing" @@ -24,36 +23,33 @@ func waitOrFail(t *testing.T, wg *sync.WaitGroup) { } } -func createPools(ctx context.Context) []struct { +type testPool struct { name string pool Pool -} { - return []struct { - name string - pool Pool - }{ - {"FixedPool", NewFixedPool(ctx, "test-fixed", 4, 16)}, - {"ElasticPool", NewElasticPool(ctx, "test-elastic", 4)}, +} + +func createPools() []testPool { + return []testPool{ + {"FixedPool", NewFixedPool("test-fixed", 4, 16)}, + {"ElasticPool", NewElasticPool("test-elastic", 4)}, {"AdHocPool", NewAdHocPool()}, } } func TestPool_AllTasksComplete(t *testing.T) { - for _, tc := range createPools(t.Context()) { + for _, tc := range createPools() { t.Run(tc.name, func(t *testing.T) { + defer tc.pool.Close() const n = 100 var counter atomic.Int64 var wg sync.WaitGroup wg.Add(n) for i := 0; i 
< n; i++ { - err := tc.pool.Submit(t.Context(), func() { + tc.pool.Submit(func() { counter.Add(1) wg.Done() }) - if err != nil { - t.Fatalf("Submit failed: %v", err) - } } waitOrFail(t, &wg) @@ -65,22 +61,20 @@ func TestPool_AllTasksComplete(t *testing.T) { } func TestPool_BlockedTasksDontCompleteUntilUnblocked(t *testing.T) { - for _, tc := range createPools(t.Context()) { + for _, tc := range createPools() { t.Run(tc.name, func(t *testing.T) { + defer tc.pool.Close() blocker := make(chan struct{}) var counter atomic.Int64 var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { - err := tc.pool.Submit(t.Context(), func() { + tc.pool.Submit(func() { defer wg.Done() <-blocker counter.Add(1) }) - if err != nil { - t.Fatalf("Submit failed: %v", err) - } } time.Sleep(10 * time.Millisecond) @@ -101,45 +95,37 @@ func TestPool_BlockedTasksDontCompleteUntilUnblocked(t *testing.T) { func TestFixedPool_SubmitBlocksWhenFull(t *testing.T) { const workers = 2 const queueSize = 2 - pool := NewFixedPool(t.Context(), "test-fixed-block", workers, queueSize) + pool := NewFixedPool("test-fixed-block", workers, queueSize) + defer pool.Close() blocker := make(chan struct{}) var completed atomic.Int64 var wg sync.WaitGroup - // Phase 1: occupy all workers with blocking tasks. wg.Add(workers) for i := 0; i < workers; i++ { - err := pool.Submit(t.Context(), func() { + pool.Submit(func() { defer wg.Done() <-blocker completed.Add(1) }) - if err != nil { - t.Fatalf("Submit failed: %v", err) - } } time.Sleep(10 * time.Millisecond) - // Phase 2: fill the queue buffer. wg.Add(queueSize) for i := 0; i < queueSize; i++ { - err := pool.Submit(t.Context(), func() { + pool.Submit(func() { defer wg.Done() <-blocker completed.Add(1) }) - if err != nil { - t.Fatalf("Submit failed: %v", err) - } } - // Phase 3: the next Submit must block — queue full, all workers busy. 
wg.Add(1) submitDone := make(chan struct{}) start := time.Now() go func() { - _ = pool.Submit(t.Context(), func() { + pool.Submit(func() { defer wg.Done() <-blocker completed.Add(1) @@ -171,7 +157,8 @@ func TestFixedPool_SubmitBlocksWhenFull(t *testing.T) { func TestElasticPool_ScalesBeyondWarmWorkers(t *testing.T) { const warmWorkers = 2 const totalTasks = 10 - pool := NewElasticPool(t.Context(), "test-elastic-scale", warmWorkers) + pool := NewElasticPool("test-elastic-scale", warmWorkers) + defer pool.Close() blocker := make(chan struct{}) var started atomic.Int64 @@ -179,17 +166,13 @@ func TestElasticPool_ScalesBeyondWarmWorkers(t *testing.T) { wg.Add(totalTasks) for i := 0; i < totalTasks; i++ { - err := pool.Submit(t.Context(), func() { + pool.Submit(func() { defer wg.Done() started.Add(1) <-blocker }) - if err != nil { - t.Fatalf("Submit failed: %v", err) - } } - // All tasks should start promptly — elastic pool spawns extra goroutines. time.Sleep(50 * time.Millisecond) if got := started.Load(); got <= int64(warmWorkers) { t.Errorf("expected started > %d (warm workers), got %d", warmWorkers, got) @@ -202,50 +185,124 @@ func TestElasticPool_ScalesBeyondWarmWorkers(t *testing.T) { waitOrFail(t, &wg) } -func TestFixedPool_SubmitReturnsErrorOnCancelledContext(t *testing.T) { - poolCtx, poolCancel := context.WithCancel(t.Context()) - defer poolCancel() +func TestPool_NilTaskIsIgnored(t *testing.T) { + for _, tc := range createPools() { + t.Run(tc.name, func(t *testing.T) { + defer tc.pool.Close() + tc.pool.Submit(nil) + }) + } +} - // Use a zero-buffer queue so submit blocks once the worker is busy. 
- pool := NewFixedPool(poolCtx, "test-ctx", 1, 0) +func TestElasticPool_CloseWaitsForAdHocGoroutines(t *testing.T) { + const warmWorkers = 1 + const totalTasks = 20 + pool := NewElasticPool("test-elastic-close", warmWorkers) blocker := make(chan struct{}) - defer close(blocker) + var completed atomic.Int64 - _ = pool.Submit(poolCtx, func() { <-blocker }) - time.Sleep(10 * time.Millisecond) + // Fill the warm worker, then submit more tasks to force ad-hoc goroutines. + for i := 0; i < totalTasks; i++ { + pool.Submit(func() { + <-blocker + completed.Add(1) + }) + } + + time.Sleep(50 * time.Millisecond) + + close(blocker) + + closeDone := make(chan struct{}) + go func() { + pool.Close() + close(closeDone) + }() - submitCtx, submitCancel := context.WithCancel(t.Context()) - submitCancel() + select { + case <-closeDone: + case <-time.After(testTimeout): + t.Fatal("timed out waiting for Close to return") + } - err := pool.Submit(submitCtx, func() {}) - if err == nil { - t.Error("expected error from Submit with cancelled context") + if got := completed.Load(); got != totalTasks { + t.Errorf("expected %d tasks completed before Close returned, got %d", totalTasks, got) } } -func TestPool_SubmitAfterShutdown(t *testing.T) { - for _, tc := range []struct { - name string - pool Pool - }{ - {"FixedPool", func() Pool { - ctx, cancel := context.WithCancel(t.Context()) - p := NewFixedPool(ctx, "test-shutdown", 2, 4) - cancel() - return p - }()}, - {"ElasticPool", func() Pool { - ctx, cancel := context.WithCancel(t.Context()) - p := NewElasticPool(ctx, "test-shutdown", 2) - cancel() - return p - }()}, - } { - t.Run(tc.name, func(t *testing.T) { - time.Sleep(10 * time.Millisecond) - // Must not panic. May or may not return an error. 
- _ = tc.pool.Submit(t.Context(), func() {}) +func TestFixedPool_CloseDrainsPendingTasks(t *testing.T) { + pool := NewFixedPool("test-drain", 1, 100) + + blocker := make(chan struct{}) + var completed atomic.Int64 + + const pendingTasks = 50 + var wg sync.WaitGroup + wg.Add(1 + pendingTasks) + + pool.Submit(func() { + defer wg.Done() + <-blocker + completed.Add(1) + }) + time.Sleep(10 * time.Millisecond) + + for i := 0; i < pendingTasks; i++ { + pool.Submit(func() { + defer wg.Done() + completed.Add(1) }) } + + // Unblock the worker, then Close should drain all buffered tasks. + close(blocker) + pool.Close() + + expected := int64(1 + pendingTasks) + if got := completed.Load(); got != expected { + t.Errorf("expected %d tasks drained on Close, got %d", expected, got) + } +} + +func TestFixedPool_CloseBlocksUntilDrained(t *testing.T) { + pool := NewFixedPool("test-close-blocks", 2, 0) + + var completed atomic.Int64 + blocker := make(chan struct{}) + + pool.Submit(func() { + <-blocker + completed.Add(1) + }) + pool.Submit(func() { + <-blocker + completed.Add(1) + }) + time.Sleep(10 * time.Millisecond) + + closeDone := make(chan struct{}) + go func() { + pool.Close() + close(closeDone) + }() + + time.Sleep(20 * time.Millisecond) + select { + case <-closeDone: + t.Fatal("Close returned while tasks are still running") + default: + } + + close(blocker) + + select { + case <-closeDone: + case <-time.After(testTimeout): + t.Fatal("timed out waiting for Close to return") + } + + if got := completed.Load(); got != 2 { + t.Errorf("expected 2 tasks completed, got %d", got) + } } diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index 8dc5704f50..dfcaa24cee 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -98,13 +98,10 @@ func (c *cache) BatchSet(updates []CacheUpdate) error { var wg sync.WaitGroup for shardIndex, shardEntries := range shardMap { wg.Add(1) - err := 
c.miscPool.Submit(c.ctx, func() { + c.miscPool.Submit(func() { defer wg.Done() c.shards[shardIndex].BatchSet(shardEntries) }) - if err != nil { - return fmt.Errorf("failed to submit batch set: %w", err) - } } wg.Wait() @@ -124,8 +121,7 @@ func (c *cache) BatchGet(read Reader, keys map[string]types.BatchGetResult) erro var wg sync.WaitGroup for shardIndex, subMap := range work { wg.Add(1) - - err := c.miscPool.Submit(c.ctx, func() { + c.miscPool.Submit(func() { defer wg.Done() err := c.shards[shardIndex].BatchGet(read, subMap) if err != nil { @@ -134,9 +130,6 @@ func (c *cache) BatchGet(read Reader, keys map[string]types.BatchGetResult) erro } } }) - if err != nil { - return fmt.Errorf("failed to submit batch get: %w", err) - } } wg.Wait() diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 4c44d5283a..d5f5b9a557 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -341,16 +341,6 @@ func TestCacheBatchSetEmpty(t *testing.T) { require.NoError(t, c.BatchSet([]CacheUpdate{})) } -func TestCacheBatchSetPoolFailure(t *testing.T) { - readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) - - err := c.BatchSet([]CacheUpdate{ - {Key: []byte("k"), Value: []byte("v")}, - }) - require.Error(t, err) -} - // --------------------------------------------------------------------------- // BatchGet // --------------------------------------------------------------------------- @@ -434,27 +424,6 @@ func TestCacheBatchGetEmpty(t *testing.T) { require.NoError(t, c.BatchGet(read, keys)) } -func TestCacheBatchGetPoolFailure(t *testing.T) { - readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) - - keys := map[string]types.BatchGetResult{"k": {}} - err := c.BatchGet(noopRead, keys) - 
require.Error(t, err) -} - -func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { - miscPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, &failPool{}, miscPool) - - keys := map[string]types.BatchGetResult{"a": {}, "b": {}} - require.NoError(t, c.BatchGet(noopRead, keys)) - - for k, r := range keys { - require.Error(t, r.Error, "key=%q should have per-key error", k) - } -} - // --------------------------------------------------------------------------- // Cross-shard distribution // --------------------------------------------------------------------------- diff --git a/sei-db/db_engine/dbcache/shard.go b/sei-db/db_engine/dbcache/shard.go index 6a71105add..21e3f619fa 100644 --- a/sei-db/db_engine/dbcache/shard.go +++ b/sei-db/db_engine/dbcache/shard.go @@ -183,13 +183,10 @@ func (s *shard) getUnknown(read Reader, entry *shardEntry, key []byte) ([]byte, s.lock.Unlock() s.metrics.reportCacheMisses(1) startTime := time.Now() - err := s.readPool.Submit(s.ctx, func() { + s.readPool.Submit(func() { value, _, readErr := read(key) entry.injectValue(key, readResult{value: value, err: readErr}) }) - if err != nil { - return nil, false, fmt.Errorf("failed to schedule read: %w", err) - } result, err := threading.InterruptiblePull(s.ctx, valueChan) s.metrics.reportCacheMissLatency(time.Since(startTime)) if err != nil { @@ -307,13 +304,10 @@ func (s *shard) BatchGet(read Reader, keys map[string]types.BatchGetResult) erro for i := range pending { if pending[i].needsSchedule { p := &pending[i] - err := s.readPool.Submit(s.ctx, func() { + s.readPool.Submit(func() { value, _, readErr := read([]byte(p.key)) p.entry.valueChan <- readResult{value: value, err: readErr} }) - if err != nil { - return fmt.Errorf("failed to schedule read: %w", err) - } } } diff --git a/sei-db/db_engine/dbcache/shard_test.go b/sei-db/db_engine/dbcache/shard_test.go index 5e438dd72b..534eb57d03 100644 --- 
a/sei-db/db_engine/dbcache/shard_test.go +++ b/sei-db/db_engine/dbcache/shard_test.go @@ -842,33 +842,6 @@ func TestConcurrentBatchSetAndBatchGet(t *testing.T) { wg.Wait() } -// --------------------------------------------------------------------------- -// Pool submission failure -// --------------------------------------------------------------------------- - -type failPool struct{} - -func (fp *failPool) Submit(_ context.Context, _ func()) error { - return errors.New("pool exhausted") -} - -func TestGetPoolSubmitFailure(t *testing.T) { - readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) - s, _ := NewShard(context.Background(), &failPool{}, 4096, 0) - - _, _, err := s.Get(readFunc, []byte("k"), true) - require.Error(t, err) -} - -func TestBatchGetPoolSubmitFailure(t *testing.T) { - readFunc := Reader(func(key []byte) ([]byte, bool, error) { return []byte("v"), true, nil }) - s, _ := NewShard(context.Background(), &failPool{}, 4096, 0) - - keys := map[string]types.BatchGetResult{"k": {}} - err := s.BatchGet(readFunc, keys) - require.Error(t, err) -} - // --------------------------------------------------------------------------- // Large values // --------------------------------------------------------------------------- diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index be3d6ae72a..632382635a 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -158,10 +158,10 @@ func NewCommitStore( coreCount := runtime.NumCPU() readPoolSize := int(cfg.ReaderThreadsPerCore*float64(coreCount) + float64(cfg.ReaderConstantThreadCount)) - readPool := threading.NewFixedPool(ctx, "flatkv-read", readPoolSize, cfg.ReaderPoolQueueSize) + readPool := threading.NewFixedPool("flatkv-read", readPoolSize, cfg.ReaderPoolQueueSize) miscPoolSize := int(cfg.MiscPoolThreadsPerCore*float64(coreCount) + float64(cfg.MiscConstantThreadCount)) - miscPool := 
threading.NewElasticPool(ctx, "flatkv-misc", miscPoolSize) + miscPool := threading.NewElasticPool("flatkv-misc", miscPoolSize) return &CommitStore{ ctx: ctx, @@ -189,10 +189,10 @@ func (s *CommitStore) resetPools() { s.ctx, s.cancel = context.WithCancel(context.Background()) readPoolSize := int(s.config.ReaderThreadsPerCore*float64(coreCount) + float64(s.config.ReaderConstantThreadCount)) - s.readPool = threading.NewFixedPool(s.ctx, "flatkv-read", readPoolSize, s.config.ReaderPoolQueueSize) + s.readPool = threading.NewFixedPool("flatkv-read", readPoolSize, s.config.ReaderPoolQueueSize) miscPoolSize := int(s.config.MiscPoolThreadsPerCore*float64(coreCount) + float64(s.config.MiscConstantThreadCount)) - s.miscPool = threading.NewElasticPool(s.ctx, "flatkv-misc", miscPoolSize) + s.miscPool = threading.NewElasticPool("flatkv-misc", miscPoolSize) } func (s *CommitStore) flatkvDir() string { diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go index 73075ec101..56f0a085c8 100644 --- a/sei-db/state_db/sc/flatkv/store_lifecycle.go +++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go @@ -69,10 +69,16 @@ func (s *CommitStore) closeDBsOnly() error { return nil } -// Close closes all database instances, cancels the store's context to -// stop background goroutines (pools, caches, metrics), and releases the -// file lock. +// Close drains thread pools, closes all database instances, cancels the +// store's context to stop background goroutines (caches, metrics), and +// releases the file lock. 
func (s *CommitStore) Close() error { + if s.readPool != nil { + s.readPool.Close() + } + if s.miscPool != nil { + s.miscPool.Close() + } err := s.closeDBsOnly() s.cancel() diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index d1b62fbee8..08a2122543 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -324,13 +324,10 @@ func (s *CommitStore) flushAllDBs() error { var wg sync.WaitGroup wg.Add(4) for i, db := range []types.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} { - err := s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { defer wg.Done() errs[i] = db.Flush() }) - if err != nil { - return fmt.Errorf("failed to submit flush: %w", err) - } } wg.Wait() names := [4]string{"accountDB", "codeDB", "storageDB", "legacyDB"} @@ -438,13 +435,10 @@ func (s *CommitStore) commitBatches(version int64) error { var wg sync.WaitGroup wg.Add(len(pending)) for i, p := range pending { - err := s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { errs[i] = p.batch.Commit(syncOpt) wg.Done() }) - if err != nil { - return fmt.Errorf("failed to submit commit: %w", err) - } } wg.Wait() @@ -561,46 +555,34 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( if len(storageBatch) > 0 { wg.Add(1) - err = s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { defer wg.Done() storageErr = s.storageDB.BatchGet(storageBatch) }) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) - } } if len(accountBatch) > 0 { wg.Add(1) - err = s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { defer wg.Done() accountErr = s.accountDB.BatchGet(accountBatch) }) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) - } } if len(codeBatch) > 0 { wg.Add(1) - err = s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { defer wg.Done() codeErr = 
s.codeDB.BatchGet(codeBatch) }) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) - } } if len(legacyBatch) > 0 { wg.Add(1) - err = s.miscPool.Submit(s.ctx, func() { + s.miscPool.Submit(func() { defer wg.Done() legacyErr = s.legacyDB.BatchGet(legacyBatch) }) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to submit batch get: %w", err) - } } wg.Wait() From 10172fd528d31e922cba63a147ca3066e82aa080 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 10:08:14 -0500 Subject: [PATCH 099/119] started big refactor --- sei-db/state_db/sc/flatkv/keys.go | 2 +- sei-db/state_db/sc/flatkv/store.go | 18 +- sei-db/state_db/sc/flatkv/store_write.go | 204 ++++++++---------- sei-db/state_db/sc/flatkv/store_write_test.go | 8 +- .../state_db/sc/flatkv/vtype/account_data.go | 16 +- sei-db/state_db/sc/flatkv/vtype/base_types.go | 51 +++++ .../sc/flatkv/vtype/pending_account_write.go | 91 ++++++-- 7 files changed, 230 insertions(+), 160 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/vtype/base_types.go diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 3070296aef..11a6720d95 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -119,7 +119,7 @@ func PrefixEnd(prefix []byte) []byte { // // CodeHash == CodeHash{} (all zeros) means the account has no code (EOA). // Note: empty code contracts have CodeHash = keccak256("") which is non-zero. -type AccountValue struct { +type AccountValue struct { // TODO delete Balance Balance Nonce uint64 CodeHash CodeHash diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index 6eee8326b9..d0f148e8d8 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -53,20 +53,6 @@ const ( // dataDBDirs lists all data DB directory names (used for per-DB LtHash iteration). 
var dataDBDirs = []string{accountDBDir, codeDBDir, storageDBDir, legacyDBDir} -// pendingAccountWrite tracks a buffered account write. -// Uses AccountValue structure: balance(32) || nonce(8) || codehash(32) -// -// Account-field deletes (KVPair.Delete for nonce or codehash) reset the -// individual field within value. When all fields become zero after resets, -// isDelete is set to true and the accountDB row is physically deleted at -// commit time. Any subsequent write to the same address within the same -// block clears isDelete back to false (row is recreated). -type pendingAccountWrite struct { - addr Address - value AccountValue - isDelete bool // true = row will be physically deleted (all fields zero) -} - // CommitStore implements flatkv.Store for EVM state storage. // NOT thread-safe; callers must serialize all operations. type CommitStore struct { @@ -96,7 +82,7 @@ type CommitStore struct { perDBWorkingLtHash map[string]*lthash.LtHash // Pending writes buffer - accountWrites map[string]*pendingAccountWrite + accountWrites map[string]*vtype.AccountData codeWrites map[string]*vtype.CodeData storageWrites map[string]*vtype.StorageData legacyWrites map[string]*vtype.LegacyData @@ -160,7 +146,7 @@ func NewCommitStore( cancel: cancel, config: *cfg, localMeta: make(map[string]*LocalMeta), - accountWrites: make(map[string]*pendingAccountWrite), + accountWrites: make(map[string]*vtype.AccountData), codeWrites: make(map[string]*vtype.CodeData), storageWrites: make(map[string]*vtype.StorageData), legacyWrites: make(map[string]*vtype.LegacyData), diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 75fe635074..2804028042 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -1,7 +1,6 @@ package flatkv import ( - "encoding/binary" "errors" "fmt" "sync" @@ -37,18 +36,25 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { 
s.phaseTimer.SetPhase("apply_change_sets_prepare") s.pendingChangeSets = append(s.pendingChangeSets, cs...) - // Collect LtHash pairs per DB (using internal key format) + // TODO refactor into a parse+sort phase + + // Gather LTHash pairs for accounts. Accounts are handled seperately from the other DBs, + // since accounts have a special workflow due to having multiple fields. + s.phaseTimer.SetPhase("apply_change_sets_gather_account_pairs") + accountWrites, err := s.gatherAccountUpdates(cs) + if err != nil { + return fmt.Errorf("failed to gather account updates: %w", err) + } + accountPairs, err := s.gatherAccountPairs(accountWrites, accountOld) + if err != nil { + return fmt.Errorf("failed to gather account pairs: %w", err) + } + + // For all remaining DBs, collect LtHash pairs. + s.phaseTimer.SetPhase("apply_change_sets_collect_pairs") var storagePairs []lthash.KVPairWithLastValue var codePairs []lthash.KVPairWithLastValue var legacyPairs []lthash.KVPairWithLastValue - // Account pairs are collected at the end after all account changes are processed - - // Pre-capture raw encoded account bytes so LtHash delta uses the correct - // baseline across multiple ApplyChangeSets calls before Commit. - // nil means the account didn't exist (no phantom MixOut for new accounts). - oldAccountRawValues := make(map[string][]byte) - - s.phaseTimer.SetPhase("apply_change_sets_collect_storage_pairs") // For each entry in the change set, accumulate changes for the appropriate DB. 
for _, namedCS := range cs { @@ -71,40 +77,19 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return fmt.Errorf("invalid storage value length: got %d, expected 32", len(pair.Value)) } storagePairs = s.accumulateEvmStorageChanges(keyBytes, pair, storageOld, storagePairs) - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - err := s.accumulateEvmAccountFieldChanges(kind, keyBytes, pair, accountOld, oldAccountRawValues) - if err != nil { - return fmt.Errorf("failed to apply EVM account field change: %w", err) - } + case evm.EVMKeyCode: codePairs = s.accumulateEvmCodeChanges(keyBytes, pair, codeOld, codePairs) case evm.EVMKeyLegacy: legacyPairs = s.accumulateEvmLegacyChanges(keyBytes, pair, legacyOld, legacyPairs) + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + // Intentional no-op, accounts are handled separately. + default: + // TODO return an error here } } } - s.phaseTimer.SetPhase("apply_change_sets_collect_account_pairs") - - accountPairs := make([]lthash.KVPairWithLastValue, 0, len(oldAccountRawValues)) - for addrStr, oldRaw := range oldAccountRawValues { - paw, ok := s.accountWrites[addrStr] - if !ok { - continue - } - - var encodedValue []byte - if !paw.isDelete { - encodedValue = paw.value.Encode() - } - accountPairs = append(accountPairs, lthash.KVPairWithLastValue{ - Key: AccountKey(paw.addr), - Value: encodedValue, - LastValue: oldRaw, - Delete: paw.isDelete, - }) - } - s.phaseTimer.SetPhase("apply_change_compute_lt_hash") // Per-DB LTHash updates @@ -254,82 +239,82 @@ func (s *CommitStore) accumulateEvmLegacyChanges( }) } -// Apply a single nonce or codehash change to the account db. -func (s *CommitStore) accumulateEvmAccountFieldChanges( - // Whether this is a nonce or codehash change. - kind evm.EVMKeyKind, - // The key with the prefix stripped (addr, 20 bytes). - keyBytes []byte, - // The change to apply. - pair *iavl.KVPair, - // Old account values. 
- accountOld map[string]types.BatchGetResult, - // Snapshots of old encoded account bytes for LtHash delta computation. - // This function populates entries the first time each address is seen. - oldAccountRawValues map[string][]byte, -) error { - addr, ok := AddressFromBytes(keyBytes) - if !ok { - return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) - } - addrKey := string(AccountKey(addr)) - - // Snapshot the old encoded bytes the first time we touch this address, - // so the LtHash delta uses the correct baseline across multiple - // ApplyChangeSets calls before Commit. - if _, seen := oldAccountRawValues[addrKey]; !seen { - if paw, ok := s.accountWrites[addrKey]; ok { - if paw.isDelete { - oldAccountRawValues[addrKey] = nil +// An account has multiple distict parts, and each part has its own key and can be set in a different changeset. +// This method iterates over the changesets and combines updates for each account into a single PendingAccountWrite. +func (s *CommitStore) gatherAccountUpdates(cs []*proto.NamedChangeSet) (map[string]*vtype.PendingAccountWrite, error) { + updates := make(map[string]*vtype.PendingAccountWrite) + + for _, cs := range cs { + if cs.Changeset.Pairs == nil { + continue + } + + for _, pair := range cs.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + // FUTURE WORK: we also need to add a kind for balance changes. 
+ if kind != evm.EVMKeyNonce && kind != evm.EVMKeyCodeHash { + // This is not an account field change, skip + continue + } + + keyStr := string(keyBytes) + + // Note: PendingAccountWrite can be used as nil, so no need to bootstrap the map entries + if kind == evm.EVMKeyNonce { + nonce, err := vtype.ParseNonce(pair.Value) + if err != nil { + return nil, fmt.Errorf("invalid nonce value: %w", err) + } + updates[keyStr] = updates[keyStr].SetNonce(nonce) } else { - oldAccountRawValues[addrKey] = paw.value.Encode() + codeHash, err := vtype.ParseCodeHash(pair.Value) + if err != nil { + return nil, fmt.Errorf("invalid codehash value: %w", err) + } + updates[keyStr] = updates[keyStr].SetCodeHash(codeHash) } - } else if result, ok := accountOld[addrKey]; ok { - oldAccountRawValues[addrKey] = result.Value - } else { - oldAccountRawValues[addrKey] = nil } } - paw := s.accountWrites[addrKey] - if paw == nil { - var existingValue AccountValue - result := accountOld[addrKey] - if result.IsFound() && result.Value != nil { - av, err := DecodeAccountValue(result.Value) + return updates, nil +} + +// For each update being applied to an account, gather the new/old values for use by LtHash delta computation. +func (s *CommitStore) gatherAccountPairs( + // Writes being performed. Writes to different account fields are combined per account. + pendingWrites map[string]*vtype.PendingAccountWrite, + // Account values from the database. 
+ databaseAccountValues map[string]types.BatchGetResult, +) ([]lthash.KVPairWithLastValue, error) { + + result := make([]lthash.KVPairWithLastValue, 0, len(pendingWrites)) + + for addrStr, pendingWrite := range pendingWrites { + var oldValue *vtype.AccountData + + if stagedWrite, ok := s.accountWrites[addrStr]; ok { + // We've got a pending write staged in memory + oldValue = stagedWrite + } else if dbValue, ok := databaseAccountValues[addrStr]; ok { + // This account is in the DB + var err error + oldValue, err = vtype.DeserializeAccountData(dbValue.Value) if err != nil { - return fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) + return nil, fmt.Errorf("invalid account data in DB: %w", err) } - existingValue = av } - paw = &pendingAccountWrite{addr: addr, value: existingValue} - s.accountWrites[addrKey] = paw - } - if pair.Delete { - if kind == evm.EVMKeyNonce { - paw.value.Nonce = 0 - } else { - paw.value.CodeHash = CodeHash{} - } - paw.isDelete = paw.value.IsEmpty() - } else { - if kind == evm.EVMKeyNonce { - if len(pair.Value) != NonceLen { - return fmt.Errorf("invalid nonce value length: got %d, expected %d", - len(pair.Value), NonceLen) - } - paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) - } else { - if len(pair.Value) != CodeHashLen { - return fmt.Errorf("invalid codehash value length: got %d, expected %d", - len(pair.Value), CodeHashLen) - } - copy(paw.value.CodeHash[:], pair.Value) - } - paw.isDelete = paw.value.IsEmpty() + newValue := pendingWrite.Merge(oldValue, s.committedVersion+1) + + result = append(result, lthash.KVPairWithLastValue{ + Key: []byte(addrStr), + Value: newValue.Serialize(), + LastValue: oldValue.Serialize(), + Delete: newValue.IsDelete(), + }) } - return nil + + return result, nil } // Commit persists buffered writes and advances the version. 
@@ -413,7 +398,7 @@ func (s *CommitStore) flushAllDBs() error { // clearPendingWrites clears all pending write buffers func (s *CommitStore) clearPendingWrites() { - s.accountWrites = make(map[string]*pendingAccountWrite) + s.accountWrites = make(map[string]*vtype.AccountData) s.codeWrites = make(map[string]*vtype.CodeData) s.storageWrites = make(map[string]*vtype.StorageData) s.legacyWrites = make(map[string]*vtype.LegacyData) @@ -440,15 +425,14 @@ func (s *CommitStore) commitBatches(version int64) error { batch := s.accountDB.NewBatch() defer func() { _ = batch.Close() }() - for _, paw := range s.accountWrites { - key := AccountKey(paw.addr) - if paw.isDelete { + for keyStr, accountWrite := range s.accountWrites { + key := []byte(keyStr) // TODO verify this is correct! + if accountWrite.IsDelete() { if err := batch.Delete(key); err != nil { return fmt.Errorf("accountDB delete: %w", err) } } else { - encoded := EncodeAccountValue(paw.value) - if err := batch.Set(key, encoded); err != nil { + if err := batch.Set(key, accountWrite.Serialize()); err != nil { return fmt.Errorf("accountDB set: %w", err) } } @@ -673,8 +657,8 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( if _, done := accountOld[k]; done { continue } - if paw, ok := s.accountWrites[k]; ok { - accountOld[k] = types.BatchGetResult{Value: EncodeAccountValue(paw.value)} + if accountWrite, ok := s.accountWrites[k]; ok { + accountOld[k] = types.BatchGetResult{Value: accountWrite.Serialize()} } else { accountBatch[k] = types.BatchGetResult{} } diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index b1bc2db546..5446c56251 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -759,10 +759,10 @@ func TestLtHashAccountFieldMerge(t *testing.T) { require.Len(t, s.accountWrites, 1, "both nonce and codehash should merge into one AccountValue") - paw := 
s.accountWrites[string(addr[:])] - require.NotNil(t, paw) - require.Equal(t, uint64(10), paw.value.Nonce) - require.Equal(t, codeHash, paw.value.CodeHash) + accountWrite := s.accountWrites[string(addr[:])] + require.NotNil(t, accountWrite) + require.Equal(t, uint64(10), accountWrite.GetNonce()) + require.Equal(t, codeHash, accountWrite.GetCodeHash()) } // ============================================================================= diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index 4e03004a00..e536828c5f 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -86,12 +86,12 @@ func (a *AccountData) GetSerializationVersion() AccountDataVersion { // Get the account's block height. func (a *AccountData) GetBlockHeight() int64 { - return int64(binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart])) //nolint:gosec // block height is always within int64 range + return int64(binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart])) //nolint:gosec } // Get the account's balance. -func (a *AccountData) GetBalance() *[32]byte { - return (*[32]byte)(a.data[accountBalanceStart:accountNonceStart]) +func (a *AccountData) GetBalance() *Balance { + return (*Balance)(a.data[accountBalanceStart:accountNonceStart]) } // Get the account's nonce. @@ -100,8 +100,8 @@ func (a *AccountData) GetNonce() uint64 { } // Get the account's code hash. -func (a *AccountData) GetCodeHash() *[32]byte { - return (*[32]byte)(a.data[accountCodeHashStart:accountDataLength]) +func (a *AccountData) GetCodeHash() *CodeHash { + return (*CodeHash)(a.data[accountCodeHashStart:accountDataLength]) } // Check if this account data signifies a deletion operation. A deletion operation is automatically @@ -124,12 +124,12 @@ func (a *AccountData) Copy() *AccountData { // Set the account's block height when this account was last modified/touched. 
Returns self. func (a *AccountData) SetBlockHeight(blockHeight int64) *AccountData { - binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative + binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], uint64(blockHeight)) //nolint:gosec return a } // Set the account's balance. Returns self. -func (a *AccountData) SetBalance(balance *[32]byte) *AccountData { +func (a *AccountData) SetBalance(balance *Balance) *AccountData { copy(a.data[accountBalanceStart:accountNonceStart], balance[:]) return a } @@ -141,7 +141,7 @@ func (a *AccountData) SetNonce(nonce uint64) *AccountData { } // Set the account's code hash. Returns self. -func (a *AccountData) SetCodeHash(codeHash *[32]byte) *AccountData { +func (a *AccountData) SetCodeHash(codeHash *CodeHash) *AccountData { copy(a.data[accountCodeHashStart:accountDataLength], codeHash[:]) return a } diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types.go b/sei-db/state_db/sc/flatkv/vtype/base_types.go new file mode 100644 index 0000000000..4ae0e251ea --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/base_types.go @@ -0,0 +1,51 @@ +package vtype + +import ( + "encoding/binary" + "fmt" +) + +const ( + AddressLen = 20 + CodeHashLen = 32 + NonceLen = 8 + SlotLen = 32 + BalanceLen = 32 +) + +// Address is an EVM address (20 bytes). +type Address [AddressLen]byte + +// CodeHash is a contract code hash (32 bytes). +type CodeHash [CodeHashLen]byte + +// Slot is a storage slot key (32 bytes). +type Slot [SlotLen]byte + +// Balance is an EVM balance (32 bytes, big-endian uint256). +type Balance [BalanceLen]byte + +// ParseNonce parses a nonce value from a byte slice. 
+func ParseNonce(b []byte) (uint64, error) { + if len(b) != NonceLen { + return 0, fmt.Errorf("invalid nonce value length: got %d, expected %d", + len(b), NonceLen) + } + return binary.BigEndian.Uint64(b), nil +} + +func ParseCodeHash(b []byte) (*CodeHash, error) { + if len(b) != CodeHashLen { + return nil, fmt.Errorf( + "invalid codehash value length: got %d, expected %d", + len(b), CodeHashLen, + ) + } + + var result CodeHash + copy(result[:], b) + + return &result, nil +} + +// TODO implement others!!! diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go index f4d96ab6de..19a6141cbb 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go @@ -4,12 +4,13 @@ package vtype // Each field has a value and a flag indicating whether it has been set. Only set fields are // applied when merging into a base AccountData. // -// A PendingAccountWrite should only be created when there is at least one change to record. +// It is legal to operate on a nil PendingAccountWrite. A nil PendingAccountWrite will always return 0s from getters, +// and will return a non-nil result when a setter is called. type PendingAccountWrite struct { - balance *[32]byte + balance *Balance nonce uint64 nonceSet bool - codeHash *[32]byte + codeHash *CodeHash } // NewPendingAccountWrite creates a new PendingAccountWrite with no fields set. @@ -18,32 +19,70 @@ func NewPendingAccountWrite() *PendingAccountWrite { } // GetBalance returns the pending balance value, or nil if not set. -func (p *PendingAccountWrite) GetBalance() *[32]byte { return p.balance } +func (p *PendingAccountWrite) GetBalance() *Balance { + if p == nil { + zero := Balance{} + return &zero + } + return p.balance +} // IsBalanceSet reports whether the balance has been set in this pending write. 
-func (p *PendingAccountWrite) IsBalanceSet() bool { return p.balance != nil } +func (p *PendingAccountWrite) IsBalanceSet() bool { + if p == nil { + return false + } + return p.balance != nil +} // GetNonce returns the pending nonce value. -func (p *PendingAccountWrite) GetNonce() uint64 { return p.nonce } +func (p *PendingAccountWrite) GetNonce() uint64 { + if p == nil { + return 0 + } + return p.nonce +} // IsNonceSet reports whether the nonce has been set in this pending write. -func (p *PendingAccountWrite) IsNonceSet() bool { return p.nonceSet } +func (p *PendingAccountWrite) IsNonceSet() bool { + if p == nil { + return false + } + return p.nonceSet +} // GetCodeHash returns the pending code hash value, or nil if not set. -func (p *PendingAccountWrite) GetCodeHash() *[32]byte { return p.codeHash } +func (p *PendingAccountWrite) GetCodeHash() *CodeHash { + if p == nil { + zero := CodeHash{} + return &zero + } + return p.codeHash +} // IsCodeHashSet reports whether the code hash has been set in this pending write. -func (p *PendingAccountWrite) IsCodeHashSet() bool { return p.codeHash != nil } +func (p *PendingAccountWrite) IsCodeHashSet() bool { + if p == nil { + return false + } + return p.codeHash != nil +} // SetBalance marks the balance as changed. The pointer is stored directly; the caller // must not modify the underlying array after calling SetBalance. Returns self. -func (p *PendingAccountWrite) SetBalance(balance *[32]byte) *PendingAccountWrite { +func (p *PendingAccountWrite) SetBalance(balance *Balance) *PendingAccountWrite { + if p == nil { + p = NewPendingAccountWrite() + } p.balance = balance return p } // SetNonce marks the nonce as changed. Returns self. 
 func (p *PendingAccountWrite) SetNonce(nonce uint64) *PendingAccountWrite {
+	if p == nil {
+		p = NewPendingAccountWrite()
+	}
 	p.nonce = nonce
 	p.nonceSet = true
 	return p
@@ -51,25 +90,38 @@ func (p *PendingAccountWrite) SetNonce(nonce uint64) *PendingAccountWrite {
 
 // SetCodeHash marks the code hash as changed. The pointer is stored directly; the caller
 // must not modify the underlying array after calling SetCodeHash. Returns self.
-func (p *PendingAccountWrite) SetCodeHash(codeHash *[32]byte) *PendingAccountWrite {
+func (p *PendingAccountWrite) SetCodeHash(codeHash *CodeHash) *PendingAccountWrite {
+	if p == nil {
+		p = NewPendingAccountWrite()
+	}
 	p.codeHash = codeHash
 	return p
 }
 
 // Merge applies the pending field changes onto a copy of the base AccountData, updating the
 // block height. Only fields that have been set via Set* methods are overwritten; all other
-// fields are carried over from the base. The base is not modified.
+// fields are carried over from the base. The base is not modified. If a nil base is provided,
+// the pending writes are applied to a new AccountData instantiated to all 0s.
func (p *PendingAccountWrite) Merge(base *AccountData, blockHeight int64) *AccountData { - result := base.Copy().SetBlockHeight(blockHeight) - - if p.balance != nil { - result.SetBalance(p.balance) - } - if p.nonceSet { - result.SetNonce(p.nonce) + var result *AccountData + if base == nil { + result = NewAccountData() + } else { + result = base.Copy() } - if p.codeHash != nil { - result.SetCodeHash(p.codeHash) + + result.SetBlockHeight(blockHeight) + + if p != nil { + if p.balance != nil { + result.SetBalance(p.balance) + } + if p.nonceSet { + result.SetNonce(p.nonce) + } + if p.codeHash != nil { + result.SetCodeHash(p.codeHash) + } } return result From aa3ef36adbffedb8b495462649d01695106559dc Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 10:58:24 -0500 Subject: [PATCH 100/119] incremental progress --- sei-db/state_db/sc/flatkv/store_write.go | 258 ++++++------------ sei-db/state_db/sc/flatkv/vtype/base_types.go | 15 + 2 files changed, 100 insertions(+), 173 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 2804028042..1d27106bb5 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -10,7 +10,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" - iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) // ApplyChangeSets buffers EVM changesets and updates LtHash. @@ -36,12 +35,18 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { s.phaseTimer.SetPhase("apply_change_sets_prepare") s.pendingChangeSets = append(s.pendingChangeSets, cs...) - // TODO refactor into a parse+sort phase + s.phaseTimer.SetPhase("apply_change_sets_sort") + changesByType := sortChangeSets(cs) - // Gather LTHash pairs for accounts. 
Accounts are handled seperately from the other DBs, - // since accounts have a special workflow due to having multiple fields. - s.phaseTimer.SetPhase("apply_change_sets_gather_account_pairs") - accountWrites, err := s.gatherAccountUpdates(cs) + // Gather LTHash pairs. + s.phaseTimer.SetPhase("apply_change_sets_gather_pairs") + + // Gather Account Pairs (special case since accounts have multiple fields) + accountWrites, err := s.mergeAccountUpdates( + changesByType[evm.EVMKeyNonce], + changesByType[evm.EVMKeyCodeHash], + nil, // TODO: update this when we add a balance key! + ) if err != nil { return fmt.Errorf("failed to gather account updates: %w", err) } @@ -50,45 +55,10 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return fmt.Errorf("failed to gather account pairs: %w", err) } - // For all remaining DBs, collect LtHash pairs. - s.phaseTimer.SetPhase("apply_change_sets_collect_pairs") - var storagePairs []lthash.KVPairWithLastValue - var codePairs []lthash.KVPairWithLastValue - var legacyPairs []lthash.KVPairWithLastValue - - // For each entry in the change set, accumulate changes for the appropriate DB. - for _, namedCS := range cs { - if namedCS.Changeset.Pairs == nil { - continue - } - - for _, pair := range namedCS.Changeset.Pairs { - // Parse memiavl key to determine type - kind, keyBytes := evm.ParseEVMKey(pair.Key) - if kind == evm.EVMKeyUnknown { - // Skip non-EVM keys silently - continue // NO! 
- } - - // Route to appropriate DB based on key type - switch kind { - case evm.EVMKeyStorage: - if !pair.Delete && len(pair.Value) != 32 { - return fmt.Errorf("invalid storage value length: got %d, expected 32", len(pair.Value)) - } - storagePairs = s.accumulateEvmStorageChanges(keyBytes, pair, storageOld, storagePairs) - - case evm.EVMKeyCode: - codePairs = s.accumulateEvmCodeChanges(keyBytes, pair, codeOld, codePairs) - case evm.EVMKeyLegacy: - legacyPairs = s.accumulateEvmLegacyChanges(keyBytes, pair, legacyOld, legacyPairs) - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - // Intentional no-op, accounts are handled separately. - default: - // TODO return an error here - } - } - } + // Gather all of the other DBs pairs. + storagePairs := s.gatherPairs(changesByType[evm.EVMKeyStorage], storageOld) + codePairs := s.gatherPairs(changesByType[evm.EVMKeyCode], codeOld) + legacyPairs := s.gatherPairs(changesByType[evm.EVMKeyLegacy], legacyOld) s.phaseTimer.SetPhase("apply_change_compute_lt_hash") @@ -122,157 +92,99 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return nil } -// Apply a single change to the evm storage db. -func (s *CommitStore) accumulateEvmStorageChanges( - // The key with the prefix stripped. - keyBytes []byte, - // The change to apply. - pair *iavl.KVPair, - // This map stores the old value to the key prior to this change. This function updates it - // with the new value, so that the next change will see this value as the previous value. - storageOld map[string]types.BatchGetResult, - // This slice stores both the new and old values for each key modified in this block. - storagePairs []lthash.KVPairWithLastValue, -) []lthash.KVPairWithLastValue { - keyStr := string(keyBytes) - oldValue := storageOld[keyStr].Value +// Sort the change sets by type. +func sortChangeSets(cs []*proto.NamedChangeSet) map[evm.EVMKeyKind]map[string][]byte { + // TODO add the ability to detect and report/err for unexected types! 
- newStorageData := vtype.NewStorageData().SetBlockHeight(s.committedVersion + 1) + result := make(map[evm.EVMKeyKind]map[string][]byte) - if pair.Delete { - // Value stays all-zeros → IsDelete() returns true. - storageOld[keyStr] = types.BatchGetResult{Value: nil} - } else { - newStorageData.SetValue((*[32]byte)(pair.Value)) - storageOld[keyStr] = types.BatchGetResult{Value: newStorageData.Serialize()} - } + for _, cs := range cs { + if cs.Changeset.Pairs == nil { + continue + } + for _, pair := range cs.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + keyStr := string(keyBytes) - s.storageWrites[keyStr] = newStorageData + kindMap, ok := result[kind] + if !ok { + kindMap = make(map[string][]byte) + result[kind] = kindMap + } - var serializedValue []byte - if !pair.Delete { - serializedValue = newStorageData.Serialize() + kindMap[keyStr] = pair.Value + } } - return append(storagePairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: serializedValue, - LastValue: oldValue, - Delete: pair.Delete, - }) + + return result } -// Apply a single change to the evm code db. -func (s *CommitStore) accumulateEvmCodeChanges( - // The key with the prefix stripped (addr, 20 bytes). - keyBytes []byte, - // The change to apply. - pair *iavl.KVPair, - // This map stores the old value to the key prior to this change. This function updates it - // with the new value, so that the next change will see this value as the previous value. - codeOld map[string]types.BatchGetResult, - // This slice stores both the new and old values for each key modified in this block. - codePairs []lthash.KVPairWithLastValue, +// Gather LtHash pairs for a DB. Not suitable for the storage DB, but ok for the others. 
+func (s *CommitStore) gatherPairs(
+	changes map[string][]byte,
+	oldValues map[string]types.BatchGetResult,
 ) []lthash.KVPairWithLastValue {
-	keyStr := string(keyBytes)
-	oldValue := codeOld[keyStr].Value
 
-	newCodeData := vtype.NewCodeData().SetBlockHeight(s.committedVersion + 1)
+	var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(changes))
 
-	if pair.Delete {
-		newCodeData.SetBytecode([]byte{})
-		codeOld[keyStr] = types.BatchGetResult{Value: nil}
-	} else {
-		newCodeData.SetBytecode(pair.Value)
-		codeOld[keyStr] = types.BatchGetResult{Value: newCodeData.Serialize()}
-	}
+	for keyStr, newValue := range changes {
 
-	s.codeWrites[keyStr] = newCodeData
+		var oldValue []byte
+		if value, ok := oldValues[keyStr]; ok && value.IsFound() {
+			// We've got a value in the database for this key, use it as the old value.
+			oldValue = value.Value
+		}
 
-	var serializedValue []byte
-	if !pair.Delete {
-		serializedValue = newCodeData.Serialize()
+		pairs = append(pairs, lthash.KVPairWithLastValue{
+			Key:       []byte(keyStr),
+			Value:     newValue,
+			LastValue: oldValue,
+			Delete:    false, // TODO how to handle deletion here?
+		})
 	}
-	return append(codePairs, lthash.KVPairWithLastValue{
-		Key:       keyBytes,
-		Value:     serializedValue,
-		LastValue: oldValue,
-		Delete:    pair.Delete,
-	})
-}
-
-// Apply a single change to the evm legacy db.
-func (s *CommitStore) accumulateEvmLegacyChanges(
-	// The key with the prefix stripped.
-	keyBytes []byte,
-	// The change to apply.
-	pair *iavl.KVPair,
-	// This map stores the old value to the key prior to this change. This function updates it
-	// with the new value, so that the next change will see this value as the previous value.
-	legacyOld map[string]types.BatchGetResult,
-	// This slice stores both the new and old values for each key modified in this block.
- legacyPairs []lthash.KVPairWithLastValue, -) []lthash.KVPairWithLastValue { - keyStr := string(keyBytes) - oldValue := legacyOld[keyStr].Value - - var newLegacyData *vtype.LegacyData - if pair.Delete { - newLegacyData = vtype.NewLegacyData([]byte{}) - legacyOld[keyStr] = types.BatchGetResult{Value: nil} - } else { - newLegacyData = vtype.NewLegacyData(pair.Value) - legacyOld[keyStr] = types.BatchGetResult{Value: newLegacyData.Serialize()} - } - newLegacyData.SetBlockHeight(s.committedVersion + 1) - - s.legacyWrites[keyStr] = newLegacyData - - var serializedValue []byte - if !pair.Delete { - serializedValue = newLegacyData.Serialize() - } - return append(legacyPairs, lthash.KVPairWithLastValue{ - Key: keyBytes, - Value: serializedValue, - LastValue: oldValue, - Delete: pair.Delete, - }) + return pairs } -// An account has multiple distict parts, and each part has its own key and can be set in a different changeset. -// This method iterates over the changesets and combines updates for each account into a single PendingAccountWrite. -func (s *CommitStore) gatherAccountUpdates(cs []*proto.NamedChangeSet) (map[string]*vtype.PendingAccountWrite, error) { +// Merge account updates down into a single update per account. 
+func (s *CommitStore) mergeAccountUpdates( + nonceChanges map[string][]byte, + codeHashChanges map[string][]byte, + balanceChanges map[string][]byte, +) (map[string]*vtype.PendingAccountWrite, error) { + updates := make(map[string]*vtype.PendingAccountWrite) - for _, cs := range cs { - if cs.Changeset.Pairs == nil { - continue + if nonceChanges != nil { + for key, nonceChange := range nonceChanges { + nonce, err := vtype.ParseNonce(nonceChange) + if err != nil { + return nil, fmt.Errorf("invalid nonce value: %w", err) + } + // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetNonce(nonce) } + } - for _, pair := range cs.Changeset.Pairs { - kind, keyBytes := evm.ParseEVMKey(pair.Key) - // FUTURE WORK: we also need to add a kind for balance changes. - if kind != evm.EVMKeyNonce && kind != evm.EVMKeyCodeHash { - // This is not an account field change, skip - continue + if codeHashChanges != nil { + for key, codeHashChange := range codeHashChanges { + codeHash, err := vtype.ParseCodeHash(codeHashChange) + if err != nil { + return nil, fmt.Errorf("invalid codehash value: %w", err) } + // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetCodeHash(codeHash) + } + } - keyStr := string(keyBytes) - - // Note: PendingAccountWrite can be used as nil, so no need to bootstrap the map entries - if kind == evm.EVMKeyNonce { - nonce, err := vtype.ParseNonce(pair.Value) - if err != nil { - return nil, fmt.Errorf("invalid nonce value: %w", err) - } - updates[keyStr] = updates[keyStr].SetNonce(nonce) - } else { - codeHash, err := vtype.ParseCodeHash(pair.Value) - if err != nil { - return nil, fmt.Errorf("invalid codehash value: %w", err) - } - updates[keyStr] = updates[keyStr].SetCodeHash(codeHash) + if balanceChanges != nil { + for key, balanceChange := range balanceChanges { + balance, err := vtype.ParseBalance(balanceChange) + if err != nil { + return nil, fmt.Errorf("invalid balance value: %w", err) } 
+ // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetBalance(balance) } } diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types.go b/sei-db/state_db/sc/flatkv/vtype/base_types.go index 4ae0e251ea..f604533c74 100644 --- a/sei-db/state_db/sc/flatkv/vtype/base_types.go +++ b/sei-db/state_db/sc/flatkv/vtype/base_types.go @@ -13,6 +13,8 @@ const ( BalanceLen = 32 ) +// TODO unit test this file!!! + // Address is an EVM address (20 bytes). type Address [AddressLen]byte @@ -34,6 +36,7 @@ func ParseNonce(b []byte) (uint64, error) { return binary.BigEndian.Uint64(b), nil } +// ParseCodeHash parses a codehash value from a byte slice. func ParseCodeHash(b []byte) (*CodeHash, error) { if len(b) != CodeHashLen { return nil, fmt.Errorf( @@ -48,4 +51,16 @@ func ParseCodeHash(b []byte) (*CodeHash, error) { return &result, nil } +// ParseBalance parses a balance value from a byte slice. +func ParseBalance(b []byte) (*Balance, error) { + if len(b) != BalanceLen { + return nil, fmt.Errorf("invalid balance value length: got %d, expected %d", + len(b), BalanceLen, + ) + } + var result Balance + copy(result[:], b) + return &result, nil +} + // TODO implement others!!! 
From cdc1a7d054d4611ec76491b8150289c304b29eeb Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 12:00:41 -0500 Subject: [PATCH 101/119] incremental progress --- sei-db/state_db/sc/flatkv/store_write.go | 342 ++++++++++-------- .../state_db/sc/flatkv/vtype/account_data.go | 46 ++- sei-db/state_db/sc/flatkv/vtype/code_data.go | 33 +- .../state_db/sc/flatkv/vtype/legacy_data.go | 26 +- .../state_db/sc/flatkv/vtype/storage_data.go | 2 + sei-db/state_db/sc/flatkv/vtype/vtype.go | 18 + 6 files changed, 304 insertions(+), 163 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/vtype/vtype.go diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 1d27106bb5..2a56c753e4 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -19,29 +19,35 @@ import ( // - accountDB: key=addr, value=AccountValue (balance(32)||nonce(8)||codehash(32) // - codeDB: key=addr, value=bytecode // - legacyDB: key=full original key (with prefix), value=raw value -func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { +func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error { if s.readOnly { return errReadOnly } + /////////// + // Setup // + /////////// + s.phaseTimer.SetPhase("apply_change_sets_prepare") + s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) + + changesByType := sortChangeSets(changeSets) + + //////////////////// + // Batch Read Old // + //////////////////// s.phaseTimer.SetPhase("apply_change_sets_batch_read") - // Batch read all old values from DBs in parallel. - storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(cs) + storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(changesByType) if err != nil { return fmt.Errorf("failed to batch read old values: %w", err) } - s.phaseTimer.SetPhase("apply_change_sets_prepare") - s.pendingChangeSets = append(s.pendingChangeSets, cs...) 
- - s.phaseTimer.SetPhase("apply_change_sets_sort") - changesByType := sortChangeSets(cs) - - // Gather LTHash pairs. + ////////////////// + // Gather Pairs // + ////////////////// s.phaseTimer.SetPhase("apply_change_sets_gather_pairs") - // Gather Account Pairs (special case since accounts have multiple fields) + // Gather account pairs accountWrites, err := s.mergeAccountUpdates( changesByType[evm.EVMKeyNonce], changesByType[evm.EVMKeyCodeHash], @@ -50,19 +56,35 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { if err != nil { return fmt.Errorf("failed to gather account updates: %w", err) } - accountPairs, err := s.gatherAccountPairs(accountWrites, accountOld) + newAccountValues := s.deriveNewAccountValues(accountWrites, accountOld) + accountPairs := gatherLTHashPairs(newAccountValues, accountOld) + + // Gather storage pairs + storageChanges, err := parseChanges(changesByType[evm.EVMKeyStorage], vtype.DeserializeStorageData) + if err != nil { + return fmt.Errorf("failed to parse storage changes: %w", err) + } + storagePairs := gatherLTHashPairs(storageChanges, storageOld) + + // Gather code pairs + codeChanges, err := parseChanges(changesByType[evm.EVMKeyCode], vtype.DeserializeCodeData) if err != nil { - return fmt.Errorf("failed to gather account pairs: %w", err) + return fmt.Errorf("failed to parse code changes: %w", err) } + codePairs := gatherLTHashPairs(codeChanges, codeOld) - // Gather all of the other DBs pairs. 
- storagePairs := s.gatherPairs(changesByType[evm.EVMKeyStorage], storageOld) - codePairs := s.gatherPairs(changesByType[evm.EVMKeyCode], codeOld) - legacyPairs := s.gatherPairs(changesByType[evm.EVMKeyLegacy], legacyOld) + // Gather legacy pairs + legacyChanges, err := parseChanges(changesByType[evm.EVMKeyLegacy], vtype.DeserializeLegacyData) + if err != nil { + return fmt.Errorf("failed to parse legacy changes: %w", err) + } + legacyPairs := gatherLTHashPairs(legacyChanges, legacyOld) + //////////////////// + // Compute LTHash // + //////////////////// s.phaseTimer.SetPhase("apply_change_compute_lt_hash") - // Per-DB LTHash updates type dbPairs struct { dir string pairs []lthash.KVPairWithLastValue @@ -119,27 +141,52 @@ func sortChangeSets(cs []*proto.NamedChangeSet) map[evm.EVMKeyKind]map[string][] return result } -// Gather LtHash pairs for a DB. Not suitable for the storage DB, but ok for the others. -func (s *CommitStore) gatherPairs( - changes map[string][]byte, - oldValues map[string]types.BatchGetResult, +// Parse into the VType. +func parseChanges[T vtype.VType]( + rawChanges map[string][]byte, + builder vtype.VTypeBuilder[T], +) (map[string]T, error) { + + result := make(map[string]T) + + for keyStr, rawChange := range rawChanges { + value, err := builder(rawChange) + if err != nil { + return nil, fmt.Errorf("failed to parse value for key %s: %w", keyStr, err) + } + result[keyStr] = value + } + + return result, nil +} + +// Gather LtHash pairs for a DB. 
+func gatherLTHashPairs[T vtype.VType]( + newValues map[string]T, + oldValues map[string]T, ) []lthash.KVPairWithLastValue { - var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(changes)) + var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(newValues)) - for keyStr, newValue := range changes { + for keyStr, newValue := range newValues { + var oldValue = oldValues[keyStr] + + var newBytes []byte + if !newValue.IsDelete() { + newBytes = newValue.Serialize() + + } - var oldValue []byte - if value, ok := oldValues[keyStr]; ok && value.IsFound() { - // We've got a value in the database for this key, use it as the old value. - oldValue = oldValue + var oldBytes []byte + if !oldValue.IsDelete() { + oldBytes = oldValue.Serialize() } pairs = append(pairs, lthash.KVPairWithLastValue{ Key: []byte(keyStr), - Value: newValue, - LastValue: oldValue, - Delete: false, // TODO how to handle deletion here? + Value: newBytes, + LastValue: oldBytes, + Delete: newValue.IsDelete(), }) } @@ -191,42 +238,31 @@ func (s *CommitStore) mergeAccountUpdates( return updates, nil } -// For each update being applied to an account, gather the new/old values for use by LtHash delta computation. -func (s *CommitStore) gatherAccountPairs( - // Writes being performed. Writes to different account fields are combined per account. +// Combine the pending account writes with prior values to determine the new account values. +// +// We need to take this step because accounts are split into multiple fields, and its possible to overwrite just a +// single field (thus requring us to copy the unmodified fields from the prior value). +func (s *CommitStore) deriveNewAccountValues( pendingWrites map[string]*vtype.PendingAccountWrite, - // Account values from the database. 
- databaseAccountValues map[string]types.BatchGetResult, -) ([]lthash.KVPairWithLastValue, error) { + databaseAccountData map[string]*vtype.AccountData, +) map[string]*vtype.AccountData { - result := make([]lthash.KVPairWithLastValue, 0, len(pendingWrites)) + result := make(map[string]*vtype.AccountData) for addrStr, pendingWrite := range pendingWrites { var oldValue *vtype.AccountData - if stagedWrite, ok := s.accountWrites[addrStr]; ok { // We've got a pending write staged in memory oldValue = stagedWrite - } else if dbValue, ok := databaseAccountValues[addrStr]; ok { + } else if dbValue, ok := databaseAccountData[addrStr]; ok { // This account is in the DB - var err error - oldValue, err = vtype.DeserializeAccountData(dbValue.Value) - if err != nil { - return nil, fmt.Errorf("invalid account data in DB: %w", err) - } + oldValue = dbValue } newValue := pendingWrite.Merge(oldValue, s.committedVersion+1) - - result = append(result, lthash.KVPairWithLastValue{ - Key: []byte(addrStr), - Value: newValue.Serialize(), - LastValue: oldValue.Serialize(), - Delete: newValue.IsDelete(), - }) + result[addrStr] = newValue } - - return result, nil + return result } // Commit persists buffered writes and advances the version. @@ -517,101 +553,33 @@ func (s *CommitStore) prepareBatchLegacyDB(version int64) (types.Batch, error) { // pending writes (from a prior ApplyChangeSets call in the same block) are // resolved from those pending writes directly and excluded from the DB batch // read, avoiding unnecessary I/O and cache pollution. 
-func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( - storageOld map[string]types.BatchGetResult, - accountOld map[string]types.BatchGetResult, - codeOld map[string]types.BatchGetResult, - legacyOld map[string]types.BatchGetResult, +func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[string][]byte) ( + storageOld map[string]*vtype.StorageData, + accountOld map[string]*vtype.AccountData, + codeOld map[string]*vtype.CodeData, + legacyOld map[string]*vtype.LegacyData, err error, ) { - storageOld = make(map[string]types.BatchGetResult) - accountOld = make(map[string]types.BatchGetResult) - codeOld = make(map[string]types.BatchGetResult) - legacyOld = make(map[string]types.BatchGetResult) - - // Separate maps for keys that need a DB read (no pending write). - storageBatch := make(map[string]types.BatchGetResult) - accountBatch := make(map[string]types.BatchGetResult) - codeBatch := make(map[string]types.BatchGetResult) - legacyBatch := make(map[string]types.BatchGetResult) - - // Partition changeset keys: resolve from pending writes when available - // (prior ApplyChangeSets call in the same block), otherwise queue for - // a DB batch read. 
- for _, namedCS := range cs { - if namedCS.Changeset.Pairs == nil { - continue - } - for _, pair := range namedCS.Changeset.Pairs { - kind, keyBytes := evm.ParseEVMKey(pair.Key) - switch kind { - case evm.EVMKeyStorage: - k := string(keyBytes) - if _, done := storageOld[k]; done { - continue - } - if pw, ok := s.storageWrites[k]; ok { - if pw.IsDelete() { - storageOld[k] = types.BatchGetResult{Value: nil} - } else { - storageOld[k] = types.BatchGetResult{Value: pw.Serialize()} - } - } else { - storageBatch[k] = types.BatchGetResult{} - } - - case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - addr, ok := AddressFromBytes(keyBytes) - if !ok { - continue - } - k := string(addr[:]) - if _, done := accountOld[k]; done { - continue - } - if accountWrite, ok := s.accountWrites[k]; ok { - accountOld[k] = types.BatchGetResult{Value: accountWrite.Serialize()} - } else { - accountBatch[k] = types.BatchGetResult{} - } - - case evm.EVMKeyCode: - k := string(keyBytes) - if _, done := codeOld[k]; done { - continue - } - if pw, ok := s.codeWrites[k]; ok { - if pw.IsDelete() { - codeOld[k] = types.BatchGetResult{Value: nil} - } else { - codeOld[k] = types.BatchGetResult{Value: pw.Serialize()} - } - } else { - codeBatch[k] = types.BatchGetResult{} - } - - case evm.EVMKeyLegacy: - k := string(keyBytes) - if _, done := legacyOld[k]; done { - continue - } - if pw, ok := s.legacyWrites[k]; ok { - if pw.IsDelete() { - legacyOld[k] = types.BatchGetResult{Value: nil} - } else { - legacyOld[k] = types.BatchGetResult{Value: pw.Serialize()} - } - } else { - legacyBatch[k] = types.BatchGetResult{} - } - } - } - } + storageOld = make(map[string]*vtype.StorageData) + accountOld = make(map[string]*vtype.AccountData) + codeOld = make(map[string]*vtype.CodeData) + legacyOld = make(map[string]*vtype.LegacyData) - // Issue parallel BatchGet calls only for keys that need a DB read. + // Issue reads to each DB if we don't already have the old value in memory. 
var wg sync.WaitGroup var storageErr, accountErr, codeErr, legacyErr error + // EVM storage + storageBatch := make(map[string]types.BatchGetResult) + for key, _ := range changesByType[evm.EVMKeyStorage] { + if _, ok := s.storageWrites[key]; ok { + // We've got the old value in the pending writes buffer. + storageOld[key] = s.storageWrites[key] + } else { + // Schedule a read for this key. + storageBatch[key] = types.BatchGetResult{} + } + } if len(storageBatch) > 0 { wg.Add(1) s.miscPool.Submit(func() { @@ -620,6 +588,27 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( }) } + // Accounts + accountBatch := make(map[string]types.BatchGetResult) + for key, _ := range changesByType[evm.EVMKeyNonce] { + if _, ok := s.accountWrites[key]; ok { + // We've got the old value in the pending writes buffer. + accountOld[key] = s.accountWrites[key] + } else { + // Schedule a read for this key. + accountBatch[key] = types.BatchGetResult{} + } + } + for key, _ := range changesByType[evm.EVMKeyCodeHash] { + if _, ok := s.accountWrites[key]; ok { + // We've got the old value in the pending writes buffer. + accountOld[key] = s.accountWrites[key] + } else { + // Schedule a read for this key. + accountBatch[key] = types.BatchGetResult{} + } + } + // TODO: when we eventually add a balance key, we will need to add it to the accountBatch map here. if len(accountBatch) > 0 { wg.Add(1) s.miscPool.Submit(func() { @@ -628,6 +617,17 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( }) } + // EVM bytecode + codeBatch := make(map[string]types.BatchGetResult) + for key, _ := range changesByType[evm.EVMKeyCode] { + if _, ok := s.codeWrites[key]; ok { + // We've got the old value in the pending writes buffer. + codeOld[key] = s.codeWrites[key] + } else { + // Schedule a read for this key. 
+ codeBatch[key] = types.BatchGetResult{} + } + } if len(codeBatch) > 0 { wg.Add(1) s.miscPool.Submit(func() { @@ -636,6 +636,17 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( }) } + // Legacy data + legacyBatch := make(map[string]types.BatchGetResult) + for key, _ := range changesByType[evm.EVMKeyLegacy] { + if _, ok := s.legacyWrites[key]; ok { + // We've got the old value in the pending writes buffer. + legacyOld[key] = s.legacyWrites[key] + } else { + // Schedule a read for this key. + legacyBatch[key] = types.BatchGetResult{} + } + } if len(legacyBatch) > 0 { wg.Add(1) s.miscPool.Submit(func() { @@ -644,38 +655,65 @@ func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( }) } + // Wait for all reads to complete. wg.Wait() if err = errors.Join(storageErr, accountErr, codeErr, legacyErr); err != nil { - return + return nil, nil, nil, nil, fmt.Errorf("failed to batch read old values: %w", err) } - // Merge DB results into the result maps, failing on any per-key errors. - // BatchGet converts ErrNotFound into nil Value (no error), but surfaces - // real read errors. + // Merge DB results into the result maps. 
+ + // Storage for k, v := range storageBatch { if v.Error != nil { return nil, nil, nil, nil, fmt.Errorf("storageDB batch read error for key %x: %w", k, v.Error) } - storageOld[k] = v + if v.IsFound() { + storageOld[k], err = vtype.DeserializeStorageData(v.Value) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to deserialize storage data: %w", err) + } + } } + + // Accounts for k, v := range accountBatch { if v.Error != nil { return nil, nil, nil, nil, fmt.Errorf("accountDB batch read error for key %x: %w", k, v.Error) } - accountOld[k] = v + if v.IsFound() { + accountOld[k], err = vtype.DeserializeAccountData(v.Value) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to deserialize account data: %w", err) + } + } } + + // EVM bytecode for k, v := range codeBatch { if v.Error != nil { return nil, nil, nil, nil, fmt.Errorf("codeDB batch read error for key %x: %w", k, v.Error) } - codeOld[k] = v + if v.IsFound() { + codeOld[k], err = vtype.DeserializeCodeData(v.Value) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to deserialize code data: %w", err) + } + } } + + // Legacy data for k, v := range legacyBatch { if v.Error != nil { return nil, nil, nil, nil, fmt.Errorf("legacyDB batch read error for key %x: %w", k, v.Error) } - legacyOld[k] = v + if v.IsFound() { + legacyOld[k], err = vtype.DeserializeLegacyData(v.Value) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to deserialize legacy data: %w", err) + } + } } - return + return storageOld, accountOld, codeOld, legacyOld, nil } diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index e536828c5f..da8a7524ec 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -34,6 +34,8 @@ const ( accountDataLength = 81 ) +var _ VType = (*AccountData)(nil) + // Used for encapsulating and serializating account data in the FlatKV accounts 
database. // // This data structure is not threadsafe. Values passed into and values received from this data structure @@ -53,6 +55,9 @@ func NewAccountData() *AccountData { // // The returned byte slice is not safe to modify without first copying it. func (a *AccountData) Serialize() []byte { + if a == nil { + return make([]byte, accountDataLength) + } return a.data } @@ -81,32 +86,52 @@ func DeserializeAccountData(data []byte) (*AccountData, error) { // Get the serialization version for this AccountData instance. func (a *AccountData) GetSerializationVersion() AccountDataVersion { + if a == nil { + return AccountDataVersion0 + } return (AccountDataVersion)(a.data[accountVersionStart]) } // Get the account's block height. func (a *AccountData) GetBlockHeight() int64 { + if a == nil { + return 0 + } return int64(binary.BigEndian.Uint64(a.data[accountBlockHeightStart:accountBalanceStart])) //nolint:gosec } // Get the account's balance. func (a *AccountData) GetBalance() *Balance { + if a == nil { + var zero Balance + return &zero + } return (*Balance)(a.data[accountBalanceStart:accountNonceStart]) } // Get the account's nonce. func (a *AccountData) GetNonce() uint64 { + if a == nil { + return 0 + } return binary.BigEndian.Uint64(a.data[accountNonceStart:accountCodeHashStart]) } // Get the account's code hash. func (a *AccountData) GetCodeHash() *CodeHash { + if a == nil { + var zero CodeHash + return &zero + } return (*CodeHash)(a.data[accountCodeHashStart:accountDataLength]) } // Check if this account data signifies a deletion operation. A deletion operation is automatically // performed when all account data fields are 0 (with the exception of the serialization version and block height). func (a *AccountData) IsDelete() bool { + if a == nil { + return true + } for i := accountBalanceStart; i < accountDataLength; i++ { if a.data[i] != 0 { return false @@ -117,6 +142,9 @@ func (a *AccountData) IsDelete() bool { // Copy returns a deep copy of this AccountData. 
The copy has its own backing byte slice. func (a *AccountData) Copy() *AccountData { + if a == nil { + return NewAccountData() + } cp := make([]byte, len(a.data)) copy(cp, a.data) return &AccountData{data: cp} @@ -124,24 +152,36 @@ func (a *AccountData) Copy() *AccountData { // Set the account's block height when this account was last modified/touched. Returns self. func (a *AccountData) SetBlockHeight(blockHeight int64) *AccountData { + if a == nil { + a = NewAccountData() + } binary.BigEndian.PutUint64(a.data[accountBlockHeightStart:accountBalanceStart], uint64(blockHeight)) //nolint:gosec return a } -// Set the account's balance. Returns self. +// Set the account's balance. Returns self (or a new AccountData if nil). func (a *AccountData) SetBalance(balance *Balance) *AccountData { + if a == nil { + a = NewAccountData() + } copy(a.data[accountBalanceStart:accountNonceStart], balance[:]) return a } -// Set the account's nonce. Returns self. +// Set the account's nonce. Returns self (or a new AccountData if nil). func (a *AccountData) SetNonce(nonce uint64) *AccountData { + if a == nil { + a = NewAccountData() + } binary.BigEndian.PutUint64(a.data[accountNonceStart:accountCodeHashStart], nonce) return a } -// Set the account's code hash. Returns self. +// Set the account's code hash. Returns self (or a new AccountData if nil). func (a *AccountData) SetCodeHash(codeHash *CodeHash) *AccountData { + if a == nil { + a = NewAccountData() + } copy(a.data[accountCodeHashStart:accountDataLength], codeHash[:]) return a } diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data.go b/sei-db/state_db/sc/flatkv/vtype/code_data.go index 5fdacc8258..061d439c58 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data.go @@ -29,6 +29,8 @@ const ( codeBytecodeStart = 9 ) +var _ VType = (*CodeData)(nil) + // Used for encapsulating and serializing contract bytecode in the FlatKV code database. // // This data structure is not threadsafe. 
Values passed into and values received from this data structure @@ -48,6 +50,9 @@ func NewCodeData() *CodeData { // // The returned byte slice is not safe to modify without first copying it. func (c *CodeData) Serialize() []byte { + if c == nil { + return make([]byte, codeBytecodeStart) + } return c.data } @@ -76,21 +81,33 @@ func DeserializeCodeData(data []byte) (*CodeData, error) { // Get the serialization version for this CodeData instance. func (c *CodeData) GetSerializationVersion() CodeDataVersion { + if c == nil { + return CodeDataVersion0 + } return (CodeDataVersion)(c.data[codeVersionStart]) } // Get the block height when this code was last modified. func (c *CodeData) GetBlockHeight() int64 { - return int64(binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart])) //nolint:gosec // block height is always within int64 range + if c == nil { + return 0 + } + return int64(binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart])) //nolint:gosec } // Get the contract bytecode. func (c *CodeData) GetBytecode() []byte { + if c == nil { + return make([]byte, 0) + } return c.data[codeBytecodeStart:] } -// Set the contract bytecode. +// Set the contract bytecode. Returns self (or a new CodeData if nil). func (c *CodeData) SetBytecode(bytecode []byte) *CodeData { + if c == nil { + c = NewCodeData() + } newData := make([]byte, codeBytecodeStart+len(bytecode)) copy(newData, c.data[:codeBytecodeStart]) copy(newData[codeBytecodeStart:], bytecode) @@ -101,11 +118,17 @@ func (c *CodeData) SetBytecode(bytecode []byte) *CodeData { // Check if this code data signifies a deletion operation. A deletion operation is automatically // performed when the bytecode is empty (with the exception of the serialization version and block height). func (c *CodeData) IsDelete() bool { - return len(c.data) == codeBytecodeStart + if c == nil { + return true + } + return len(c.data) == codeBytecodeStart // TODO verify that this is the correct semantics! 
} -// Set the block height when this code was last modified/touched. Returns self. +// Set the block height when this code was last modified/touched. Returns self (or a new CodeData if nil). func (c *CodeData) SetBlockHeight(blockHeight int64) *CodeData { - binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative + if c == nil { + c = NewCodeData() + } + binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], uint64(blockHeight)) //nolint:gosec return c } diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index 0762336eb4..63bd758bb6 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -30,6 +30,8 @@ const ( legacyHeaderLength = 9 ) +var _ VType = (*LegacyData)(nil) + // Used for encapsulating and serializing legacy data in the FlatKV legacy database. // // This data structure is not threadsafe. Values passed into and values received from this data structure @@ -49,6 +51,9 @@ func NewLegacyData(value []byte) *LegacyData { // // The returned byte slice is not safe to modify without first copying it. func (l *LegacyData) Serialize() []byte { + if l == nil { + return make([]byte, legacyHeaderLength) + } return l.data } @@ -77,27 +82,42 @@ func DeserializeLegacyData(data []byte) (*LegacyData, error) { // Get the serialization version for this LegacyData instance. func (l *LegacyData) GetSerializationVersion() LegacyDataVersion { + if l == nil { + return LegacyDataVersion0 + } return (LegacyDataVersion)(l.data[legacyVersionStart]) } // Get the block height when this legacy data was last modified. 
func (l *LegacyData) GetBlockHeight() int64 { - return int64(binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart])) //nolint:gosec // block height is always within int64 range + if l == nil { + return 0 + } + return int64(binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart])) //nolint:gosec } // Get the legacy value. func (l *LegacyData) GetValue() []byte { + if l == nil { + return make([]byte, 0) + } return l.data[legacyValueStart:] } // Check if this legacy data signifies a deletion operation. A deletion operation is automatically // performed when the value is empty (with the exception of the serialization version and block height). func (l *LegacyData) IsDelete() bool { + if l == nil { + return true + } return len(l.data) == legacyHeaderLength } -// Set the block height when this legacy data was last modified/touched. Returns self. +// Set the block height when this legacy data was last modified/touched. Returns self (or a new LegacyData if nil). func (l *LegacyData) SetBlockHeight(blockHeight int64) *LegacyData { - binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative + if l == nil { + l = NewLegacyData(nil) + } + binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], uint64(blockHeight)) //nolint:gosec return l } diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data.go b/sei-db/state_db/sc/flatkv/vtype/storage_data.go index aab7b7e440..c1f719d9dc 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data.go @@ -30,6 +30,8 @@ const ( storageDataLength = 41 ) +var _ VType = (*StorageData)(nil) + // Used for encapsulating and serializing storage slot data in the FlatKV storage database. // // This data structure is not threadsafe. 
Values passed into and values received from this data structure diff --git a/sei-db/state_db/sc/flatkv/vtype/vtype.go b/sei-db/state_db/sc/flatkv/vtype/vtype.go new file mode 100644 index 0000000000..b361c6003e --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/vtype.go @@ -0,0 +1,18 @@ +package vtype + +// All values in FLatKV are Vtypes (except for the metadata table). +// +// VTypes should be well-behaved when nil, and it should be safe to call into them without checking for nil. +// Nil VTypes should identify themselves as deletion operations with all zero values. +type VType interface { + + // Serialize the value to a byte slice. + Serialize() []byte + + // IsDelete returns true if the value is a deletion operation. + IsDelete() bool +} + +// VTypeBuilder is a function that builds a VType from a byte slice, returning an error if the byte +// slice cannot be parsed. +type VTypeBuilder[T VType] func([]byte) (T, error) From 95b392c488f96ecf263eeedf87f1bac34f5d7ad4 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 12:25:50 -0500 Subject: [PATCH 102/119] fix parsing --- sei-db/state_db/sc/flatkv/config.go | 5 + sei-db/state_db/sc/flatkv/store_apply.go | 302 ++++++++++++++++++ sei-db/state_db/sc/flatkv/store_write.go | 253 --------------- sei-db/state_db/sc/flatkv/vtype/base_types.go | 12 +- .../state_db/sc/flatkv/vtype/legacy_data.go | 16 +- .../state_db/sc/flatkv/vtype/storage_data.go | 26 +- sei-db/state_db/sc/flatkv/vtype/vtype.go | 5 - 7 files changed, 355 insertions(+), 264 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/store_apply.go diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 1da9f1b6e0..0617ee3f43 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -95,6 +95,10 @@ type Config struct { // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. 
// The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. MiscConstantThreadCount int + + // If true, FlatKV will return an error if it encounters an usupported key type. Otherwise, + // it will log a warning and continue. + StrictKeyTypeCheck bool } // DefaultConfig returns Config with safe default values. @@ -120,6 +124,7 @@ func DefaultConfig() *Config { ReaderPoolQueueSize: 1024, MiscPoolThreadsPerCore: 4.0, MiscConstantThreadCount: 0, + StrictKeyTypeCheck: true, } cfg.AccountCacheConfig.MaxSize = unit.GB diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go new file mode 100644 index 0000000000..d362cd3a6f --- /dev/null +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -0,0 +1,302 @@ +package flatkv + +import ( + "fmt" + + "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" +) + +// Supported key types for FlatKV. +// TODO: add balance key when that is eventually supported +var supportedKeyTypes = map[evm.EVMKeyKind]struct{}{ + evm.EVMKeyStorage: {}, + evm.EVMKeyNonce: {}, + evm.EVMKeyCodeHash: {}, + evm.EVMKeyCode: {}, + evm.EVMKeyLegacy: {}, +} + +// ApplyChangeSets buffers EVM changesets and updates LtHash. +func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error { + if s.readOnly { + return errReadOnly + } + + /////////// + // Setup // + /////////// + s.phaseTimer.SetPhase("apply_change_sets_prepare") + s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) // TODO this is wrong!! 
+ + changesByType, err := sortChangeSets(changeSets, s.config.StrictKeyTypeCheck) + if err != nil { + return fmt.Errorf("failed to sort change sets: %w", err) + } + + blockHeight := s.committedVersion + 1 + + //////////////////// + // Batch Read Old // + //////////////////// + s.phaseTimer.SetPhase("apply_change_sets_batch_read") + + storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(changesByType) + if err != nil { + return fmt.Errorf("failed to batch read old values: %w", err) + } + + ////////////////// + // Gather Pairs // + ////////////////// + s.phaseTimer.SetPhase("apply_change_sets_gather_pairs") + + // Gather account pairs + accountWrites, err := mergeAccountUpdates( + changesByType[evm.EVMKeyNonce], + changesByType[evm.EVMKeyCodeHash], + nil, // TODO: update this when we add a balance key! + ) + if err != nil { + return fmt.Errorf("failed to gather account updates: %w", err) + } + newAccountValues := deriveNewAccountValues(accountWrites, accountOld, blockHeight) + accountPairs := gatherLTHashPairs(newAccountValues, accountOld) + + // Gather storage pairs + storageChanges, err := processStorageChanges(changesByType[evm.EVMKeyStorage], blockHeight) + if err != nil { + return fmt.Errorf("failed to parse storage changes: %w", err) + } + storagePairs := gatherLTHashPairs(storageChanges, storageOld) + + // Gather code pairs + codeChanges, err := processCodeChanges(changesByType[evm.EVMKeyCode], blockHeight) + if err != nil { + return fmt.Errorf("failed to parse code changes: %w", err) + } + codePairs := gatherLTHashPairs(codeChanges, codeOld) + + // Gather legacy pairs + legacyChanges, err := processLegacyChanges(changesByType[evm.EVMKeyLegacy], blockHeight) + if err != nil { + return fmt.Errorf("failed to parse legacy changes: %w", err) + } + legacyPairs := gatherLTHashPairs(legacyChanges, legacyOld) + + //////////////////// + // Compute LTHash // + //////////////////// + s.phaseTimer.SetPhase("apply_change_compute_lt_hash") + + type 
dbPairs struct { + dir string + pairs []lthash.KVPairWithLastValue + } + for _, dp := range [4]dbPairs{ + {storageDBDir, storagePairs}, + {accountDBDir, accountPairs}, + {codeDBDir, codePairs}, + {legacyDBDir, legacyPairs}, + } { + if len(dp.pairs) > 0 { + newHash, _ := lthash.ComputeLtHash(s.perDBWorkingLtHash[dp.dir], dp.pairs) + s.perDBWorkingLtHash[dp.dir] = newHash + } + } + + // Global LTHash = sum of per-DB hashes (homomorphic property). + // Compute into a fresh hash and swap to avoid a transient empty state + // on workingLtHash (safe for future pipelining / async callers). + globalHash := lthash.New() + for _, dir := range dataDBDirs { + globalHash.MixIn(s.perDBWorkingLtHash[dir]) + } + s.workingLtHash = globalHash + + s.phaseTimer.SetPhase("apply_change_done") + return nil +} + +// Sort the change sets by type. +func sortChangeSets( + cs []*proto.NamedChangeSet, + // If true, returns an error if an unsupported key type is encountered. + strict bool, +) (map[evm.EVMKeyKind]map[string][]byte, error) { + result := make(map[evm.EVMKeyKind]map[string][]byte) + + for _, cs := range cs { + if cs.Changeset.Pairs == nil { + continue + } + for _, pair := range cs.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + + if _, ok := supportedKeyTypes[kind]; !ok { + if strict { + return nil, fmt.Errorf("unsupported key type: %s", kind) + } else { + logger.Warn("unsupported key type", "key", kind) + } + } + + keyStr := string(keyBytes) + + kindMap, ok := result[kind] + if !ok { + kindMap = make(map[string][]byte) + result[kind] = kindMap + } + + kindMap[keyStr] = pair.Value + } + } + + return result, nil +} + +// Process incoming storage changes into a form appropriate for hashing and insertion into the DB. 
+func processStorageChanges( + rawChanges map[string][]byte, + blockHeight int64, +) (map[string]*vtype.StorageData, error) { + result := make(map[string]*vtype.StorageData) + + for keyStr, rawChange := range rawChanges { + value, err := vtype.ParseStorageValue(rawChange) + if err != nil { + return nil, fmt.Errorf("failed to parse storage value: %w", err) + } + + result[keyStr] = vtype.NewStorageData().SetBlockHeight(blockHeight).SetValue(value) + } + + return result, nil +} + +// Process incoming code changes into a form appropriate for hashing and insertion into the DB. +func processCodeChanges( + rawChanges map[string][]byte, + blockHeight int64, +) (map[string]*vtype.CodeData, error) { + result := make(map[string]*vtype.CodeData) + + for keyStr, rawChange := range rawChanges { + result[keyStr] = vtype.NewCodeData().SetBlockHeight(blockHeight).SetBytecode(rawChange) + } + return result, nil +} + +// Process incoming legacy changes into a form appropriate for hashing and insertion into the DB. +func processLegacyChanges( + rawChanges map[string][]byte, + blockHeight int64, +) (map[string]*vtype.LegacyData, error) { + result := make(map[string]*vtype.LegacyData) + + for keyStr, rawChange := range rawChanges { + result[keyStr] = vtype.NewLegacyData().SetBlockHeight(blockHeight).SetValue(rawChange) + } + return result, nil +} + +// Gather LtHash pairs for a DB. 
+func gatherLTHashPairs[T vtype.VType]( + newValues map[string]T, + oldValues map[string]T, +) []lthash.KVPairWithLastValue { + + var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(newValues)) + + for keyStr, newValue := range newValues { + var oldValue = oldValues[keyStr] + + var newBytes []byte + if !newValue.IsDelete() { + newBytes = newValue.Serialize() + + } + + var oldBytes []byte + if !oldValue.IsDelete() { + oldBytes = oldValue.Serialize() + } + + pairs = append(pairs, lthash.KVPairWithLastValue{ + Key: []byte(keyStr), + Value: newBytes, + LastValue: oldBytes, + Delete: newValue.IsDelete(), + }) + } + + return pairs +} + +// Merge account updates down into a single update per account. +func mergeAccountUpdates( + nonceChanges map[string][]byte, + codeHashChanges map[string][]byte, + balanceChanges map[string][]byte, +) (map[string]*vtype.PendingAccountWrite, error) { + + updates := make(map[string]*vtype.PendingAccountWrite) + + if nonceChanges != nil { + for key, nonceChange := range nonceChanges { + nonce, err := vtype.ParseNonce(nonceChange) + if err != nil { + return nil, fmt.Errorf("invalid nonce value: %w", err) + } + // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetNonce(nonce) + } + } + + if codeHashChanges != nil { + for key, codeHashChange := range codeHashChanges { + codeHash, err := vtype.ParseCodeHash(codeHashChange) + if err != nil { + return nil, fmt.Errorf("invalid codehash value: %w", err) + } + // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetCodeHash(codeHash) + } + } + + if balanceChanges != nil { + for key, balanceChange := range balanceChanges { + balance, err := vtype.ParseBalance(balanceChange) + if err != nil { + return nil, fmt.Errorf("invalid balance value: %w", err) + } + // nil handled internally, no need to bootstrap map entries + updates[key] = updates[key].SetBalance(balance) + } + } + + return updates, nil 
+} + +// Combine the pending account writes with prior values to determine the new account values. +// +// We need to take this step because accounts are split into multiple fields, and its possible to overwrite just a +// single field (thus requring us to copy the unmodified fields from the prior value). +func deriveNewAccountValues( + pendingWrites map[string]*vtype.PendingAccountWrite, + oldValues map[string]*vtype.AccountData, + blockHeight int64, +) map[string]*vtype.AccountData { + result := make(map[string]*vtype.AccountData) + + for addrStr, pendingWrite := range pendingWrites { + oldValue := oldValues[addrStr] + + newValue := pendingWrite.Merge(oldValue, blockHeight) + result[addrStr] = newValue + } + return result +} diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 2a56c753e4..5cc9b380d8 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -12,259 +12,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) -// ApplyChangeSets buffers EVM changesets and updates LtHash. -// -// LtHash is computed based on actual storage format (internal keys): -// - storageDB: key=addr||slot, value=storage_value -// - accountDB: key=addr, value=AccountValue (balance(32)||nonce(8)||codehash(32) -// - codeDB: key=addr, value=bytecode -// - legacyDB: key=full original key (with prefix), value=raw value -func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error { - if s.readOnly { - return errReadOnly - } - - /////////// - // Setup // - /////////// - s.phaseTimer.SetPhase("apply_change_sets_prepare") - s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) 
- - changesByType := sortChangeSets(changeSets) - - //////////////////// - // Batch Read Old // - //////////////////// - s.phaseTimer.SetPhase("apply_change_sets_batch_read") - - storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(changesByType) - if err != nil { - return fmt.Errorf("failed to batch read old values: %w", err) - } - - ////////////////// - // Gather Pairs // - ////////////////// - s.phaseTimer.SetPhase("apply_change_sets_gather_pairs") - - // Gather account pairs - accountWrites, err := s.mergeAccountUpdates( - changesByType[evm.EVMKeyNonce], - changesByType[evm.EVMKeyCodeHash], - nil, // TODO: update this when we add a balance key! - ) - if err != nil { - return fmt.Errorf("failed to gather account updates: %w", err) - } - newAccountValues := s.deriveNewAccountValues(accountWrites, accountOld) - accountPairs := gatherLTHashPairs(newAccountValues, accountOld) - - // Gather storage pairs - storageChanges, err := parseChanges(changesByType[evm.EVMKeyStorage], vtype.DeserializeStorageData) - if err != nil { - return fmt.Errorf("failed to parse storage changes: %w", err) - } - storagePairs := gatherLTHashPairs(storageChanges, storageOld) - - // Gather code pairs - codeChanges, err := parseChanges(changesByType[evm.EVMKeyCode], vtype.DeserializeCodeData) - if err != nil { - return fmt.Errorf("failed to parse code changes: %w", err) - } - codePairs := gatherLTHashPairs(codeChanges, codeOld) - - // Gather legacy pairs - legacyChanges, err := parseChanges(changesByType[evm.EVMKeyLegacy], vtype.DeserializeLegacyData) - if err != nil { - return fmt.Errorf("failed to parse legacy changes: %w", err) - } - legacyPairs := gatherLTHashPairs(legacyChanges, legacyOld) - - //////////////////// - // Compute LTHash // - //////////////////// - s.phaseTimer.SetPhase("apply_change_compute_lt_hash") - - type dbPairs struct { - dir string - pairs []lthash.KVPairWithLastValue - } - for _, dp := range [4]dbPairs{ - {storageDBDir, storagePairs}, - 
{accountDBDir, accountPairs}, - {codeDBDir, codePairs}, - {legacyDBDir, legacyPairs}, - } { - if len(dp.pairs) > 0 { - newHash, _ := lthash.ComputeLtHash(s.perDBWorkingLtHash[dp.dir], dp.pairs) - s.perDBWorkingLtHash[dp.dir] = newHash - } - } - - // Global LTHash = sum of per-DB hashes (homomorphic property). - // Compute into a fresh hash and swap to avoid a transient empty state - // on workingLtHash (safe for future pipelining / async callers). - globalHash := lthash.New() - for _, dir := range dataDBDirs { - globalHash.MixIn(s.perDBWorkingLtHash[dir]) - } - s.workingLtHash = globalHash - - s.phaseTimer.SetPhase("apply_change_done") - return nil -} - -// Sort the change sets by type. -func sortChangeSets(cs []*proto.NamedChangeSet) map[evm.EVMKeyKind]map[string][]byte { - // TODO add the ability to detect and report/err for unexected types! - - result := make(map[evm.EVMKeyKind]map[string][]byte) - - for _, cs := range cs { - if cs.Changeset.Pairs == nil { - continue - } - for _, pair := range cs.Changeset.Pairs { - kind, keyBytes := evm.ParseEVMKey(pair.Key) - keyStr := string(keyBytes) - - kindMap, ok := result[kind] - if !ok { - kindMap = make(map[string][]byte) - result[kind] = kindMap - } - - kindMap[keyStr] = pair.Value - } - } - - return result -} - -// Parse into the VType. -func parseChanges[T vtype.VType]( - rawChanges map[string][]byte, - builder vtype.VTypeBuilder[T], -) (map[string]T, error) { - - result := make(map[string]T) - - for keyStr, rawChange := range rawChanges { - value, err := builder(rawChange) - if err != nil { - return nil, fmt.Errorf("failed to parse value for key %s: %w", keyStr, err) - } - result[keyStr] = value - } - - return result, nil -} - -// Gather LtHash pairs for a DB. 
-func gatherLTHashPairs[T vtype.VType]( - newValues map[string]T, - oldValues map[string]T, -) []lthash.KVPairWithLastValue { - - var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(newValues)) - - for keyStr, newValue := range newValues { - var oldValue = oldValues[keyStr] - - var newBytes []byte - if !newValue.IsDelete() { - newBytes = newValue.Serialize() - - } - - var oldBytes []byte - if !oldValue.IsDelete() { - oldBytes = oldValue.Serialize() - } - - pairs = append(pairs, lthash.KVPairWithLastValue{ - Key: []byte(keyStr), - Value: newBytes, - LastValue: oldBytes, - Delete: newValue.IsDelete(), - }) - } - - return pairs -} - -// Merge account updates down into a single update per account. -func (s *CommitStore) mergeAccountUpdates( - nonceChanges map[string][]byte, - codeHashChanges map[string][]byte, - balanceChanges map[string][]byte, -) (map[string]*vtype.PendingAccountWrite, error) { - - updates := make(map[string]*vtype.PendingAccountWrite) - - if nonceChanges != nil { - for key, nonceChange := range nonceChanges { - nonce, err := vtype.ParseNonce(nonceChange) - if err != nil { - return nil, fmt.Errorf("invalid nonce value: %w", err) - } - // nil handled internally, no need to bootstrap map entries - updates[key] = updates[key].SetNonce(nonce) - } - } - - if codeHashChanges != nil { - for key, codeHashChange := range codeHashChanges { - codeHash, err := vtype.ParseCodeHash(codeHashChange) - if err != nil { - return nil, fmt.Errorf("invalid codehash value: %w", err) - } - // nil handled internally, no need to bootstrap map entries - updates[key] = updates[key].SetCodeHash(codeHash) - } - } - - if balanceChanges != nil { - for key, balanceChange := range balanceChanges { - balance, err := vtype.ParseBalance(balanceChange) - if err != nil { - return nil, fmt.Errorf("invalid balance value: %w", err) - } - // nil handled internally, no need to bootstrap map entries - updates[key] = updates[key].SetBalance(balance) - } - } - - 
return updates, nil -} - -// Combine the pending account writes with prior values to determine the new account values. -// -// We need to take this step because accounts are split into multiple fields, and its possible to overwrite just a -// single field (thus requring us to copy the unmodified fields from the prior value). -func (s *CommitStore) deriveNewAccountValues( - pendingWrites map[string]*vtype.PendingAccountWrite, - databaseAccountData map[string]*vtype.AccountData, -) map[string]*vtype.AccountData { - - result := make(map[string]*vtype.AccountData) - - for addrStr, pendingWrite := range pendingWrites { - var oldValue *vtype.AccountData - if stagedWrite, ok := s.accountWrites[addrStr]; ok { - // We've got a pending write staged in memory - oldValue = stagedWrite - } else if dbValue, ok := databaseAccountData[addrStr]; ok { - // This account is in the DB - oldValue = dbValue - } - - newValue := pendingWrite.Merge(oldValue, s.committedVersion+1) - result[addrStr] = newValue - } - return result -} - // Commit persists buffered writes and advances the version. // Protocol: WAL → per-DB batch (with LocalMeta) → flush → update metaDB. // On crash, catchup replays WAL to recover incomplete commits. diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types.go b/sei-db/state_db/sc/flatkv/vtype/base_types.go index f604533c74..49cee3e3f6 100644 --- a/sei-db/state_db/sc/flatkv/vtype/base_types.go +++ b/sei-db/state_db/sc/flatkv/vtype/base_types.go @@ -63,4 +63,14 @@ func ParseBalance(b []byte) (*Balance, error) { return &result, nil } -// TODO implement others!!! +// ParseStorageValue parses a storage value from a byte slice. 
+func ParseStorageValue(b []byte) (*[32]byte, error) { + if len(b) != SlotLen { + return nil, fmt.Errorf("invalid storage value length: got %d, expected %d", + len(b), SlotLen, + ) + } + var result [32]byte + copy(result[:], b) + return &result, nil +} diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index 63bd758bb6..b904cfc4e8 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -32,6 +32,8 @@ const ( var _ VType = (*LegacyData)(nil) +// TODO revisit types with variable sized fields!!! Not elegegant how we currently do this. + // Used for encapsulating and serializing legacy data in the FlatKV legacy database. // // This data structure is not threadsafe. Values passed into and values received from this data structure @@ -41,9 +43,8 @@ type LegacyData struct { } // Create a new LegacyData with the given value. -func NewLegacyData(value []byte) *LegacyData { - data := make([]byte, legacyHeaderLength+len(value)) - copy(data[legacyValueStart:], value) +func NewLegacyData() *LegacyData { + data := make([]byte, legacyHeaderLength) return &LegacyData{data: data} } @@ -104,6 +105,15 @@ func (l *LegacyData) GetValue() []byte { return l.data[legacyValueStart:] } +// Set the legacy value. Returns self (or a new LegacyData if nil). +func (l *LegacyData) SetValue(value []byte) *LegacyData { + if l == nil { + l = NewLegacyData(nil) + } + copy(l.data[legacyValueStart:], value) + return l +} + // Check if this legacy data signifies a deletion operation. A deletion operation is automatically // performed when the value is empty (with the exception of the serialization version and block height). 
func (l *LegacyData) IsDelete() bool { diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data.go b/sei-db/state_db/sc/flatkv/vtype/storage_data.go index c1f719d9dc..1ac3d30644 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data.go @@ -51,6 +51,9 @@ func NewStorageData() *StorageData { // // The returned byte slice is not safe to modify without first copying it. func (s *StorageData) Serialize() []byte { + if s == nil { + return make([]byte, storageDataLength) + } return s.data } @@ -79,22 +82,35 @@ func DeserializeStorageData(data []byte) (*StorageData, error) { // Get the serialization version for this StorageData instance. func (s *StorageData) GetSerializationVersion() StorageDataVersion { + if s == nil { + return StorageDataVersion0 + } return (StorageDataVersion)(s.data[storageVersionStart]) } // Get the block height when this storage slot was last modified. func (s *StorageData) GetBlockHeight() int64 { + if s == nil { + return 0 + } return int64(binary.BigEndian.Uint64(s.data[storageBlockHeightStart:storageValueStart])) //nolint:gosec // block height is always within int64 range } // Get the storage slot value. func (s *StorageData) GetValue() *[32]byte { + if s == nil { + var zero [32]byte + return &zero + } return (*[32]byte)(s.data[storageValueStart:storageDataLength]) } // Check if this storage data signifies a deletion operation. A deletion operation is automatically // performed when the value is all 0s (with the exception of the serialization version and block height). func (s *StorageData) IsDelete() bool { + if s == nil { + return true + } for i := storageValueStart; i < storageDataLength; i++ { if s.data[i] != 0 { return false @@ -103,14 +119,20 @@ func (s *StorageData) IsDelete() bool { return true } -// Set the block height when this storage slot was last modified/touched. Returns self. +// Set the block height when this storage slot was last modified/touched. 
Returns self (or a new StorageData if nil). func (s *StorageData) SetBlockHeight(blockHeight int64) *StorageData { + if s == nil { + s = NewStorageData() + } binary.BigEndian.PutUint64(s.data[storageBlockHeightStart:storageValueStart], uint64(blockHeight)) //nolint:gosec // block height is always non-negative return s } -// Set the storage slot value. Returns self. +// Set the storage slot value. Returns self (or a new StorageData if nil). func (s *StorageData) SetValue(value *[32]byte) *StorageData { + if s == nil { + s = NewStorageData() + } copy(s.data[storageValueStart:storageDataLength], value[:]) return s } diff --git a/sei-db/state_db/sc/flatkv/vtype/vtype.go b/sei-db/state_db/sc/flatkv/vtype/vtype.go index b361c6003e..1b85339236 100644 --- a/sei-db/state_db/sc/flatkv/vtype/vtype.go +++ b/sei-db/state_db/sc/flatkv/vtype/vtype.go @@ -5,14 +5,9 @@ package vtype // VTypes should be well-behaved when nil, and it should be safe to call into them without checking for nil. // Nil VTypes should identify themselves as deletion operations with all zero values. type VType interface { - // Serialize the value to a byte slice. Serialize() []byte // IsDelete returns true if the value is a deletion operation. IsDelete() bool } - -// VTypeBuilder is a function that builds a VType from a byte slice, returning an error if the byte -// slice cannot be parsed. 
-type VTypeBuilder[T VType] func([]byte) (T, error) From 0dfb271f94bdf138f7045361030710cdf77007e6 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 12:47:15 -0500 Subject: [PATCH 103/119] fix store read --- sei-db/state_db/sc/flatkv/keys.go | 104 +----------------- sei-db/state_db/sc/flatkv/store.go | 4 +- sei-db/state_db/sc/flatkv/store_read.go | 57 +++------- sei-db/state_db/sc/flatkv/store_write.go | 1 - .../state_db/sc/flatkv/vtype/legacy_data.go | 7 +- 5 files changed, 25 insertions(+), 148 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 11a6720d95..24da5df9ed 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -2,8 +2,6 @@ package flatkv import ( "bytes" - "encoding/binary" - "fmt" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" ) @@ -33,11 +31,8 @@ func isMetaKey(key []byte) bool { } const ( - AddressLen = 20 - CodeHashLen = 32 - SlotLen = 32 - BalanceLen = 32 - NonceLen = 8 + AddressLen = 20 + SlotLen = 32 ) // LocalMeta stores per-DB version tracking metadata. @@ -50,15 +45,9 @@ type LocalMeta struct { // Address is an EVM address (20 bytes). type Address [AddressLen]byte -// CodeHash is a contract code hash (32 bytes). -type CodeHash [CodeHashLen]byte - // Slot is a storage slot key (32 bytes). type Slot [SlotLen]byte -// Balance is an EVM balance (32 bytes, big-endian uint256). 
-type Balance [BalanceLen]byte - func AddressFromBytes(b []byte) (Address, bool) { if len(b) != AddressLen { return Address{}, false @@ -68,15 +57,6 @@ func AddressFromBytes(b []byte) (Address, bool) { return a, true } -func SlotFromBytes(b []byte) (Slot, bool) { - if len(b) != SlotLen { - return Slot{}, false - } - var s Slot - copy(s[:], b) - return s, true -} - // ============================================================================= // DB Key Builders // ============================================================================= @@ -110,83 +90,3 @@ func PrefixEnd(prefix []byte) []byte { } return nil } - -// AccountValue is the account record. -// -// Encoding is variable-length to save space for EOA accounts: -// - EOA (no code): balance(32) || nonce(8) = 40 bytes -// - Contract (has code): balance(32) || nonce(8) || codehash(32) = 72 bytes -// -// CodeHash == CodeHash{} (all zeros) means the account has no code (EOA). -// Note: empty code contracts have CodeHash = keccak256("") which is non-zero. -type AccountValue struct { // TODO delete - Balance Balance - Nonce uint64 - CodeHash CodeHash -} - -const ( - // accountValueEOALen is the encoded length for EOA accounts (no code). - accountValueEOALen = BalanceLen + NonceLen // 40 bytes - - // accountValueContractLen is the encoded length for contract accounts. - accountValueContractLen = BalanceLen + NonceLen + CodeHashLen // 72 bytes -) - -// HasCode returns true if the account has code (is a contract). -func (v AccountValue) HasCode() bool { - return v.CodeHash != CodeHash{} -} - -// IsEmpty returns true when all fields are zero-valued, indicating the -// account can be physically deleted from accountDB. -func (v AccountValue) IsEmpty() bool { - return v.Balance == (Balance{}) && v.Nonce == 0 && v.CodeHash == (CodeHash{}) -} - -// Encode encodes the AccountValue to bytes. 
-func (v AccountValue) Encode() []byte { - return EncodeAccountValue(v) -} - -// EncodeAccountValue encodes v into a variable-length slice. -// EOA accounts (no code) are encoded as 40 bytes, contracts as 72 bytes. -func EncodeAccountValue(v AccountValue) []byte { - size := accountValueEOALen - if v.HasCode() { - size = accountValueContractLen - } - b := make([]byte, size) - copy(b, v.Balance[:]) - binary.BigEndian.PutUint64(b[BalanceLen:], v.Nonce) - if v.HasCode() { - copy(b[BalanceLen+NonceLen:], v.CodeHash[:]) - } - return b -} - -// DecodeAccountValue decodes a variable-length account record. -// Returns an error if the length is neither 40 (EOA) nor 72 (contract) bytes. -func DecodeAccountValue(b []byte) (AccountValue, error) { - switch len(b) { - case accountValueEOALen: - // EOA: balance(32) || nonce(8) - var v AccountValue - copy(v.Balance[:], b[:BalanceLen]) - v.Nonce = binary.BigEndian.Uint64(b[BalanceLen:]) - // CodeHash remains zero (no code) - return v, nil - - case accountValueContractLen: - // Contract: balance(32) || nonce(8) || codehash(32) - var v AccountValue - copy(v.Balance[:], b[:BalanceLen]) - v.Nonce = binary.BigEndian.Uint64(b[BalanceLen : BalanceLen+NonceLen]) - copy(v.CodeHash[:], b[BalanceLen+NonceLen:]) - return v, nil - - default: - return AccountValue{}, fmt.Errorf("invalid account value length: got %d, want %d (EOA) or %d (contract)", - len(b), accountValueEOALen, accountValueContractLen) - } -} diff --git a/sei-db/state_db/sc/flatkv/store.go b/sei-db/state_db/sc/flatkv/store.go index d0f148e8d8..bb655a6cb0 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -87,7 +87,9 @@ type CommitStore struct { storageWrites map[string]*vtype.StorageData legacyWrites map[string]*vtype.LegacyData - changelog wal.ChangelogWAL + changelog wal.ChangelogWAL + + // Changes to feed into the WAL at the next commit. 
pendingChangeSets []*proto.NamedChangeSet lastSnapshotTime time.Time diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 7c7ce4503d..14fda3331f 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -35,20 +35,22 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { } // Check pending writes first - if paw, found := s.accountWrites[string(addr[:])]; found { - if paw.isDelete { + if accountValue, found := s.accountWrites[string(addr[:])]; found { + if accountValue.IsDelete() { return nil, false } if kind == evm.EVMKeyNonce { - nonce := make([]byte, NonceLen) - binary.BigEndian.PutUint64(nonce, paw.value.Nonce) - return nonce, true + nonceBytes := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonceBytes, accountValue.GetNonce()) + return nonceBytes, true } // CodeHash - if paw.value.CodeHash == (CodeHash{}) { + codeHash := accountValue.GetCodeHash() + var zeroCodeHash vtype.CodeHash + if *codeHash == zeroCodeHash { return nil, false } - return paw.value.CodeHash[:], true + return codeHash[:], true } // Read from accountDB @@ -56,21 +58,23 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool) { if err != nil { return nil, false } - av, err := DecodeAccountValue(encoded) + accountData, err := vtype.DeserializeAccountData(encoded) if err != nil { return nil, false } if kind == evm.EVMKeyNonce { - nonce := make([]byte, NonceLen) - binary.BigEndian.PutUint64(nonce, av.Nonce) + nonce := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonce, accountData.GetNonce()) return nonce, true } // CodeHash - if av.CodeHash == (CodeHash{}) { + codeHash := accountData.GetCodeHash() + var zeroCodeHash vtype.CodeHash + if *codeHash == zeroCodeHash { return nil, false } - return av.CodeHash[:], true + return codeHash[:], true case evm.EVMKeyCode: value, err := s.getCodeValue(keyBytes) @@ -177,35 +181,6 @@ func (s *CommitStore) IteratorByPrefix(prefix []byte) 
Iterator { // Internal Getters (used by ApplyChangeSets for LtHash computation) // ============================================================================= -// getAccountValue loads AccountValue from pending writes or DB. -// Returns zero AccountValue if not found (new account) or if the pending -// write is marked for deletion (row logically absent). -// Returns error if existing data is corrupted (decode fails) or I/O error occurs. -func (s *CommitStore) getAccountValue(addr Address) (AccountValue, error) { - // Check pending writes first - if paw, ok := s.accountWrites[string(addr[:])]; ok { - if paw.isDelete { - return AccountValue{}, nil - } - return paw.value, nil - } - - // Read from accountDB - value, err := s.accountDB.Get(AccountKey(addr)) - if err != nil { - if errorutils.IsNotFound(err) { - return AccountValue{}, nil // New account - } - return AccountValue{}, fmt.Errorf("accountDB I/O error for addr %x: %w", addr, err) - } - - av, err := DecodeAccountValue(value) - if err != nil { - return AccountValue{}, fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) - } - return av, nil -} - func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { pendingWrite, hasPending := s.storageWrites[string(key)] if hasPending { diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 5cc9b380d8..6c66bf3338 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -8,7 +8,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" - "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index b904cfc4e8..c86f867ef6 100644 --- 
a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -108,9 +108,10 @@ func (l *LegacyData) GetValue() []byte { // Set the legacy value. Returns self (or a new LegacyData if nil). func (l *LegacyData) SetValue(value []byte) *LegacyData { if l == nil { - l = NewLegacyData(nil) + l = NewLegacyData() } - copy(l.data[legacyValueStart:], value) + newData := make([]byte, legacyHeaderLength+len(value)) + copy(newData, l.data[:legacyValueStart]) return l } @@ -126,7 +127,7 @@ func (l *LegacyData) IsDelete() bool { // Set the block height when this legacy data was last modified/touched. Returns self (or a new LegacyData if nil). func (l *LegacyData) SetBlockHeight(blockHeight int64) *LegacyData { if l == nil { - l = NewLegacyData(nil) + l = NewLegacyData() } binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], uint64(blockHeight)) //nolint:gosec return l From 872b0a6b69d1a472dad7c26f281e43aa4fd14bab Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 13:25:51 -0500 Subject: [PATCH 104/119] fix merge problem --- sei-cosmos/server/config/config.go | 2 +- sei-db/config/ss_config.go | 4 ++-- sei-db/config/toml_test.go | 8 ++++---- sei-db/db_engine/pebbledb/mvcc/bench_test.go | 2 +- sei-db/db_engine/pebbledb/mvcc/db_test.go | 4 ++-- sei-db/ledger_db/receipt/receipt_store.go | 2 +- sei-db/ledger_db/receipt/receipt_store_test.go | 2 +- sei-db/state_db/bench/wrappers/db_implementations.go | 2 +- sei-db/state_db/ss/composite/recovery_test.go | 6 +++--- sei-db/tools/cmd/seidb/benchmark/iteration.go | 2 +- sei-db/tools/cmd/seidb/benchmark/random_read.go | 2 +- sei-db/tools/cmd/seidb/benchmark/reverse_iteration.go | 2 +- sei-db/tools/cmd/seidb/benchmark/write.go | 2 +- sei-db/tools/cmd/seidb/operations/dump_db.go | 2 +- sei-db/tools/cmd/seidb/operations/prune.go | 2 +- sei-db/tools/cmd/seidb/operations/replay_changelog.go | 2 +- 16 files changed, 23 insertions(+), 23 deletions(-) diff --git 
a/sei-cosmos/server/config/config.go b/sei-cosmos/server/config/config.go index 355020b83e..b3ac1b746c 100644 --- a/sei-cosmos/server/config/config.go +++ b/sei-cosmos/server/config/config.go @@ -317,7 +317,7 @@ func DefaultConfig() *Config { SnapshotDirectory: "", }, StateCommit: config.DefaultStateCommitConfig(), - StateStore: *config.DefaultStateStoreConfig(), + StateStore: config.DefaultStateStoreConfig(), Genesis: GenesisConfig{ StreamImport: false, GenesisStreamFile: "", diff --git a/sei-db/config/ss_config.go b/sei-db/config/ss_config.go index 3cf93294dd..1cda8d2ae0 100644 --- a/sei-db/config/ss_config.go +++ b/sei-db/config/ss_config.go @@ -94,8 +94,8 @@ func (c StateStoreConfig) EVMEnabled() bool { } // DefaultStateStoreConfig returns the default StateStoreConfig -func DefaultStateStoreConfig() *StateStoreConfig { - return &StateStoreConfig{ +func DefaultStateStoreConfig() StateStoreConfig { + return StateStoreConfig{ Enable: true, Backend: DefaultSSBackend, AsyncWriteBuffer: DefaultSSAsyncBuffer, diff --git a/sei-db/config/toml_test.go b/sei-db/config/toml_test.go index a465ef765a..5a18eaf53b 100644 --- a/sei-db/config/toml_test.go +++ b/sei-db/config/toml_test.go @@ -21,7 +21,7 @@ func TestStateCommitConfigTemplate(t *testing.T) { cfg := TemplateConfig{ StateCommit: DefaultStateCommitConfig(), - StateStore: *DefaultStateStoreConfig(), + StateStore: DefaultStateStoreConfig(), } // Parse and execute the StateCommit template @@ -64,7 +64,7 @@ func TestStateStoreConfigTemplate(t *testing.T) { cfg := TemplateConfig{ StateCommit: DefaultStateCommitConfig(), - StateStore: *DefaultStateStoreConfig(), + StateStore: DefaultStateStoreConfig(), } // Parse and execute the StateStore template @@ -127,7 +127,7 @@ func TestDefaultConfigTemplate(t *testing.T) { cfg := TemplateConfig{ StateCommit: DefaultStateCommitConfig(), - StateStore: *DefaultStateStoreConfig(), + StateStore: DefaultStateStoreConfig(), ReceiptStore: DefaultReceiptStoreConfig(), } @@ -293,7 +293,7 @@ 
func TestTemplateFieldPathsExist(t *testing.T) { cfg := TemplateConfig{ StateCommit: DefaultStateCommitConfig(), - StateStore: *DefaultStateStoreConfig(), + StateStore: DefaultStateStoreConfig(), ReceiptStore: DefaultReceiptStoreConfig(), } diff --git a/sei-db/db_engine/pebbledb/mvcc/bench_test.go b/sei-db/db_engine/pebbledb/mvcc/bench_test.go index bc08774a36..c7ce6c7728 100644 --- a/sei-db/db_engine/pebbledb/mvcc/bench_test.go +++ b/sei-db/db_engine/pebbledb/mvcc/bench_test.go @@ -11,7 +11,7 @@ import ( func BenchmarkDBBackend(b *testing.B) { s := &sstest.StorageBenchSuite{ NewDB: func(dir string) (types.StateStore, error) { - return OpenDB(dir, *config.DefaultStateStoreConfig()) + return OpenDB(dir, config.DefaultStateStoreConfig()) }, BenchBackendName: "PebbleDB", } diff --git a/sei-db/db_engine/pebbledb/mvcc/db_test.go b/sei-db/db_engine/pebbledb/mvcc/db_test.go index de7826e6d9..b923188718 100644 --- a/sei-db/db_engine/pebbledb/mvcc/db_test.go +++ b/sei-db/db_engine/pebbledb/mvcc/db_test.go @@ -11,7 +11,7 @@ import ( ) func TestStorageTestSuite(t *testing.T) { - pebbleConfig := *config.DefaultStateStoreConfig() + pebbleConfig := config.DefaultStateStoreConfig() pebbleConfig.Backend = "pebbledb" s := &sstest.StorageTestSuite{ BaseStorageTestSuite: sstest.BaseStorageTestSuite{ @@ -32,7 +32,7 @@ func TestStorageTestSuite(t *testing.T) { // configured for MVCC key encoding, so NextPrefix/SeekLT operations won't work correctly. // BaseStorageTestSuite contains only tests that work with both comparers. 
func TestStorageTestSuiteDefaultComparer(t *testing.T) { - pebbleConfig := *config.DefaultStateStoreConfig() + pebbleConfig := config.DefaultStateStoreConfig() pebbleConfig.Backend = "pebbledb" pebbleConfig.UseDefaultComparer = true diff --git a/sei-db/ledger_db/receipt/receipt_store.go b/sei-db/ledger_db/receipt/receipt_store.go index a204afdc88..37276b0113 100644 --- a/sei-db/ledger_db/receipt/receipt_store.go +++ b/sei-db/ledger_db/receipt/receipt_store.go @@ -124,7 +124,7 @@ func newReceiptBackend(config dbconfig.ReceiptStoreConfig, storeKey sdk.StoreKey ssConfig.KeepLastVersion = false ssConfig.Backend = "pebbledb" - db, err := mvcc.OpenDB(ssConfig.DBDirectory, *ssConfig) + db, err := mvcc.OpenDB(ssConfig.DBDirectory, ssConfig) if err != nil { return nil, err } diff --git a/sei-db/ledger_db/receipt/receipt_store_test.go b/sei-db/ledger_db/receipt/receipt_store_test.go index e9793388f1..7ce696e350 100644 --- a/sei-db/ledger_db/receipt/receipt_store_test.go +++ b/sei-db/ledger_db/receipt/receipt_store_test.go @@ -205,7 +205,7 @@ func TestRecoverReceiptStoreReplaysChangelog(t *testing.T) { cfg := dbconfig.DefaultReceiptStoreConfig() cfg.DBDirectory = dir cfg.KeepRecent = 0 - ssConfig := *dbconfig.DefaultStateStoreConfig() + ssConfig := dbconfig.DefaultStateStoreConfig() ssConfig.DBDirectory = cfg.DBDirectory ssConfig.KeepRecent = cfg.KeepRecent if cfg.PruneIntervalSeconds > 0 { diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index 7e0044defd..47bcaf7d02 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -34,7 +34,7 @@ func DefaultBenchStateStoreConfig() *config.StateStoreConfig { cfg.AsyncWriteBuffer = config.DefaultSSAsyncBuffer cfg.WriteMode = config.SplitWrite cfg.ReadMode = config.EVMFirstRead - return cfg + return &cfg } func newMemIAVLCommitStore(dbDir string) (DBWrapper, error) { diff --git 
a/sei-db/state_db/ss/composite/recovery_test.go b/sei-db/state_db/ss/composite/recovery_test.go index d6ca50fa48..e38342d123 100644 --- a/sei-db/state_db/ss/composite/recovery_test.go +++ b/sei-db/state_db/ss/composite/recovery_test.go @@ -35,7 +35,7 @@ func TestRecoverCompositeStateStore(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) - ssConfig := *config.DefaultStateStoreConfig() + ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = "pebbledb" dbHome := utils.GetStateStorePath(dir, ssConfig.Backend) mvccDB, err := backend.ResolveBackend(ssConfig.Backend)(dbHome, ssConfig) @@ -107,7 +107,7 @@ func TestSyncEVMStoreBehind(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) - ssConfig := *config.DefaultStateStoreConfig() + ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = "pebbledb" dbHome := utils.GetStateStorePath(dir, ssConfig.Backend) mvccDB, err := backend.ResolveBackend(ssConfig.Backend)(dbHome, ssConfig) @@ -219,7 +219,7 @@ func TestConstructorRecoversStalEVM(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(dir) - ssConfig := *config.DefaultStateStoreConfig() + ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = "pebbledb" dbHome := utils.GetStateStorePath(dir, ssConfig.Backend) diff --git a/sei-db/tools/cmd/seidb/benchmark/iteration.go b/sei-db/tools/cmd/seidb/benchmark/iteration.go index 2fc43d36fb..256931a933 100644 --- a/sei-db/tools/cmd/seidb/benchmark/iteration.go +++ b/sei-db/tools/cmd/seidb/benchmark/iteration.go @@ -62,7 +62,7 @@ func DBIteration(inputKVDir string, numVersions int, outputDir string, dbBackend fmt.Printf("Iterating Over DB at %s\n", outputDir) ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(outputDir, *ssConfig) + backend, err := ss.NewStateStore(outputDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/benchmark/random_read.go 
b/sei-db/tools/cmd/seidb/benchmark/random_read.go index 0b4cd14b79..c4526ae9fb 100644 --- a/sei-db/tools/cmd/seidb/benchmark/random_read.go +++ b/sei-db/tools/cmd/seidb/benchmark/random_read.go @@ -67,7 +67,7 @@ func DBRandomRead(inputKVDir string, numVersions int, outputDir string, dbBacken fmt.Printf("Reading Raw Keys and Values from %s\n", inputKVDir) ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(outputDir, *ssConfig) + backend, err := ss.NewStateStore(outputDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/benchmark/reverse_iteration.go b/sei-db/tools/cmd/seidb/benchmark/reverse_iteration.go index bd4a06d154..fc1079a87e 100644 --- a/sei-db/tools/cmd/seidb/benchmark/reverse_iteration.go +++ b/sei-db/tools/cmd/seidb/benchmark/reverse_iteration.go @@ -62,7 +62,7 @@ func DBReverseIteration(inputKVDir string, numVersions int, outputDir string, db fmt.Printf("Iterating Over DB at %s\n", outputDir) ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(outputDir, *ssConfig) + backend, err := ss.NewStateStore(outputDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/benchmark/write.go b/sei-db/tools/cmd/seidb/benchmark/write.go index f63ecbd55a..6dd82629c4 100644 --- a/sei-db/tools/cmd/seidb/benchmark/write.go +++ b/sei-db/tools/cmd/seidb/benchmark/write.go @@ -67,7 +67,7 @@ func DBWrite(inputKVDir string, numVersions int, outputDir string, dbBackend str fmt.Printf("Reading Raw Keys and Values from %s\n", inputKVDir) ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(outputDir, *ssConfig) + backend, err := ss.NewStateStore(outputDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/operations/dump_db.go b/sei-db/tools/cmd/seidb/operations/dump_db.go index 37a2d76631..f3dd3fe37d 100644 --- 
a/sei-db/tools/cmd/seidb/operations/dump_db.go +++ b/sei-db/tools/cmd/seidb/operations/dump_db.go @@ -74,7 +74,7 @@ func DumpDbData(dbBackend string, module string, outputDir string, dbDir string) // TODO: Defer Close Db ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(outputDir, *ssConfig) + backend, err := ss.NewStateStore(outputDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/operations/prune.go b/sei-db/tools/cmd/seidb/operations/prune.go index f8bb12f8cd..aff0767a9c 100644 --- a/sei-db/tools/cmd/seidb/operations/prune.go +++ b/sei-db/tools/cmd/seidb/operations/prune.go @@ -53,7 +53,7 @@ func PruneDB(dbBackend string, dbDir string, version int64) { // TODO: Defer Close Db ssConfig := config.DefaultStateStoreConfig() ssConfig.Backend = dbBackend - backend, err := ss.NewStateStore(dbDir, *ssConfig) + backend, err := ss.NewStateStore(dbDir, ssConfig) if err != nil { panic(err) } diff --git a/sei-db/tools/cmd/seidb/operations/replay_changelog.go b/sei-db/tools/cmd/seidb/operations/replay_changelog.go index f960f51aa7..29ebb5f744 100644 --- a/sei-db/tools/cmd/seidb/operations/replay_changelog.go +++ b/sei-db/tools/cmd/seidb/operations/replay_changelog.go @@ -70,7 +70,7 @@ func executeReplayChangelog(cmd *cobra.Command, _ []string) { ssConfig := config.DefaultStateStoreConfig() ssConfig.KeepRecent = 0 ssConfig.DBDirectory = dbDir - ssStore, err = ss.NewStateStore(dbDir, *ssConfig) + ssStore, err = ss.NewStateStore(dbDir, ssConfig) if err != nil { panic(err) } From 1aef1c3714a181dcfed5fd0ff196b1768ba2233f Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 13:51:08 -0500 Subject: [PATCH 105/119] bugfixes --- sei-db/state_db/sc/flatkv/config.go | 2 +- sei-db/state_db/sc/flatkv/store_apply.go | 106 +++++++++++++----- sei-db/state_db/sc/flatkv/store_read.go | 3 + sei-db/state_db/sc/flatkv/store_write.go | 12 +- .../state_db/sc/flatkv/vtype/legacy_data.go | 2 + 
.../sc/flatkv/vtype/pending_account_write.go | 3 + 6 files changed, 92 insertions(+), 36 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index 0617ee3f43..8f39eef9a1 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -96,7 +96,7 @@ type Config struct { // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. MiscConstantThreadCount int - // If true, FlatKV will return an error if it encounters an usupported key type. Otherwise, + // If true, FlatKV will return an error if it encounters an unsupported key type. Otherwise, // it will log a warning and continue. StrictKeyTypeCheck bool } diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index d362cd3a6f..f2a6bed90e 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -17,7 +17,7 @@ var supportedKeyTypes = map[evm.EVMKeyKind]struct{}{ evm.EVMKeyCodeHash: {}, evm.EVMKeyCode: {}, evm.EVMKeyLegacy: {}, -} +} // TODO also use this for reads // ApplyChangeSets buffers EVM changesets and updates LtHash. func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error { @@ -29,7 +29,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error // Setup // /////////// s.phaseTimer.SetPhase("apply_change_sets_prepare") - s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) // TODO this is wrong!! + s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) 
changesByType, err := sortChangeSets(changeSets, s.config.StrictKeyTypeCheck) if err != nil { @@ -64,6 +64,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error } newAccountValues := deriveNewAccountValues(accountWrites, accountOld, blockHeight) accountPairs := gatherLTHashPairs(newAccountValues, accountOld) + storeWrites(s.accountWrites, newAccountValues) // Gather storage pairs storageChanges, err := processStorageChanges(changesByType[evm.EVMKeyStorage], blockHeight) @@ -71,6 +72,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error return fmt.Errorf("failed to parse storage changes: %w", err) } storagePairs := gatherLTHashPairs(storageChanges, storageOld) + storeWrites(s.storageWrites, storageChanges) // Gather code pairs codeChanges, err := processCodeChanges(changesByType[evm.EVMKeyCode], blockHeight) @@ -78,6 +80,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error return fmt.Errorf("failed to parse code changes: %w", err) } codePairs := gatherLTHashPairs(codeChanges, codeOld) + storeWrites(s.codeWrites, codeChanges) // Gather legacy pairs legacyChanges, err := processLegacyChanges(changesByType[evm.EVMKeyLegacy], blockHeight) @@ -85,6 +88,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error return fmt.Errorf("failed to parse legacy changes: %w", err) } legacyPairs := gatherLTHashPairs(legacyChanges, legacyOld) + storeWrites(s.legacyWrites, legacyChanges) //////////////////// // Compute LTHash // @@ -120,15 +124,27 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error return nil } +// Store a map of writes into a map of pending writes. 
+func storeWrites[T vtype.VType]( + // the map that is accumulating writes + pendingWrites map[string]T, + // new writes that need to be applied to the pendingWrites map + newValues map[string]T, +) { + for keyStr, newValue := range newValues { + pendingWrites[keyStr] = newValue + } +} + // Sort the change sets by type. func sortChangeSets( - cs []*proto.NamedChangeSet, + changeSets []*proto.NamedChangeSet, // If true, returns an error if an unsupported key type is encountered. strict bool, ) (map[evm.EVMKeyKind]map[string][]byte, error) { result := make(map[evm.EVMKeyKind]map[string][]byte) - for _, cs := range cs { + for _, cs := range changeSets { if cs.Changeset.Pairs == nil { continue } @@ -140,6 +156,7 @@ func sortChangeSets( return nil, fmt.Errorf("unsupported key type: %s", kind) } else { logger.Warn("unsupported key type", "key", kind) + continue } } @@ -151,7 +168,11 @@ func sortChangeSets( result[kind] = kindMap } - kindMap[keyStr] = pair.Value + if pair.Delete { + kindMap[keyStr] = nil + } else { + kindMap[keyStr] = pair.Value + } } } @@ -166,12 +187,16 @@ func processStorageChanges( result := make(map[string]*vtype.StorageData) for keyStr, rawChange := range rawChanges { - value, err := vtype.ParseStorageValue(rawChange) - if err != nil { - return nil, fmt.Errorf("failed to parse storage value: %w", err) + if rawChange == nil { + // Deletion is equivalent to setting the storage value to a zero value + result[keyStr] = vtype.NewStorageData().SetBlockHeight(blockHeight).SetValue(&[32]byte{}) + } else { + value, err := vtype.ParseStorageValue(rawChange) + if err != nil { + return nil, fmt.Errorf("failed to parse storage value: %w", err) + } + result[keyStr] = vtype.NewStorageData().SetBlockHeight(blockHeight).SetValue(value) } - - result[keyStr] = vtype.NewStorageData().SetBlockHeight(blockHeight).SetValue(value) } return result, nil @@ -185,7 +210,12 @@ func processCodeChanges( result := make(map[string]*vtype.CodeData) for keyStr, rawChange := range 
rawChanges { - result[keyStr] = vtype.NewCodeData().SetBlockHeight(blockHeight).SetBytecode(rawChange) + if rawChange == nil { + // Deletion is equivalent to setting the code to a zero value + result[keyStr] = vtype.NewCodeData().SetBlockHeight(blockHeight).SetBytecode(nil) + } else { + result[keyStr] = vtype.NewCodeData().SetBlockHeight(blockHeight).SetBytecode(rawChange) + } } return result, nil } @@ -198,7 +228,12 @@ func processLegacyChanges( result := make(map[string]*vtype.LegacyData) for keyStr, rawChange := range rawChanges { - result[keyStr] = vtype.NewLegacyData().SetBlockHeight(blockHeight).SetValue(rawChange) + if rawChange == nil { + // Deletion is equivalent to setting the legacy value to a zero value + result[keyStr] = vtype.NewLegacyData().SetBlockHeight(blockHeight).SetValue(nil) + } else { + result[keyStr] = vtype.NewLegacyData().SetBlockHeight(blockHeight).SetValue(rawChange) + } } return result, nil } @@ -217,7 +252,6 @@ func gatherLTHashPairs[T vtype.VType]( var newBytes []byte if !newValue.IsDelete() { newBytes = newValue.Serialize() - } var oldBytes []byte @@ -243,41 +277,55 @@ func mergeAccountUpdates( balanceChanges map[string][]byte, ) (map[string]*vtype.PendingAccountWrite, error) { + // PendingAccountWrite objects are well behaved when nil, no need to bootstrap map entries. 
updates := make(map[string]*vtype.PendingAccountWrite) if nonceChanges != nil { for key, nonceChange := range nonceChanges { - nonce, err := vtype.ParseNonce(nonceChange) - if err != nil { - return nil, fmt.Errorf("invalid nonce value: %w", err) + if nonceChange == nil { + // Deletion is equivalent to setting the nonce to 0 + updates[key] = updates[key].SetNonce(0) + } else { + nonce, err := vtype.ParseNonce(nonceChange) + if err != nil { + return nil, fmt.Errorf("invalid nonce value: %w", err) + } + updates[key] = updates[key].SetNonce(nonce) } - // nil handled internally, no need to bootstrap map entries - updates[key] = updates[key].SetNonce(nonce) } } if codeHashChanges != nil { for key, codeHashChange := range codeHashChanges { - codeHash, err := vtype.ParseCodeHash(codeHashChange) - if err != nil { - return nil, fmt.Errorf("invalid codehash value: %w", err) + if codeHashChange == nil { + // Deletion is equivalent to setting the code hash to a zero hash + var zero vtype.CodeHash + updates[key] = updates[key].SetCodeHash(&zero) + } else { + codeHash, err := vtype.ParseCodeHash(codeHashChange) + if err != nil { + return nil, fmt.Errorf("invalid codehash value: %w", err) + } + updates[key] = updates[key].SetCodeHash(codeHash) } - // nil handled internally, no need to bootstrap map entries - updates[key] = updates[key].SetCodeHash(codeHash) } } if balanceChanges != nil { for key, balanceChange := range balanceChanges { - balance, err := vtype.ParseBalance(balanceChange) - if err != nil { - return nil, fmt.Errorf("invalid balance value: %w", err) + if balanceChange == nil { + // Deletion is equivalent to setting the balance to a zero balance + var zero vtype.Balance + updates[key] = updates[key].SetBalance(&zero) + } else { + balance, err := vtype.ParseBalance(balanceChange) + if err != nil { + return nil, fmt.Errorf("invalid balance value: %w", err) + } + updates[key] = updates[key].SetBalance(balance) } - // nil handled internally, no need to bootstrap map 
entries - updates[key] = updates[key].SetBalance(balance) } } - return updates, nil } diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 14fda3331f..2c4be926ad 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -208,6 +208,9 @@ func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { pendingWrite, hasPending := s.codeWrites[string(key)] if hasPending { + if pendingWrite.IsDelete() { + return nil, nil + } return pendingWrite.GetBytecode(), nil } diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 6c66bf3338..584fec3f3f 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -120,7 +120,7 @@ func (s *CommitStore) commitBatches(version int64) error { defer func() { _ = batch.Close() }() for keyStr, accountWrite := range s.accountWrites { - key := []byte(keyStr) // TODO verify this is correct! + key := []byte(keyStr) if accountWrite.IsDelete() { if err := batch.Delete(key); err != nil { return fmt.Errorf("accountDB delete: %w", err) @@ -317,7 +317,7 @@ func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[st // EVM storage storageBatch := make(map[string]types.BatchGetResult) - for key, _ := range changesByType[evm.EVMKeyStorage] { + for key := range changesByType[evm.EVMKeyStorage] { if _, ok := s.storageWrites[key]; ok { // We've got the old value in the pending writes buffer. 
storageOld[key] = s.storageWrites[key] @@ -336,7 +336,7 @@ func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[st // Accounts accountBatch := make(map[string]types.BatchGetResult) - for key, _ := range changesByType[evm.EVMKeyNonce] { + for key := range changesByType[evm.EVMKeyNonce] { if _, ok := s.accountWrites[key]; ok { // We've got the old value in the pending writes buffer. accountOld[key] = s.accountWrites[key] @@ -345,7 +345,7 @@ func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[st accountBatch[key] = types.BatchGetResult{} } } - for key, _ := range changesByType[evm.EVMKeyCodeHash] { + for key := range changesByType[evm.EVMKeyCodeHash] { if _, ok := s.accountWrites[key]; ok { // We've got the old value in the pending writes buffer. accountOld[key] = s.accountWrites[key] @@ -365,7 +365,7 @@ func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[st // EVM bytecode codeBatch := make(map[string]types.BatchGetResult) - for key, _ := range changesByType[evm.EVMKeyCode] { + for key := range changesByType[evm.EVMKeyCode] { if _, ok := s.codeWrites[key]; ok { // We've got the old value in the pending writes buffer. codeOld[key] = s.codeWrites[key] @@ -384,7 +384,7 @@ func (s *CommitStore) batchReadOldValues(changesByType map[evm.EVMKeyKind]map[st // Legacy data legacyBatch := make(map[string]types.BatchGetResult) - for key, _ := range changesByType[evm.EVMKeyLegacy] { + for key := range changesByType[evm.EVMKeyLegacy] { if _, ok := s.legacyWrites[key]; ok { // We've got the old value in the pending writes buffer. 
legacyOld[key] = s.legacyWrites[key] diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index c86f867ef6..00ec0ced45 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -112,6 +112,8 @@ func (l *LegacyData) SetValue(value []byte) *LegacyData { } newData := make([]byte, legacyHeaderLength+len(value)) copy(newData, l.data[:legacyValueStart]) + copy(newData[legacyValueStart:], value) + l.data = newData return l } diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go index 19a6141cbb..8f3d9a1ddd 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write.go @@ -91,6 +91,9 @@ func (p *PendingAccountWrite) SetNonce(nonce uint64) *PendingAccountWrite { // SetCodeHash marks the code hash as changed. The pointer is stored directly; the caller // must not modify the underlying array after calling SetCodeHash. Returns self. 
func (p *PendingAccountWrite) SetCodeHash(codeHash *CodeHash) *PendingAccountWrite { + if p == nil { + p = NewPendingAccountWrite() + } p.codeHash = codeHash return p } From ea8b01e72f151a6d0e4957a19ac82bbcebc97844 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 14:18:03 -0500 Subject: [PATCH 106/119] fix compile issues --- sei-db/state_db/sc/flatkv/exporter.go | 14 +- sei-db/state_db/sc/flatkv/exporter_test.go | 9 +- sei-db/state_db/sc/flatkv/keys.go | 9 ++ sei-db/state_db/sc/flatkv/keys_test.go | 140 ------------------ .../sc/flatkv/lthash_correctness_test.go | 25 +++- sei-db/state_db/sc/flatkv/snapshot_test.go | 5 +- sei-db/state_db/sc/flatkv/store_apply.go | 2 +- sei-db/state_db/sc/flatkv/store_write_test.go | 51 ++++--- .../sc/flatkv/vtype/account_data_test.go | 42 +++--- .../sc/flatkv/vtype/legacy_data_test.go | 28 ++-- .../vtype/pending_account_write_test.go | 64 ++++---- 11 files changed, 145 insertions(+), 244 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/exporter.go b/sei-db/state_db/sc/flatkv/exporter.go index 4252a43358..8ae0726d3d 100644 --- a/sei-db/state_db/sc/flatkv/exporter.go +++ b/sei-db/state_db/sc/flatkv/exporter.go @@ -166,7 +166,7 @@ func (e *KVExporter) convertToNodes(db exportDBKind, key, value []byte) ([]*type } func (e *KVExporter) accountToNodes(key, value []byte) ([]*types.SnapshotNode, error) { - av, err := DecodeAccountValue(value) + ad, err := vtype.DeserializeAccountData(value) if err != nil { return nil, fmt.Errorf("corrupt account entry key=%x: %w", key, err) } @@ -174,8 +174,8 @@ func (e *KVExporter) accountToNodes(key, value []byte) ([]*types.SnapshotNode, e var nodes []*types.SnapshotNode nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, key) - nonceValue := make([]byte, NonceLen) - binary.BigEndian.PutUint64(nonceValue, av.Nonce) + nonceValue := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonceValue, ad.GetNonce()) nodes = append(nodes, &types.SnapshotNode{ Key: nonceKey, Value: 
nonceValue, @@ -183,10 +183,12 @@ func (e *KVExporter) accountToNodes(key, value []byte) ([]*types.SnapshotNode, e Height: 0, }) - if av.HasCode() { + codeHash := ad.GetCodeHash() + var zeroHash vtype.CodeHash + if codeHash != nil && *codeHash != zeroHash { codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, key) - codeHashValue := make([]byte, CodeHashLen) - copy(codeHashValue, av.CodeHash[:]) + codeHashValue := make([]byte, vtype.CodeHashLen) + copy(codeHashValue, codeHash[:]) nodes = append(nodes, &types.SnapshotNode{ Key: codeHashKey, Value: codeHashValue, diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index 915cdd6d4d..ca973e0041 100644 --- a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -11,6 +11,7 @@ import ( errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/types" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" ) @@ -88,7 +89,7 @@ func TestExporterAccountKeys(t *testing.T) { nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 42} codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHashVal := make([]byte, CodeHashLen) + codeHashVal := make([]byte, vtype.CodeHashLen) codeHashVal[0] = 0xDE require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -165,7 +166,7 @@ func TestExporterRoundTrip(t *testing.T) { codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) codeVal := []byte{0x60, 0x80} codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHashVal := make([]byte, CodeHashLen) + codeHashVal := make([]byte, vtype.CodeHashLen) codeHashVal[31] = 0xAB require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ @@ -371,7 +372,7 @@ func TestImportPurgesStaleData(t 
*testing.T) { codeStale := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addrStale[:]) nonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 1} - codeHashVal := make([]byte, CodeHashLen) + codeHashVal := make([]byte, vtype.CodeHashLen) codeHashVal[31] = 0xAB codeVal := []byte{0x60, 0x80} @@ -402,7 +403,7 @@ func TestImportPurgesStaleData(t *testing.T) { newStorageVal := padLeft32(0xA1) newNonceVal := []byte{0, 0, 0, 0, 0, 0, 0, 5} - newCodeHashVal := make([]byte, CodeHashLen) + newCodeHashVal := make([]byte, vtype.CodeHashLen) newCodeHashVal[31] = 0xCD newCodeVal := []byte{0x60, 0x40, 0x52} diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index 24da5df9ed..d9923fe513 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -76,6 +76,15 @@ func StorageKey(addr Address, slot Slot) []byte { return key } +func SlotFromBytes(b []byte) (Slot, bool) { + if len(b) != SlotLen { + return Slot{}, false + } + var s Slot + copy(s[:], b) + return s, true +} + // PrefixEnd returns the exclusive upper bound for prefix iteration (or nil). func PrefixEnd(prefix []byte) []byte { if len(prefix) == 0 { diff --git a/sei-db/state_db/sc/flatkv/keys_test.go b/sei-db/state_db/sc/flatkv/keys_test.go index 263101c20d..b77814b369 100644 --- a/sei-db/state_db/sc/flatkv/keys_test.go +++ b/sei-db/state_db/sc/flatkv/keys_test.go @@ -1,8 +1,6 @@ package flatkv import ( - "math" - "math/rand" "testing" "github.com/stretchr/testify/require" @@ -31,144 +29,6 @@ func TestFlatKVPrefixEnd(t *testing.T) { } } -func TestFlatKVAccountValueEncoding(t *testing.T) { - // Deterministic seed so failures are reproducible. 
- const seed = int64(1) - rng := rand.New(rand.NewSource(seed)) - - randomBytes := func(n int) []byte { - b := make([]byte, n) - rng.Read(b) - return b - } - - t.Run("RoundTripContract", func(t *testing.T) { - var balance Balance - copy(balance[:], randomBytes(BalanceLen)) - var codeHash CodeHash - copy(codeHash[:], randomBytes(CodeHashLen)) - - original := AccountValue{ - Balance: balance, - Nonce: rng.Uint64(), - CodeHash: codeHash, - } - - require.True(t, original.HasCode(), "contract should have code") - - encoded := EncodeAccountValue(original) - require.Equal(t, accountValueContractLen, len(encoded), "contract should be 72 bytes") - - decoded, err := DecodeAccountValue(encoded) - require.NoError(t, err) - require.Equal(t, original, decoded) - }) - - t.Run("RoundTripEOA", func(t *testing.T) { - var balance Balance - copy(balance[:], randomBytes(BalanceLen)) - - original := AccountValue{ - Balance: balance, - Nonce: rng.Uint64(), - CodeHash: CodeHash{}, // EOA has no code - } - - require.False(t, original.HasCode(), "EOA should not have code") - - encoded := EncodeAccountValue(original) - require.Equal(t, accountValueEOALen, len(encoded), "EOA should be 40 bytes") - - decoded, err := DecodeAccountValue(encoded) - require.NoError(t, err) - require.Equal(t, original, decoded) - }) - - t.Run("RoundTripZeroEOA", func(t *testing.T) { - // Completely empty account (zero balance, zero nonce, no code) - original := AccountValue{ - Balance: Balance{}, - Nonce: 0, - CodeHash: CodeHash{}, - } - - require.False(t, original.HasCode()) - - encoded := EncodeAccountValue(original) - require.Equal(t, accountValueEOALen, len(encoded), "zero EOA should be 40 bytes") - - decoded, err := DecodeAccountValue(encoded) - require.NoError(t, err) - require.Equal(t, original, decoded) - }) - - t.Run("InvalidLength", func(t *testing.T) { - // Too short - _, err := DecodeAccountValue([]byte{0x00}) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid account value length") - - 
// In between EOA and Contract lengths - _, err = DecodeAccountValue(make([]byte, 50)) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid account value length") - - // Too long - _, err = DecodeAccountValue(make([]byte, 100)) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid account value length") - }) - - t.Run("NonceIsBigEndianUint64", func(t *testing.T) { - // Test with EOA - original := AccountValue{ - Nonce: math.MaxUint64, - } - encoded := EncodeAccountValue(original) - decoded, err := DecodeAccountValue(encoded) - require.NoError(t, err) - require.Equal(t, original.Nonce, decoded.Nonce) - - // Test with Contract - var codeHash CodeHash - copy(codeHash[:], randomBytes(CodeHashLen)) - originalContract := AccountValue{ - Nonce: math.MaxUint64, - CodeHash: codeHash, - } - encodedContract := EncodeAccountValue(originalContract) - decodedContract, err := DecodeAccountValue(encodedContract) - require.NoError(t, err) - require.Equal(t, originalContract.Nonce, decodedContract.Nonce) - }) - - t.Run("HasCodeMethod", func(t *testing.T) { - // EOA - no code - eoa := AccountValue{CodeHash: CodeHash{}} - require.False(t, eoa.HasCode()) - - // Contract - has code (any non-zero hash) - var codeHash CodeHash - codeHash[0] = 0x01 // Just one non-zero byte is enough - contract := AccountValue{CodeHash: codeHash} - require.True(t, contract.HasCode()) - }) -} - -func TestAccountValueIsEmpty(t *testing.T) { - require.True(t, AccountValue{}.IsEmpty(), "zero-value AccountValue should be empty") - - require.False(t, AccountValue{Nonce: 1}.IsEmpty(), "non-zero nonce") - require.False(t, AccountValue{CodeHash: CodeHash{0x01}}.IsEmpty(), "non-zero codehash") - require.False(t, AccountValue{Balance: Balance{0x01}}.IsEmpty(), "non-zero balance") - - require.False(t, AccountValue{ - Balance: Balance{0x01}, - Nonce: 42, - CodeHash: CodeHash{0xFF}, - }.IsEmpty(), "all non-zero fields") -} - func TestFlatKVTypeConversions(t *testing.T) { 
t.Run("AddressFromBytes", func(t *testing.T) { valid := make([]byte, AddressLen) diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index d6c58b4286..93e47a358d 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -10,6 +10,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" ) @@ -51,7 +52,7 @@ func fullScanLtHash(t *testing.T, s *CommitStore) *lthash.LtHash { // ---------- helpers to build memiavl-format changeset pairs ---------- func nonceBytes(n uint64) []byte { - b := make([]byte, NonceLen) + b := make([]byte, vtype.NonceLen) binary.BigEndian.PutUint64(b, n) return b } @@ -68,8 +69,8 @@ func slotN(n byte) Slot { return s } -func codeHashN(n byte) CodeHash { - var h CodeHash +func codeHashN(n byte) vtype.CodeHash { + var h vtype.CodeHash for i := range h { h[i] = n } @@ -83,7 +84,7 @@ func noncePair(addr Address, nonce uint64) *iavl.KVPair { } } -func codeHashPair(addr Address, ch CodeHash) *iavl.KVPair { +func codeHashPair(addr Address, ch vtype.CodeHash) *iavl.KVPair { return &iavl.KVPair{ Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: ch[:], @@ -988,7 +989,11 @@ func TestLtHashAccountDeleteThenRecreate(t *testing.T) { raw, err := s.accountDB.Get(AccountKey(addr)) require.NoError(t, err) - require.Equal(t, accountValueEOALen, len(raw), "row should be 40-byte EOA encoding") + ad, err := vtype.DeserializeAccountData(raw) + require.NoError(t, err) + require.Equal(t, uint64(99), ad.GetNonce()) + var zeroHash vtype.CodeHash + require.Equal(t, &zeroHash, ad.GetCodeHash(), "codehash should be zero (EOA)") } 
func TestLtHashAccountPartialDeletePreservesRow(t *testing.T) { @@ -1011,7 +1016,11 @@ func TestLtHashAccountPartialDeletePreservesRow(t *testing.T) { raw, err := s.accountDB.Get(AccountKey(addr)) require.NoError(t, err, "row should still exist after partial delete") - require.Equal(t, accountValueEOALen, len(raw), "should shrink to EOA encoding") + ad, err := vtype.DeserializeAccountData(raw) + require.NoError(t, err) + require.Equal(t, uint64(3), ad.GetNonce(), "nonce should be preserved") + var zeroHash vtype.CodeHash + require.Equal(t, &zeroHash, ad.GetCodeHash(), "codehash should be zero after delete") } // TestAccountPendingReadPartialDelete verifies that the isDelete guard in @@ -1045,7 +1054,7 @@ func TestAccountPendingReadPartialDelete(t *testing.T) { paw := s.accountWrites[string(addr[:])] require.NotNil(t, paw) - require.False(t, paw.isDelete, "row should NOT be marked for deletion (partial delete)") + require.False(t, paw.IsDelete(), "row should NOT be marked for deletion (partial delete)") } // TestAccountRowDeleteGetBeforeCommit verifies the core behavioral change: @@ -1094,7 +1103,7 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { // Verify isDelete is set paw := s.accountWrites[string(addr[:])] require.NotNil(t, paw) - require.True(t, paw.isDelete, "row should be marked for deletion (all fields zero)") + require.True(t, paw.IsDelete(), "row should be marked for deletion (all fields zero)") } // TestLtHashAccountWriteZeroGC verifies that writing a zero value (not a diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 11d9560386..73accab2b5 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -11,6 +11,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + 
"github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" ) @@ -1717,10 +1718,10 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { Name: "evm", Changeset: iavl.ChangeSet{Pairs: []*iavl.KVPair{ {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]), Value: []byte{0, 0, 0, 0, 0, 0, 0, 5}}, - {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: make([]byte, CodeHashLen)}, + {Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]), Value: make([]byte, vtype.CodeHashLen)}, }}, } - ch := CodeHash{0xAA} + ch := vtype.CodeHash{0xAA} cs1.Changeset.Pairs[1].Value = ch[:] require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) _, err = s.Commit() diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index f2a6bed90e..ebc6cba039 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -153,7 +153,7 @@ func sortChangeSets( if _, ok := supportedKeyTypes[kind]; !ok { if strict { - return nil, fmt.Errorf("unsupported key type: %s", kind) + return nil, fmt.Errorf("unsupported key type: %v", kind) } else { logger.Warn("unsupported key type", "key", kind) continue diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 5446c56251..b4b71d2084 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -8,6 +8,7 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" ) @@ -21,7 +22,7 @@ func TestStoreNonStorageKeys(t *testing.T) { defer s.Close() addr := Address{0x99} - 
codeHash := CodeHash{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, + codeHash := vtype.CodeHash{0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x00} @@ -300,7 +301,7 @@ func TestAccountValueStorage(t *testing.T) { defer s.Close() addr := Address{0xFF, 0xFF} - expectedCodeHash := CodeHash{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB} + expectedCodeHash := vtype.CodeHash{0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB} // Write both Nonce and CodeHash for the same address // AccountValue stores: balance(32) || nonce(8) || codehash(32) @@ -334,11 +335,12 @@ func TestAccountValueStorage(t *testing.T) { require.NotNil(t, stored) // Decode and verify - av, err := DecodeAccountValue(stored) + ad, err := vtype.DeserializeAccountData(stored) require.NoError(t, err) - require.Equal(t, uint64(42), av.Nonce, "Nonce should be 42") - require.Equal(t, expectedCodeHash, av.CodeHash, "CodeHash should match") - require.Equal(t, Balance{}, av.Balance, "Balance should be zero") + require.Equal(t, uint64(42), ad.GetNonce(), "Nonce should be 42") + require.Equal(t, &expectedCodeHash, ad.GetCodeHash(), "CodeHash should match") + var zeroBalance vtype.Balance + require.Equal(t, &zeroBalance, ad.GetBalance(), "Balance should be zero") // Get method should return individual fields nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) @@ -665,7 +667,7 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { addr := Address{0xBB} nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, 
addr[:]) - codeHash := CodeHash{0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00, 0x00, 0x00, + codeHash := vtype.CodeHash{0xDE, 0xAD, 0xBE, 0xEF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} @@ -741,7 +743,7 @@ func TestLtHashAccountFieldMerge(t *testing.T) { addr := Address{0xCC} nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHash := CodeHash{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + codeHash := vtype.CodeHash{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20} @@ -762,7 +764,7 @@ func TestLtHashAccountFieldMerge(t *testing.T) { accountWrite := s.accountWrites[string(addr[:])] require.NotNil(t, accountWrite) require.Equal(t, uint64(10), accountWrite.GetNonce()) - require.Equal(t, codeHash, accountWrite.GetCodeHash()) + require.Equal(t, &codeHash, accountWrite.GetCodeHash()) } // ============================================================================= @@ -1211,7 +1213,7 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { } func bytesToNonce(b []byte) uint64 { - if len(b) != NonceLen { + if len(b) != vtype.NonceLen { return 0 } return binary.BigEndian.Uint64(b) @@ -1227,42 +1229,43 @@ func TestAccountValueEncodingTransition(t *testing.T) { addr := addrN(0x01) - // Step 1: Write nonce only → EOA encoding (40 bytes) + // Step 1: Write nonce only (AccountData always 81 bytes) cs1 := namedCS(noncePair(addr, 7)) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) raw1, err := s.accountDB.Get(AccountKey(addr)) require.NoError(t, err) - require.Equal(t, accountValueEOALen, len(raw1), "nonce-only should produce EOA encoding (40 bytes)") + ad1, err := 
vtype.DeserializeAccountData(raw1) + require.NoError(t, err) + require.Equal(t, uint64(7), ad1.GetNonce()) + var zeroHash vtype.CodeHash + require.Equal(t, &zeroHash, ad1.GetCodeHash(), "nonce-only should have zero codehash") - // Step 2: Add codehash → contract encoding (72 bytes) + // Step 2: Add codehash cs2 := namedCS(codeHashPair(addr, codeHashN(0xAB))) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) raw2, err := s.accountDB.Get(AccountKey(addr)) require.NoError(t, err) - require.Equal(t, accountValueContractLen, len(raw2), "nonce+codehash should produce contract encoding (72 bytes)") - - av2, err := DecodeAccountValue(raw2) + ad2, err := vtype.DeserializeAccountData(raw2) require.NoError(t, err) - require.Equal(t, uint64(7), av2.Nonce, "nonce should be preserved after codehash write") - require.Equal(t, codeHashN(0xAB), av2.CodeHash) + require.Equal(t, uint64(7), ad2.GetNonce(), "nonce should be preserved after codehash write") + expectedCH := codeHashN(0xAB) + require.Equal(t, &expectedCH, ad2.GetCodeHash()) - // Step 3: Delete codehash → back to EOA encoding (40 bytes) + // Step 3: Delete codehash → back to zero codehash cs3 := namedCS(codeHashDeletePair(addr)) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs3})) commitAndCheck(t, s) raw3, err := s.accountDB.Get(AccountKey(addr)) require.NoError(t, err) - require.Equal(t, accountValueEOALen, len(raw3), "codehash delete should shrink back to EOA encoding (40 bytes)") - - av3, err := DecodeAccountValue(raw3) + ad3, err := vtype.DeserializeAccountData(raw3) require.NoError(t, err) - require.Equal(t, uint64(7), av3.Nonce, "nonce should survive codehash deletion") - require.Equal(t, CodeHash{}, av3.CodeHash, "codehash should be zero after delete") + require.Equal(t, uint64(7), ad3.GetNonce(), "nonce should survive codehash deletion") + require.Equal(t, &zeroHash, ad3.GetCodeHash(), "codehash should be zero after delete") } // 
============================================================================= diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go index 6bb5149496..f3618b46f2 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go @@ -19,9 +19,9 @@ const testdataDir = "testdata" func TestSerializationGoldenFile_V0(t *testing.T) { ad := NewAccountData(). SetBlockHeight(100). - SetBalance(toArray32(leftPad32([]byte{1}))). + SetBalance(toBalance(leftPad32([]byte{1}))). SetNonce(42). - SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))) serialized := ad.Serialize() @@ -44,8 +44,8 @@ func TestSerializationGoldenFile_V0(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(100), rt.GetBlockHeight()) require.Equal(t, uint64(42), rt.GetNonce()) - require.Equal(t, toArray32(leftPad32([]byte{1})), rt.GetBalance()) - require.Equal(t, toArray32(bytes.Repeat([]byte{0xaa}, 32)), rt.GetCodeHash()) + require.Equal(t, toBalance(leftPad32([]byte{1})), rt.GetBalance()) + require.Equal(t, toCodeHash(bytes.Repeat([]byte{0xaa}, 32)), rt.GetCodeHash()) } func TestNewAccountData_ZeroInitialized(t *testing.T) { @@ -54,8 +54,8 @@ func TestNewAccountData_ZeroInitialized(t *testing.T) { require.Equal(t, AccountDataVersion0, ad.GetSerializationVersion()) require.Equal(t, int64(0), ad.GetBlockHeight()) require.Equal(t, uint64(0), ad.GetNonce()) - require.Equal(t, &zero, ad.GetBalance()) - require.Equal(t, &zero, ad.GetCodeHash()) + require.Equal(t, (*Balance)(&zero), ad.GetBalance()) + require.Equal(t, (*CodeHash)(&zero), ad.GetCodeHash()) } func TestSerializeLength(t *testing.T) { @@ -64,8 +64,8 @@ func TestSerializeLength(t *testing.T) { } func TestRoundTrip_AllFieldsSet(t *testing.T) { - balance := toArray32(leftPad32([]byte{0xff, 0xee, 0xdd})) - codeHash := toArray32(bytes.Repeat([]byte{0xab}, 32)) + 
balance := toBalance(leftPad32([]byte{0xff, 0xee, 0xdd})) + codeHash := toCodeHash(bytes.Repeat([]byte{0xab}, 32)) ad := NewAccountData(). SetBlockHeight(999). @@ -88,13 +88,13 @@ func TestRoundTrip_ZeroValues(t *testing.T) { var zero [32]byte require.Equal(t, int64(0), rt.GetBlockHeight()) require.Equal(t, uint64(0), rt.GetNonce()) - require.Equal(t, &zero, rt.GetBalance()) - require.Equal(t, &zero, rt.GetCodeHash()) + require.Equal(t, (*Balance)(&zero), rt.GetBalance()) + require.Equal(t, (*CodeHash)(&zero), rt.GetCodeHash()) } func TestRoundTrip_MaxValues(t *testing.T) { - maxBalance := toArray32(bytes.Repeat([]byte{0xff}, 32)) - maxCodeHash := toArray32(bytes.Repeat([]byte{0xff}, 32)) + maxBalance := toBalance(bytes.Repeat([]byte{0xff}, 32)) + maxCodeHash := toCodeHash(bytes.Repeat([]byte{0xff}, 32)) maxNonce := uint64(0xffffffffffffffff) maxBlockHeight := int64(math.MaxInt64) @@ -118,7 +118,7 @@ func TestIsDelete_AllZeroPayload(t *testing.T) { } func TestIsDelete_NonZeroBalance(t *testing.T) { - ad := NewAccountData().SetBalance(toArray32(leftPad32([]byte{1}))) + ad := NewAccountData().SetBalance(toBalance(leftPad32([]byte{1}))) require.False(t, ad.IsDelete()) } @@ -128,7 +128,7 @@ func TestIsDelete_NonZeroNonce(t *testing.T) { } func TestIsDelete_NonZeroCodeHash(t *testing.T) { - ad := NewAccountData().SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + ad := NewAccountData().SetCodeHash(toCodeHash(bytes.Repeat([]byte{0x01}, 32))) require.False(t, ad.IsDelete()) } @@ -162,9 +162,9 @@ func TestDeserialize_UnsupportedVersion(t *testing.T) { func TestSetterChaining(t *testing.T) { ad := NewAccountData(). SetBlockHeight(1). - SetBalance(toArray32(leftPad32([]byte{2}))). + SetBalance(toBalance(leftPad32([]byte{2}))). SetNonce(3). 
- SetCodeHash(toArray32(leftPad32([]byte{4}))) + SetCodeHash(toCodeHash(leftPad32([]byte{4}))) require.Equal(t, int64(1), ad.GetBlockHeight()) require.Equal(t, uint64(3), ad.GetNonce()) @@ -181,7 +181,15 @@ func leftPad32(b []byte) []byte { return padded } -// toArray32 converts a []byte to a *[32]byte. +// toArray32 converts a []byte to a *[32]byte (len must be 32). func toArray32(b []byte) *[32]byte { return (*[32]byte)(b) } + +func toBalance(b []byte) *Balance { + return (*Balance)(b) +} + +func toCodeHash(b []byte) *CodeHash { + return (*CodeHash)(b) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go index 6a8cfa96ce..9115c0fcde 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go @@ -13,7 +13,7 @@ import ( func TestLegacySerializationGoldenFile_V0(t *testing.T) { value := []byte{0xca, 0xfe, 0xba, 0xbe} - ld := NewLegacyData(value). + ld := NewLegacyData().SetValue(value). 
SetBlockHeight(100) serialized := ld.Serialize() @@ -40,14 +40,14 @@ func TestLegacySerializationGoldenFile_V0(t *testing.T) { func TestLegacyNewWithValue(t *testing.T) { value := []byte{0x01, 0x02, 0x03} - ld := NewLegacyData(value) + ld := NewLegacyData().SetValue(value) require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) require.Equal(t, int64(0), ld.GetBlockHeight()) require.Equal(t, value, ld.GetValue()) } func TestLegacyNewEmpty(t *testing.T) { - ld := NewLegacyData(nil) + ld := NewLegacyData() require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) require.Equal(t, int64(0), ld.GetBlockHeight()) require.Empty(t, ld.GetValue()) @@ -55,18 +55,18 @@ func TestLegacyNewEmpty(t *testing.T) { func TestLegacySerializeLength(t *testing.T) { value := []byte{0x01, 0x02, 0x03} - ld := NewLegacyData(value) + ld := NewLegacyData().SetValue(value) require.Len(t, ld.Serialize(), legacyHeaderLength+len(value)) } func TestLegacySerializeLength_Empty(t *testing.T) { - ld := NewLegacyData(nil) + ld := NewLegacyData() require.Len(t, ld.Serialize(), legacyHeaderLength) } func TestLegacyRoundTrip_WithValue(t *testing.T) { value := bytes.Repeat([]byte{0xab}, 1000) - ld := NewLegacyData(value). + ld := NewLegacyData().SetValue(value). SetBlockHeight(999) rt, err := DeserializeLegacyData(ld.Serialize()) @@ -76,7 +76,7 @@ func TestLegacyRoundTrip_WithValue(t *testing.T) { } func TestLegacyRoundTrip_EmptyValue(t *testing.T) { - ld := NewLegacyData(nil). + ld := NewLegacyData(). SetBlockHeight(42) rt, err := DeserializeLegacyData(ld.Serialize()) @@ -86,7 +86,7 @@ func TestLegacyRoundTrip_EmptyValue(t *testing.T) { } func TestLegacyRoundTrip_MaxBlockHeight(t *testing.T) { - ld := NewLegacyData([]byte{0xff}). + ld := NewLegacyData().SetValue([]byte{0xff}). 
SetBlockHeight(math.MaxInt64) rt, err := DeserializeLegacyData(ld.Serialize()) @@ -96,17 +96,17 @@ func TestLegacyRoundTrip_MaxBlockHeight(t *testing.T) { } func TestLegacyIsDelete_EmptyValue(t *testing.T) { - ld := NewLegacyData(nil).SetBlockHeight(500) + ld := NewLegacyData().SetBlockHeight(500) require.True(t, ld.IsDelete()) } func TestLegacyIsDelete_EmptySlice(t *testing.T) { - ld := NewLegacyData([]byte{}) + ld := NewLegacyData().SetValue([]byte{}) require.True(t, ld.IsDelete()) } func TestLegacyIsDelete_NonEmptyValue(t *testing.T) { - ld := NewLegacyData([]byte{0x01}) + ld := NewLegacyData().SetValue([]byte{0x01}) require.False(t, ld.IsDelete()) } @@ -126,7 +126,7 @@ func TestLegacyDeserialize_TooShort(t *testing.T) { } func TestLegacyDeserialize_HeaderOnly(t *testing.T) { - ld := NewLegacyData(nil) + ld := NewLegacyData() rt, err := DeserializeLegacyData(ld.Serialize()) require.NoError(t, err) require.Empty(t, rt.GetValue()) @@ -140,7 +140,7 @@ func TestLegacyDeserialize_UnsupportedVersion(t *testing.T) { } func TestLegacySetterChaining(t *testing.T) { - ld := NewLegacyData([]byte{0x01}). + ld := NewLegacyData().SetValue([]byte{0x01}). 
SetBlockHeight(42) require.Equal(t, int64(42), ld.GetBlockHeight()) @@ -153,7 +153,7 @@ func TestLegacyConstantLayout_V0(t *testing.T) { func TestLegacyNewCopiesValue(t *testing.T) { value := []byte{0x01, 0x02, 0x03} - ld := NewLegacyData(value) + ld := NewLegacyData().SetValue(value) value[0] = 0xff require.Equal(t, byte(0x01), ld.GetValue()[0]) } diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go index 6c9832b67c..ac6861295c 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go @@ -16,18 +16,18 @@ func TestPAW_SetNonce_MergeOntoZeroBase(t *testing.T) { require.Equal(t, uint64(42), result.GetNonce()) require.Equal(t, int64(100), result.GetBlockHeight()) var zero [32]byte - require.Equal(t, &zero, result.GetBalance()) - require.Equal(t, &zero, result.GetCodeHash()) + require.Equal(t, (*Balance)(&zero), result.GetBalance()) + require.Equal(t, (*CodeHash)(&zero), result.GetCodeHash()) } func TestPAW_SetCodeHash_MergeOntoExistingAccount(t *testing.T) { base := NewAccountData(). SetBlockHeight(50). - SetBalance(toArray32(leftPad32([]byte{0xff}))). + SetBalance(toBalance(leftPad32([]byte{0xff}))). SetNonce(10). 
- SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))) - newCodeHash := toArray32(bytes.Repeat([]byte{0xbb}, 32)) + newCodeHash := toCodeHash(bytes.Repeat([]byte{0xbb}, 32)) paw := NewPendingAccountWrite().SetCodeHash(newCodeHash) result := paw.Merge(base, 100) @@ -35,7 +35,7 @@ func TestPAW_SetCodeHash_MergeOntoExistingAccount(t *testing.T) { // Changed field require.Equal(t, newCodeHash, result.GetCodeHash()) // Unchanged fields carried over from base - require.Equal(t, toArray32(leftPad32([]byte{0xff})), result.GetBalance()) + require.Equal(t, toBalance(leftPad32([]byte{0xff})), result.GetBalance()) require.Equal(t, uint64(10), result.GetNonce()) // Block height updated require.Equal(t, int64(100), result.GetBlockHeight()) @@ -44,10 +44,10 @@ func TestPAW_SetCodeHash_MergeOntoExistingAccount(t *testing.T) { func TestPAW_SetBalance_MergeOntoExistingAccount(t *testing.T) { base := NewAccountData(). SetBlockHeight(50). - SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetBalance(toBalance(leftPad32([]byte{0x01}))). SetNonce(5) - newBalance := toArray32(leftPad32([]byte{0x02})) + newBalance := toBalance(leftPad32([]byte{0x02})) paw := NewPendingAccountWrite().SetBalance(newBalance) result := paw.Merge(base, 60) @@ -60,12 +60,12 @@ func TestPAW_SetBalance_MergeOntoExistingAccount(t *testing.T) { func TestPAW_MultipleFields(t *testing.T) { base := NewAccountData(). SetBlockHeight(1). - SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetBalance(toBalance(leftPad32([]byte{0x01}))). SetNonce(1). - SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0x01}, 32))) - newBalance := toArray32(leftPad32([]byte{0x02})) - newCodeHash := toArray32(bytes.Repeat([]byte{0x02}, 32)) + newBalance := toBalance(leftPad32([]byte{0x02})) + newCodeHash := toCodeHash(bytes.Repeat([]byte{0x02}, 32)) paw := NewPendingAccountWrite(). SetBalance(newBalance). SetNonce(99). 
@@ -90,35 +90,37 @@ func TestPAW_ZeroNonce(t *testing.T) { } func TestPAW_ZeroBalance(t *testing.T) { - base := NewAccountData().SetBalance(toArray32(leftPad32([]byte{0xff}))) - paw := NewPendingAccountWrite().SetBalance(&[32]byte{}) + base := NewAccountData().SetBalance(toBalance(leftPad32([]byte{0xff}))) + var zeroBal Balance + paw := NewPendingAccountWrite().SetBalance(&zeroBal) result := paw.Merge(base, 10) - var zero [32]byte - require.Equal(t, &zero, result.GetBalance()) + require.Equal(t, &zeroBal, result.GetBalance()) } func TestPAW_ZeroCodeHash(t *testing.T) { - base := NewAccountData().SetCodeHash(toArray32(bytes.Repeat([]byte{0xaa}, 32))) - paw := NewPendingAccountWrite().SetCodeHash(&[32]byte{}) + base := NewAccountData().SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))) + var zeroHash CodeHash + paw := NewPendingAccountWrite().SetCodeHash(&zeroHash) result := paw.Merge(base, 10) - var zero [32]byte - require.Equal(t, &zero, result.GetCodeHash()) + require.Equal(t, &zeroHash, result.GetCodeHash()) } func TestPAW_ZeroAllFields_ResultIsDelete(t *testing.T) { base := NewAccountData(). - SetBalance(toArray32(leftPad32([]byte{0x01}))). + SetBalance(toBalance(leftPad32([]byte{0x01}))). SetNonce(1). - SetCodeHash(toArray32(bytes.Repeat([]byte{0x01}, 32))) + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0x01}, 32))) + var zBal Balance + var zHash CodeHash paw := NewPendingAccountWrite(). - SetBalance(&[32]byte{}). + SetBalance(&zBal). SetNonce(0). 
- SetCodeHash(&[32]byte{}) + SetCodeHash(&zHash) result := paw.Merge(base, 10) @@ -149,16 +151,22 @@ func TestPAW_IsSetFlags(t *testing.T) { require.True(t, paw.IsNonceSet()) require.False(t, paw.IsCodeHashSet()) - paw.SetBalance(&[32]byte{1}) + balSet := Balance{} + balSet[0] = 1 + paw.SetBalance(&balSet) require.True(t, paw.IsBalanceSet()) - paw.SetCodeHash(&[32]byte{2}) + chSet := CodeHash{} + chSet[0] = 2 + paw.SetCodeHash(&chSet) require.True(t, paw.IsCodeHashSet()) } func TestPAW_GettersReturnSetValues(t *testing.T) { - bal := [32]byte{0xab} - ch := [32]byte{0xcd} + bal := Balance{} + bal[0] = 0xab + ch := CodeHash{} + ch[0] = 0xcd paw := NewPendingAccountWrite(). SetBalance(&bal). SetNonce(123). From caadeef32c7b1757a3f4f1c0a5b38482d1c01698 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 14:22:58 -0500 Subject: [PATCH 107/119] lint --- sei-db/state_db/sc/flatkv/store_apply.go | 66 +++++++++++------------- 1 file changed, 30 insertions(+), 36 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index ebc6cba039..bdd22afef6 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -244,7 +244,7 @@ func gatherLTHashPairs[T vtype.VType]( oldValues map[string]T, ) []lthash.KVPairWithLastValue { - var pairs []lthash.KVPairWithLastValue = make([]lthash.KVPairWithLastValue, 0, len(newValues)) + pairs := make([]lthash.KVPairWithLastValue, 0, len(newValues)) for keyStr, newValue := range newValues { var oldValue = oldValues[keyStr] @@ -280,50 +280,44 @@ func mergeAccountUpdates( // PendingAccountWrite objects are well behaved when nil, no need to bootstrap map entries. 
updates := make(map[string]*vtype.PendingAccountWrite) - if nonceChanges != nil { - for key, nonceChange := range nonceChanges { - if nonceChange == nil { - // Deletion is equivalent to setting the nonce to 0 - updates[key] = updates[key].SetNonce(0) - } else { - nonce, err := vtype.ParseNonce(nonceChange) - if err != nil { - return nil, fmt.Errorf("invalid nonce value: %w", err) - } - updates[key] = updates[key].SetNonce(nonce) + for key, nonceChange := range nonceChanges { + if nonceChange == nil { + // Deletion is equivalent to setting the nonce to 0 + updates[key] = updates[key].SetNonce(0) + } else { + nonce, err := vtype.ParseNonce(nonceChange) + if err != nil { + return nil, fmt.Errorf("invalid nonce value: %w", err) } + updates[key] = updates[key].SetNonce(nonce) } } - if codeHashChanges != nil { - for key, codeHashChange := range codeHashChanges { - if codeHashChange == nil { - // Deletion is equivalent to setting the code hash to a zero hash - var zero vtype.CodeHash - updates[key] = updates[key].SetCodeHash(&zero) - } else { - codeHash, err := vtype.ParseCodeHash(codeHashChange) - if err != nil { - return nil, fmt.Errorf("invalid codehash value: %w", err) - } - updates[key] = updates[key].SetCodeHash(codeHash) + for key, codeHashChange := range codeHashChanges { + if codeHashChange == nil { + // Deletion is equivalent to setting the code hash to a zero hash + var zero vtype.CodeHash + updates[key] = updates[key].SetCodeHash(&zero) + } else { + codeHash, err := vtype.ParseCodeHash(codeHashChange) + if err != nil { + return nil, fmt.Errorf("invalid codehash value: %w", err) } + updates[key] = updates[key].SetCodeHash(codeHash) } } - if balanceChanges != nil { - for key, balanceChange := range balanceChanges { - if balanceChange == nil { - // Deletion is equivalent to setting the balance to a zero balance - var zero vtype.Balance - updates[key] = updates[key].SetBalance(&zero) - } else { - balance, err := vtype.ParseBalance(balanceChange) - if err != nil { 
- return nil, fmt.Errorf("invalid balance value: %w", err) - } - updates[key] = updates[key].SetBalance(balance) + for key, balanceChange := range balanceChanges { + if balanceChange == nil { + // Deletion is equivalent to setting the balance to a zero balance + var zero vtype.Balance + updates[key] = updates[key].SetBalance(&zero) + } else { + balance, err := vtype.ParseBalance(balanceChange) + if err != nil { + return nil, fmt.Errorf("invalid balance value: %w", err) } + updates[key] = updates[key].SetBalance(balance) } } return updates, nil From aab88b16c7236b5752c450cdf698ff57bef55b47 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 14:30:32 -0500 Subject: [PATCH 108/119] test cleanup --- .../state_db/sc/flatkv/vtype/account_data.go | 8 + .../sc/flatkv/vtype/account_data_test.go | 89 +++++++++ .../sc/flatkv/vtype/base_types_test.go | 184 ++++++++++++++++++ .../sc/flatkv/vtype/code_data_test.go | 52 +++++ .../sc/flatkv/vtype/legacy_data_test.go | 52 +++++ .../vtype/pending_account_write_test.go | 74 +++++++ .../state_db/sc/flatkv/vtype/storage_data.go | 4 + .../sc/flatkv/vtype/storage_data_test.go | 51 +++++ 8 files changed, 514 insertions(+) create mode 100644 sei-db/state_db/sc/flatkv/vtype/base_types_test.go diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index da8a7524ec..99a43c7948 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -164,6 +164,10 @@ func (a *AccountData) SetBalance(balance *Balance) *AccountData { if a == nil { a = NewAccountData() } + if balance == nil { + var zero Balance + balance = &zero + } copy(a.data[accountBalanceStart:accountNonceStart], balance[:]) return a } @@ -182,6 +186,10 @@ func (a *AccountData) SetCodeHash(codeHash *CodeHash) *AccountData { if a == nil { a = NewAccountData() } + if codeHash == nil { + var zero CodeHash + codeHash = &zero + } 
copy(a.data[accountCodeHashStart:accountDataLength], codeHash[:]) return a } diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go index f3618b46f2..caac87a156 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go @@ -174,6 +174,95 @@ func TestConstantLayout_V0(t *testing.T) { require.Equal(t, 81, accountDataLength) } +func TestNilAccountData_Getters(t *testing.T) { + var ad *AccountData + var zero [32]byte + + require.Equal(t, AccountDataVersion0, ad.GetSerializationVersion()) + require.Equal(t, int64(0), ad.GetBlockHeight()) + require.Equal(t, uint64(0), ad.GetNonce()) + require.Equal(t, (*Balance)(&zero), ad.GetBalance()) + require.Equal(t, (*CodeHash)(&zero), ad.GetCodeHash()) +} + +func TestNilAccountData_IsDelete(t *testing.T) { + var ad *AccountData + require.True(t, ad.IsDelete()) +} + +func TestNilAccountData_Serialize(t *testing.T) { + var ad *AccountData + s := ad.Serialize() + require.Len(t, s, accountDataLength) + for _, b := range s { + require.Equal(t, byte(0), b) + } +} + +func TestNilAccountData_SerializeRoundTrips(t *testing.T) { + var ad *AccountData + rt, err := DeserializeAccountData(ad.Serialize()) + require.NoError(t, err) + require.True(t, rt.IsDelete()) +} + +func TestNilAccountData_Copy(t *testing.T) { + var ad *AccountData + cp := ad.Copy() + require.NotNil(t, cp) + require.True(t, cp.IsDelete()) + require.Len(t, cp.Serialize(), accountDataLength) +} + +func TestNilAccountData_SettersAutoCreate(t *testing.T) { + var a1 *AccountData + a1 = a1.SetBlockHeight(42) + require.NotNil(t, a1) + require.Equal(t, int64(42), a1.GetBlockHeight()) + + var a2 *AccountData + a2 = a2.SetNonce(7) + require.NotNil(t, a2) + require.Equal(t, uint64(7), a2.GetNonce()) + + var a3 *AccountData + bal := Balance{0x01} + a3 = a3.SetBalance(&bal) + require.NotNil(t, a3) + require.Equal(t, &bal, a3.GetBalance()) + + var a4 
*AccountData + ch := CodeHash{0x02} + a4 = a4.SetCodeHash(&ch) + require.NotNil(t, a4) + require.Equal(t, &ch, a4.GetCodeHash()) +} + +func TestAccountData_CopyIndependence(t *testing.T) { + ad := NewAccountData().SetNonce(10).SetBlockHeight(5) + cp := ad.Copy() + + cp.SetNonce(99) + require.Equal(t, uint64(10), ad.GetNonce(), "original must not change") + require.Equal(t, uint64(99), cp.GetNonce()) +} + +func TestAccountData_SetBalanceNilZeros(t *testing.T) { + ad := NewAccountData(). + SetBalance(toBalance(leftPad32([]byte{0xff}))). + SetBalance(nil) + var zero Balance + require.Equal(t, &zero, ad.GetBalance()) +} + +func TestAccountData_SetCodeHashNilZeros(t *testing.T) { + ad := NewAccountData(). + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))). + SetCodeHash(nil) + var zero CodeHash + require.Equal(t, &zero, ad.GetCodeHash()) +} + // leftPad32 returns a 32-byte slice with b right-aligned (big-endian style). func leftPad32(b []byte) []byte { padded := make([]byte, 32) diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types_test.go b/sei-db/state_db/sc/flatkv/vtype/base_types_test.go new file mode 100644 index 0000000000..2d5e60a373 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/base_types_test.go @@ -0,0 +1,184 @@ +package vtype + +import ( + "bytes" + "encoding/binary" + "testing" + + "github.com/stretchr/testify/require" +) + +// --- ParseNonce --- + +func TestParseNonce_Valid(t *testing.T) { + buf := make([]byte, NonceLen) + binary.BigEndian.PutUint64(buf, 42) + n, err := ParseNonce(buf) + require.NoError(t, err) + require.Equal(t, uint64(42), n) +} + +func TestParseNonce_Zero(t *testing.T) { + n, err := ParseNonce(make([]byte, NonceLen)) + require.NoError(t, err) + require.Equal(t, uint64(0), n) +} + +func TestParseNonce_MaxUint64(t *testing.T) { + buf := bytes.Repeat([]byte{0xff}, NonceLen) + n, err := ParseNonce(buf) + require.NoError(t, err) + require.Equal(t, uint64(0xffffffffffffffff), n) +} + +func TestParseNonce_TooShort(t *testing.T) { 
+ _, err := ParseNonce([]byte{0x01, 0x02}) + require.Error(t, err) +} + +func TestParseNonce_TooLong(t *testing.T) { + _, err := ParseNonce(make([]byte, NonceLen+1)) + require.Error(t, err) +} + +func TestParseNonce_Empty(t *testing.T) { + _, err := ParseNonce([]byte{}) + require.Error(t, err) +} + +func TestParseNonce_Nil(t *testing.T) { + _, err := ParseNonce(nil) + require.Error(t, err) +} + +// --- ParseCodeHash --- + +func TestParseCodeHash_Valid(t *testing.T) { + input := bytes.Repeat([]byte{0xab}, CodeHashLen) + ch, err := ParseCodeHash(input) + require.NoError(t, err) + require.Equal(t, input, ch[:]) +} + +func TestParseCodeHash_Zero(t *testing.T) { + ch, err := ParseCodeHash(make([]byte, CodeHashLen)) + require.NoError(t, err) + var zero CodeHash + require.Equal(t, &zero, ch) +} + +func TestParseCodeHash_CopiesInput(t *testing.T) { + input := bytes.Repeat([]byte{0xab}, CodeHashLen) + ch, err := ParseCodeHash(input) + require.NoError(t, err) + input[0] = 0xff + require.Equal(t, byte(0xab), ch[0], "ParseCodeHash must copy, not alias") +} + +func TestParseCodeHash_TooShort(t *testing.T) { + _, err := ParseCodeHash([]byte{0x01}) + require.Error(t, err) +} + +func TestParseCodeHash_TooLong(t *testing.T) { + _, err := ParseCodeHash(make([]byte, CodeHashLen+1)) + require.Error(t, err) +} + +func TestParseCodeHash_Empty(t *testing.T) { + _, err := ParseCodeHash([]byte{}) + require.Error(t, err) +} + +func TestParseCodeHash_Nil(t *testing.T) { + _, err := ParseCodeHash(nil) + require.Error(t, err) +} + +// --- ParseBalance --- + +func TestParseBalance_Valid(t *testing.T) { + input := leftPad32([]byte{0x01, 0x00}) + bal, err := ParseBalance(input) + require.NoError(t, err) + require.Equal(t, input, bal[:]) +} + +func TestParseBalance_Zero(t *testing.T) { + bal, err := ParseBalance(make([]byte, BalanceLen)) + require.NoError(t, err) + var zero Balance + require.Equal(t, &zero, bal) +} + +func TestParseBalance_CopiesInput(t *testing.T) { + input := 
bytes.Repeat([]byte{0xab}, BalanceLen) + bal, err := ParseBalance(input) + require.NoError(t, err) + input[0] = 0xff + require.Equal(t, byte(0xab), bal[0], "ParseBalance must copy, not alias") +} + +func TestParseBalance_TooShort(t *testing.T) { + _, err := ParseBalance([]byte{0x01}) + require.Error(t, err) +} + +func TestParseBalance_TooLong(t *testing.T) { + _, err := ParseBalance(make([]byte, BalanceLen+1)) + require.Error(t, err) +} + +func TestParseBalance_Empty(t *testing.T) { + _, err := ParseBalance([]byte{}) + require.Error(t, err) +} + +func TestParseBalance_Nil(t *testing.T) { + _, err := ParseBalance(nil) + require.Error(t, err) +} + +// --- ParseStorageValue --- + +func TestParseStorageValue_Valid(t *testing.T) { + input := leftPad32([]byte{0xde, 0xad}) + val, err := ParseStorageValue(input) + require.NoError(t, err) + require.Equal(t, input, val[:]) +} + +func TestParseStorageValue_Zero(t *testing.T) { + val, err := ParseStorageValue(make([]byte, SlotLen)) + require.NoError(t, err) + var zero [32]byte + require.Equal(t, &zero, val) +} + +func TestParseStorageValue_CopiesInput(t *testing.T) { + input := bytes.Repeat([]byte{0xab}, SlotLen) + val, err := ParseStorageValue(input) + require.NoError(t, err) + input[0] = 0xff + require.Equal(t, byte(0xab), val[0], "ParseStorageValue must copy, not alias") +} + +func TestParseStorageValue_TooShort(t *testing.T) { + _, err := ParseStorageValue([]byte{0x01}) + require.Error(t, err) +} + +func TestParseStorageValue_TooLong(t *testing.T) { + _, err := ParseStorageValue(make([]byte, SlotLen+1)) + require.Error(t, err) +} + +func TestParseStorageValue_Empty(t *testing.T) { + _, err := ParseStorageValue([]byte{}) + require.Error(t, err) +} + +func TestParseStorageValue_Nil(t *testing.T) { + _, err := ParseStorageValue(nil) + require.Error(t, err) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go index 4b434f9707..0f5650047f 100644 --- 
a/sei-db/state_db/sc/flatkv/vtype/code_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data_test.go @@ -158,3 +158,55 @@ func TestCodeNewCopiesBytecode(t *testing.T) { bytecode[0] = 0xff require.Equal(t, byte(0x01), cd.GetBytecode()[0]) } + +func TestNilCodeData_Getters(t *testing.T) { + var cd *CodeData + + require.Equal(t, CodeDataVersion0, cd.GetSerializationVersion()) + require.Equal(t, int64(0), cd.GetBlockHeight()) + require.Empty(t, cd.GetBytecode()) +} + +func TestNilCodeData_IsDelete(t *testing.T) { + var cd *CodeData + require.True(t, cd.IsDelete()) +} + +func TestNilCodeData_Serialize(t *testing.T) { + var cd *CodeData + s := cd.Serialize() + require.Len(t, s, codeBytecodeStart) +} + +func TestNilCodeData_SerializeRoundTrips(t *testing.T) { + var cd *CodeData + rt, err := DeserializeCodeData(cd.Serialize()) + require.NoError(t, err) + require.True(t, rt.IsDelete()) + require.Empty(t, rt.GetBytecode()) +} + +func TestNilCodeData_SettersAutoCreate(t *testing.T) { + var c1 *CodeData + c1 = c1.SetBlockHeight(42) + require.NotNil(t, c1) + require.Equal(t, int64(42), c1.GetBlockHeight()) + + var c2 *CodeData + c2 = c2.SetBytecode([]byte{0xAB}) + require.NotNil(t, c2) + require.Equal(t, []byte{0xAB}, c2.GetBytecode()) +} + +func TestCodeData_SetBytecodeOverwrite(t *testing.T) { + cd := NewCodeData().SetBytecode([]byte{0x01, 0x02, 0x03}) + cd.SetBytecode([]byte{0xAA}) + require.Equal(t, []byte{0xAA}, cd.GetBytecode()) +} + +func TestCodeData_SetBytecodeNil(t *testing.T) { + cd := NewCodeData().SetBytecode([]byte{0x01}) + cd = cd.SetBytecode(nil) + require.Empty(t, cd.GetBytecode()) + require.True(t, cd.IsDelete()) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go index 9115c0fcde..25026bb520 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data_test.go @@ -157,3 +157,55 @@ func TestLegacyNewCopiesValue(t *testing.T) { 
value[0] = 0xff require.Equal(t, byte(0x01), ld.GetValue()[0]) } + +func TestNilLegacyData_Getters(t *testing.T) { + var ld *LegacyData + + require.Equal(t, LegacyDataVersion0, ld.GetSerializationVersion()) + require.Equal(t, int64(0), ld.GetBlockHeight()) + require.Empty(t, ld.GetValue()) +} + +func TestNilLegacyData_IsDelete(t *testing.T) { + var ld *LegacyData + require.True(t, ld.IsDelete()) +} + +func TestNilLegacyData_Serialize(t *testing.T) { + var ld *LegacyData + s := ld.Serialize() + require.Len(t, s, legacyHeaderLength) +} + +func TestNilLegacyData_SerializeRoundTrips(t *testing.T) { + var ld *LegacyData + rt, err := DeserializeLegacyData(ld.Serialize()) + require.NoError(t, err) + require.True(t, rt.IsDelete()) + require.Empty(t, rt.GetValue()) +} + +func TestNilLegacyData_SettersAutoCreate(t *testing.T) { + var l1 *LegacyData + l1 = l1.SetBlockHeight(42) + require.NotNil(t, l1) + require.Equal(t, int64(42), l1.GetBlockHeight()) + + var l2 *LegacyData + l2 = l2.SetValue([]byte{0xAB}) + require.NotNil(t, l2) + require.Equal(t, []byte{0xAB}, l2.GetValue()) +} + +func TestLegacyData_SetValueOverwrite(t *testing.T) { + ld := NewLegacyData().SetValue([]byte{0x01, 0x02, 0x03}) + ld = ld.SetValue([]byte{0xAA}) + require.Equal(t, []byte{0xAA}, ld.GetValue()) +} + +func TestLegacyData_SetValueNil(t *testing.T) { + ld := NewLegacyData().SetValue([]byte{0x01}) + ld = ld.SetValue(nil) + require.Empty(t, ld.GetValue()) + require.True(t, ld.IsDelete()) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go index ac6861295c..72f38dd94d 100644 --- a/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/pending_account_write_test.go @@ -200,3 +200,77 @@ func TestPAW_SetThenZero(t *testing.T) { result := paw.Merge(base, 10) require.Equal(t, uint64(0), result.GetNonce()) } + +func TestNilPAW_Getters(t *testing.T) { + var paw 
*PendingAccountWrite + var zeroBal Balance + var zeroHash CodeHash + + require.Equal(t, &zeroBal, paw.GetBalance()) + require.Equal(t, uint64(0), paw.GetNonce()) + require.Equal(t, &zeroHash, paw.GetCodeHash()) +} + +func TestNilPAW_IsSetFlags(t *testing.T) { + var paw *PendingAccountWrite + require.False(t, paw.IsBalanceSet()) + require.False(t, paw.IsNonceSet()) + require.False(t, paw.IsCodeHashSet()) +} + +func TestNilPAW_SettersAutoCreate(t *testing.T) { + var p1 *PendingAccountWrite + p1 = p1.SetNonce(5) + require.NotNil(t, p1) + require.Equal(t, uint64(5), p1.GetNonce()) + require.True(t, p1.IsNonceSet()) + + var p2 *PendingAccountWrite + bal := Balance{0x01} + p2 = p2.SetBalance(&bal) + require.NotNil(t, p2) + require.Equal(t, &bal, p2.GetBalance()) + require.True(t, p2.IsBalanceSet()) + + var p3 *PendingAccountWrite + ch := CodeHash{0x02} + p3 = p3.SetCodeHash(&ch) + require.NotNil(t, p3) + require.Equal(t, &ch, p3.GetCodeHash()) + require.True(t, p3.IsCodeHashSet()) +} + +func TestNilPAW_MergeOntoBase(t *testing.T) { + base := NewAccountData(). + SetBlockHeight(50). + SetNonce(10). 
+ SetBalance(toBalance(leftPad32([]byte{0xff}))) + + var paw *PendingAccountWrite + result := paw.Merge(base, 100) + + require.Equal(t, int64(100), result.GetBlockHeight()) + require.Equal(t, uint64(10), result.GetNonce()) + require.Equal(t, toBalance(leftPad32([]byte{0xff})), result.GetBalance()) +} + +func TestNilPAW_MergeOntoNilBase(t *testing.T) { + var paw *PendingAccountWrite + result := paw.Merge(nil, 100) + + require.NotNil(t, result) + require.Equal(t, int64(100), result.GetBlockHeight()) + require.True(t, result.IsDelete()) +} + +func TestPAW_MergeOntoNilBase(t *testing.T) { + paw := NewPendingAccountWrite().SetNonce(42) + result := paw.Merge(nil, 100) + + require.NotNil(t, result) + require.Equal(t, int64(100), result.GetBlockHeight()) + require.Equal(t, uint64(42), result.GetNonce()) + var zero [32]byte + require.Equal(t, (*Balance)(&zero), result.GetBalance()) + require.Equal(t, (*CodeHash)(&zero), result.GetCodeHash()) +} diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data.go b/sei-db/state_db/sc/flatkv/vtype/storage_data.go index 1ac3d30644..c8de461f4a 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data.go @@ -133,6 +133,10 @@ func (s *StorageData) SetValue(value *[32]byte) *StorageData { if s == nil { s = NewStorageData() } + if value == nil { + var zero [32]byte + value = &zero + } copy(s.data[storageValueStart:storageDataLength], value[:]) return s } diff --git a/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go index 80ac966aae..9c161ae471 100644 --- a/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/storage_data_test.go @@ -135,3 +135,54 @@ func TestStorageSetterChaining(t *testing.T) { func TestStorageConstantLayout_V0(t *testing.T) { require.Equal(t, 41, storageDataLength) } + +func TestNilStorageData_Getters(t *testing.T) { + var sd *StorageData + var zero [32]byte + + 
require.Equal(t, StorageDataVersion0, sd.GetSerializationVersion()) + require.Equal(t, int64(0), sd.GetBlockHeight()) + require.Equal(t, &zero, sd.GetValue()) +} + +func TestNilStorageData_IsDelete(t *testing.T) { + var sd *StorageData + require.True(t, sd.IsDelete()) +} + +func TestNilStorageData_Serialize(t *testing.T) { + var sd *StorageData + s := sd.Serialize() + require.Len(t, s, storageDataLength) + for _, b := range s { + require.Equal(t, byte(0), b) + } +} + +func TestNilStorageData_SerializeRoundTrips(t *testing.T) { + var sd *StorageData + rt, err := DeserializeStorageData(sd.Serialize()) + require.NoError(t, err) + require.True(t, rt.IsDelete()) +} + +func TestNilStorageData_SettersAutoCreate(t *testing.T) { + var s1 *StorageData + s1 = s1.SetBlockHeight(42) + require.NotNil(t, s1) + require.Equal(t, int64(42), s1.GetBlockHeight()) + + var s2 *StorageData + val := [32]byte{0x01} + s2 = s2.SetValue(&val) + require.NotNil(t, s2) + require.Equal(t, &val, s2.GetValue()) +} + +func TestStorageData_SetValueNilZeros(t *testing.T) { + sd := NewStorageData(). + SetValue(toArray32(leftPad32([]byte{0xff}))). 
+ SetValue(nil) + var zero [32]byte + require.Equal(t, &zero, sd.GetValue()) +} From 4618c0a26fb9f5df0db41907e15b45b63861f7e9 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 14:52:46 -0500 Subject: [PATCH 109/119] clean up interface --- .../state_db/bench/wrappers/flatkv_wrapper.go | 3 +- sei-db/state_db/sc/composite/store_test.go | 38 ++- sei-db/state_db/sc/flatkv/api.go | 10 +- sei-db/state_db/sc/flatkv/exporter_test.go | 41 ++- .../sc/flatkv/lthash_correctness_test.go | 59 ++-- sei-db/state_db/sc/flatkv/snapshot_test.go | 86 ++++-- sei-db/state_db/sc/flatkv/store_read.go | 222 +++++++++----- sei-db/state_db/sc/flatkv/store_read_test.go | 290 +++++++++++++++++- sei-db/state_db/sc/flatkv/store_test.go | 48 ++- sei-db/state_db/sc/flatkv/store_write_test.go | 127 +++++--- 10 files changed, 688 insertions(+), 236 deletions(-) diff --git a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go index a1e959c58f..d6ee7fe7f4 100644 --- a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go +++ b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go @@ -61,8 +61,7 @@ func (f *flatKVWrapper) Close() error { } func (f *flatKVWrapper) Read(key []byte) (data []byte, found bool, err error) { - data, found = f.base.Get(key) - return data, found, nil + return f.base.Get(key) } func (f *flatKVWrapper) GetPhaseTimer() *metrics.PhaseTimer { diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index 86a39a60a2..56843eca8e 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -27,19 +27,22 @@ func (f *failingEVMStore) LoadVersion(int64, bool) (flatkv.Store, error) { } func (f *failingEVMStore) ApplyChangeSets([]*proto.NamedChangeSet) error { return nil } func (f *failingEVMStore) Commit() (int64, error) { return 0, nil } -func (f *failingEVMStore) Get([]byte) ([]byte, bool) { return nil, false } -func (f *failingEVMStore) 
Has([]byte) bool { return false } -func (f *failingEVMStore) Iterator(_, _ []byte) flatkv.Iterator { return nil } -func (f *failingEVMStore) IteratorByPrefix([]byte) flatkv.Iterator { return nil } -func (f *failingEVMStore) RootHash() []byte { return nil } -func (f *failingEVMStore) Version() int64 { return 0 } -func (f *failingEVMStore) WriteSnapshot(string) error { return nil } -func (f *failingEVMStore) Rollback(int64) error { return nil } -func (f *failingEVMStore) Exporter(int64) (types.Exporter, error) { return nil, nil } -func (f *failingEVMStore) Importer(int64) (types.Importer, error) { return nil, nil } -func (f *failingEVMStore) GetPhaseTimer() *metrics.PhaseTimer { return nil } -func (f *failingEVMStore) CommittedRootHash() []byte { return nil } -func (f *failingEVMStore) Close() error { return nil } +func (f *failingEVMStore) Get([]byte) ([]byte, bool, error) { return nil, false, nil } +func (f *failingEVMStore) GetBlockHeightModified([]byte) (int64, bool, error) { + return -1, false, nil +} +func (f *failingEVMStore) Has([]byte) (bool, error) { return false, nil } +func (f *failingEVMStore) Iterator(_, _ []byte) flatkv.Iterator { return nil } +func (f *failingEVMStore) IteratorByPrefix([]byte) flatkv.Iterator { return nil } +func (f *failingEVMStore) RootHash() []byte { return nil } +func (f *failingEVMStore) Version() int64 { return 0 } +func (f *failingEVMStore) WriteSnapshot(string) error { return nil } +func (f *failingEVMStore) Rollback(int64) error { return nil } +func (f *failingEVMStore) Exporter(int64) (types.Exporter, error) { return nil, nil } +func (f *failingEVMStore) Importer(int64) (types.Importer, error) { return nil, nil } +func (f *failingEVMStore) GetPhaseTimer() *metrics.PhaseTimer { return nil } +func (f *failingEVMStore) CommittedRootHash() []byte { return nil } +func (f *failingEVMStore) Close() error { return nil } func padLeft32(val ...byte) []byte { var b [32]byte @@ -578,11 +581,13 @@ func TestExportImportSplitWrite(t 
*testing.T) { // Verify FlatKV data require.NotNil(t, dst.evmCommitter) - got, found := dst.evmCommitter.Get(storageKey) + got, found, err := dst.evmCommitter.Get(storageKey) + require.NoError(t, err) require.True(t, found, "storage key should exist in FlatKV after import") require.Equal(t, storageVal, got) - got, found = dst.evmCommitter.Get(nonceKey) + got, found, err = dst.evmCommitter.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce key should exist in FlatKV after import") require.Equal(t, nonceVal, got) } @@ -834,7 +839,8 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { bankStore := cs3.GetChildStoreByName("bank") require.Equal(t, []byte{0xA5}, bankStore.Get([]byte("bal"))) - got, found := cs3.evmCommitter.Get(storageKey) + got, found, err := cs3.evmCommitter.Get(storageKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0xA5), got) } diff --git a/sei-db/state_db/sc/flatkv/api.go b/sei-db/state_db/sc/flatkv/api.go index ad21e60b43..3293881e47 100644 --- a/sei-db/state_db/sc/flatkv/api.go +++ b/sei-db/state_db/sc/flatkv/api.go @@ -33,11 +33,15 @@ type Store interface { // Commit persists buffered writes and advances the version. Commit() (int64, error) - // Get returns the value for the x/evm memiavl key, or (nil, false) if not found. - Get(key []byte) ([]byte, bool) + // Get returns the value for the x/evm memiavl key. If not found, returns (nil, false, nil). + Get(key []byte) (value []byte, found bool, err error) + + // GetBlockHeightModified returns the block height at which the key was last modified. + // If not found, returns (-1, false, nil). + GetBlockHeightModified(key []byte) (int64, bool, error) // Has reports whether the x/evm memiavl key exists. - Has(key []byte) bool + Has(key []byte) (bool, error) // Iterator returns an iterator over [start, end) in memiavl key order. // Pass nil for unbounded. 
diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index ca973e0041..e980e1e7ec 100644 --- a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -202,19 +202,23 @@ func TestExporterRoundTrip(t *testing.T) { // --- Verify round-trip --- require.Equal(t, int64(1), s2.Version()) - got, found := s2.Get(storageKey) + got, found, err := s2.Get(storageKey) + require.NoError(t, err) require.True(t, found, "storage key should exist after import") require.Equal(t, storageVal, got) - got, found = s2.Get(nonceKey) + got, found, err = s2.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce key should exist after import") require.Equal(t, nonceVal, got) - got, found = s2.Get(codeKey) + got, found, err = s2.Get(codeKey) + require.NoError(t, err) require.True(t, found, "code key should exist after import") require.Equal(t, codeVal, got) - got, found = s2.Get(codeHashKey) + got, found, err = s2.Get(codeHashKey) + require.NoError(t, err) require.True(t, found, "codehash key should exist after import") require.Equal(t, codeHashVal, got) @@ -325,11 +329,13 @@ func TestImportSurvivesReopen(t *testing.T) { require.Equal(t, int64(1), s2.Version()) - got, found := s2.Get(storageKey) + got, found, err := s2.Get(storageKey) + require.NoError(t, err) require.True(t, found, "storage key must survive reopen") require.Equal(t, storageVal, got) - got, found = s2.Get(nonceKey) + got, found, err = s2.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce key must survive reopen") require.Equal(t, nonceVal, got) @@ -392,8 +398,10 @@ func TestImportPurgesStaleData(t *testing.T) { staleKeys := [][]byte{storageStale, nonceStale, codeHashStale, codeStale} + var found bool for _, k := range staleKeys { - _, found := s.Get(k) + _, found, err = s.Get(k) + require.NoError(t, err) require.True(t, found, "pre-import: key should exist") } @@ -440,24 +448,30 @@ func 
TestImportPurgesStaleData(t *testing.T) { require.NoError(t, imp.Close()) // --- Phase 4: verify stale keys are gone across all DB types --- - got, found := s.Get(storageA) + var got []byte + got, found, err = s.Get(storageA) + require.NoError(t, err) require.True(t, found, "storage key A should exist") require.Equal(t, newStorageVal, got) - got, found = s.Get(nonceA) + got, found, err = s.Get(nonceA) + require.NoError(t, err) require.True(t, found, "nonce key A should exist") require.Equal(t, newNonceVal, got) - got, found = s.Get(codeB) + got, found, err = s.Get(codeB) + require.NoError(t, err) require.True(t, found, "code key B should exist") require.Equal(t, newCodeVal, got) - got, found = s.Get(codeHashB) + got, found, err = s.Get(codeHashB) + require.NoError(t, err) require.True(t, found, "codehash key B should exist") require.Equal(t, newCodeHashVal, got) for _, k := range staleKeys { - _, found = s.Get(k) + _, found, err = s.Get(k) + require.NoError(t, err) require.False(t, found, "stale key should NOT exist after import") } @@ -473,7 +487,8 @@ func TestImportPurgesStaleData(t *testing.T) { require.Equal(t, int64(1), s.Version()) for _, k := range staleKeys { - _, found = s.Get(k) + _, found, err = s.Get(k) + require.NoError(t, err) require.False(t, found, "stale key must remain absent after reopen") } require.Equal(t, srcHash, s.RootHash()) diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 93e47a358d..898f0c62d0 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -734,7 +734,8 @@ func TestLtHashCrossApplyAccountSameFieldOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, uint64(20), binary.BigEndian.Uint64(val)) } @@ -768,7 +769,8 
@@ func TestLtHashCrossApplyStorageOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0x33), val) } @@ -805,7 +807,8 @@ func TestLtHashCrossApplyCodeOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x60, 0x40, 0x02, 0x03}, val) } @@ -837,7 +840,8 @@ func TestLtHashCrossApplyLegacyOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - val, found := s.Get(legacyKey) + val, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x30}, val) } @@ -897,27 +901,32 @@ func TestLtHashCrossApplyMixedOverwrite(t *testing.T) { // Verify all final values nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, uint64(100), binary.BigEndian.Uint64(nonceVal)) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s.Get(chKey) + chVal, found, err := s.Get(chKey) + require.NoError(t, err) require.True(t, found) expected := codeHashN(0x30) require.Equal(t, expected[:], chVal) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeVal, found := s.Get(codeKey) + codeVal, found, err := s.Get(codeKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x60, 0x60, 0x01}, codeVal) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageVal, found := s.Get(storageKey) + storageVal, found, err := s.Get(storageKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0x33), 
storageVal) - legacyVal, found := s.Get(legacyKey) + legacyVal, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -979,12 +988,14 @@ func TestLtHashAccountDeleteThenRecreate(t *testing.T) { verifyLtHashAtHeight(t, s, 2) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found = s.Get(chKey) + _, found, err = s.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should be zero (EOA)") raw, err := s.accountDB.Get(AccountKey(addr)) @@ -1044,11 +1055,13 @@ func TestAccountPendingReadPartialDelete(t *testing.T) { })) // Pending reads before commit - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(42), nonceVal) - chVal, found := s.Get(chKey) + chVal, found, err := s.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should be not-found after pending delete") require.Nil(t, chVal) @@ -1074,11 +1087,13 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both fields are readable before commit - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(10), nonceVal) - chVal, found := s.Get(chKey) + chVal, found, err := s.Get(chKey) + require.NoError(t, err) require.True(t, found, "codehash should be readable from pending writes") expected := codeHashN(0xEE) require.Equal(t, expected[:], chVal) @@ -1089,16 +1104,22 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both 
fields return not-found BEFORE commit (the core semantic change) - nonceVal, found = s.Get(nonceKey) + nonceVal, found, err = s.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after pending full-delete") require.Nil(t, nonceVal) - chVal, found = s.Get(chKey) + chVal, found, err = s.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should not be found after pending full-delete") require.Nil(t, chVal) - require.False(t, s.Has(nonceKey), "Has(nonce) should be false after pending full-delete") - require.False(t, s.Has(chKey), "Has(codehash) should be false after pending full-delete") + hasNonce, err := s.Has(nonceKey) + require.NoError(t, err) + require.False(t, hasNonce, "Has(nonce) should be false after pending full-delete") + hasCodeHash, err := s.Has(chKey) + require.NoError(t, err) + require.False(t, hasCodeHash, "Has(codehash) should be false after pending full-delete") // Verify isDelete is set paw := s.accountWrites[string(addr[:])] diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 73accab2b5..a9d8ac219e 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -122,10 +122,12 @@ func TestOpenFromSnapshot(t *testing.T) { // Verify data from all 3 versions is present key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x10}, Slot{0x01})) key3 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x10}, Slot{0x03})) - v, ok := s2.Get(key1) + v, ok, err := s2.Get(key1) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) - v, ok = s2.Get(key3) + v, ok, err = s2.Get(key3) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x03), v) } @@ -193,12 +195,14 @@ func TestRollbackRewindsState(t *testing.T) { // v5's data should not exist (WAL truncated, snapshot pruned) key5 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, 
StorageKey(Address{0x30}, Slot{0x05})) - _, ok := s.Get(key5) + _, ok, err := s.Get(key5) + require.NoError(t, err) require.False(t, ok, "v5 data should be gone after rollback to v4") // v4's data should still exist key4 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x30}, Slot{0x04})) - v, ok := s.Get(key4) + v, ok, err := s.Get(key4) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) @@ -475,7 +479,8 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) // Record baseline value at v2 for the same key. - vAtV2, ok := s1.Get(key) + vAtV2, ok, err := s1.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x01), vAtV2) @@ -493,7 +498,8 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, err) _, err = s2.LoadVersion(2, false) require.NoError(t, err) - gotV2, ok := s2.Get(key) + gotV2, ok, err := s2.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x01), gotV2, "snapshot baseline should remain stable") require.NoError(t, s2.Close()) @@ -508,7 +514,8 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { defer s3.Close() require.Equal(t, int64(4), s3.Version()) - gotLatest, ok := s3.Get(key) + gotLatest, ok, err := s3.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x04), gotLatest) } @@ -548,7 +555,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), s1.Version()) require.Equal(t, hashAtV2, s1.RootHash()) - v, ok := s1.Get(key) + v, ok, err := s1.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s1.Close()) @@ -562,7 +570,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(4), s2.Version()) require.Equal(t, hashAtV4, s2.RootHash()) - v, ok = s2.Get(key) + v, 
ok, err = s2.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) require.NoError(t, s2.Close()) @@ -576,7 +585,8 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") require.Equal(t, int64(2), s3.Version()) require.Equal(t, hashAtV2, s3.RootHash()) - v, ok = s3.Get(key) + v, ok, err = s3.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s3.Close()) @@ -1259,17 +1269,20 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.Equal(t, hash, s2.RootHash()) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - v, ok := s2.Get(storageKey) + v, ok, err := s2.Get(storageKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - v, ok = s2.Get(nonceKey) + v, ok, err = s2.Get(nonceKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 7}, v) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - v, ok = s2.Get(codeKey) + v, ok, err = s2.Get(codeKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -1370,21 +1383,25 @@ func TestReopenAfterDeletes(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash()) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - _, found := s2.Get(storageKey) + _, found, err := s2.Get(storageKey) + require.NoError(t, err) require.False(t, found, "storage should stay deleted after reopen") codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s2.Get(codeKey2) + _, found, err = s2.Get(codeKey2) + require.NoError(t, err) require.False(t, found, "code should stay deleted after reopen") // With Account Row GC, all-zero account row is physically deleted. 
nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s2.Get(nonceKey) + nonceVal, found, err := s2.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s2.Get(chKey) + chVal, found, err := s2.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should not be found after reopen (row deleted)") require.Nil(t, chVal) } @@ -1413,14 +1430,21 @@ func TestWALTruncationThenRollback(t *testing.T) { for i := 1; i <= 5; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) - val, found := s.Get(key) + var val []byte + var found bool + var loopErr error + val, found, loopErr = s.Get(key) + require.NoError(t, loopErr) require.True(t, found, "key at block %d should exist after rollback to v5", i) require.Equal(t, padLeft32(byte(i)), val) } for i := 6; i <= 10; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) - _, found := s.Get(key) + var found bool + var loopErr error + _, found, loopErr = s.Get(key) + require.NoError(t, loopErr) require.False(t, found, "key at block %d should NOT exist after rollback to v5", i) } @@ -1460,7 +1484,11 @@ func TestReopenAfterSnapshotAndTruncation(t *testing.T) { for i := 1; i <= 10; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) - val, found := s2.Get(key) + var val []byte + var found bool + var loopErr error + val, found, loopErr = s2.Get(key) + require.NoError(t, loopErr) require.True(t, found, "key at block %d should exist after reopen", i) require.Equal(t, padLeft32(byte(i)), val) } @@ -1586,7 +1614,8 @@ func TestWALDirectoryDeleted(t *testing.T) { require.Equal(t, int64(3), s2.Version()) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x03}, Slot{0x03})) - val, 
found := s2.Get(key) + val, found, err := s2.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0xCC), val) } @@ -1749,7 +1778,8 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash(), "LtHash should match after reopen") - nonceVal, found := s2.Get(nonceKey) + nonceVal, found, err := s2.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) } @@ -1811,7 +1841,8 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { require.Equal(t, hashAtV2, s2.RootHash(), "LtHash should match after WAL replay") nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := s2.Get(nonceKey) + _, found, err := s2.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after WAL replay (row deleted)") } @@ -1840,7 +1871,8 @@ func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { _, err = s.Commit() // v1 (snapshot taken) require.NoError(t, err) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) @@ -1854,14 +1886,16 @@ func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { _, err = s.Commit() // v2 (row deleted, snapshot taken) require.NoError(t, err) - _, found = s.Get(nonceKey) + _, found, err = s.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should be gone at v2") // Rollback to v1: row should be restored require.NoError(t, s.Rollback(1)) require.Equal(t, int64(1), s.Version()) - nonceVal, found = s.Get(nonceKey) + nonceVal, found, err = s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce should be restored after rollback to v1") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) diff --git a/sei-db/state_db/sc/flatkv/store_read.go 
b/sei-db/state_db/sc/flatkv/store_read.go index 2c4be926ad..cfb180a25f 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -11,94 +11,123 @@ import ( ) // Get returns the value for the given memiavl key. -// Returns (value, true) if found, (nil, false) if not found. -func (s *CommitStore) Get(key []byte) ([]byte, bool) { +// Returns (value, true, nil) if found, (nil, false, nil) if not found. +func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { kind, keyBytes := evm.ParseEVMKey(key) if kind == evm.EVMKeyUnknown { - return nil, false + return nil, false, nil } switch kind { case evm.EVMKeyStorage: value, err := s.getStorageValue(keyBytes) if err != nil { - return nil, false + return nil, false, err } - return value, value != nil + return value, value != nil, nil case evm.EVMKeyNonce, evm.EVMKeyCodeHash: - // Account data: keyBytes = addr(20) - // accountDB stores AccountValue at key=addr(20) - addr, ok := AddressFromBytes(keyBytes) - if !ok { - return nil, false - } - - // Check pending writes first - if accountValue, found := s.accountWrites[string(addr[:])]; found { - if accountValue.IsDelete() { - return nil, false - } - if kind == evm.EVMKeyNonce { - nonceBytes := make([]byte, vtype.NonceLen) - binary.BigEndian.PutUint64(nonceBytes, accountValue.GetNonce()) - return nonceBytes, true - } - // CodeHash - codeHash := accountValue.GetCodeHash() - var zeroCodeHash vtype.CodeHash - if *codeHash == zeroCodeHash { - return nil, false - } - return codeHash[:], true - } - - // Read from accountDB - encoded, err := s.accountDB.Get(AccountKey(addr)) + accountData, err := s.getAccountData(keyBytes) if err != nil { - return nil, false + return nil, false, err } - accountData, err := vtype.DeserializeAccountData(encoded) - if err != nil { - return nil, false + if accountData == nil || accountData.IsDelete() { + return nil, false, nil } if kind == evm.EVMKeyNonce { - nonce := make([]byte, vtype.NonceLen) - 
binary.BigEndian.PutUint64(nonce, accountData.GetNonce()) - return nonce, true + nonceBytes := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonceBytes, accountData.GetNonce()) + return nonceBytes, true, nil } // CodeHash codeHash := accountData.GetCodeHash() var zeroCodeHash vtype.CodeHash if *codeHash == zeroCodeHash { - return nil, false + return nil, false, nil } - return codeHash[:], true + return codeHash[:], true, nil case evm.EVMKeyCode: value, err := s.getCodeValue(keyBytes) if err != nil { - return nil, false + return nil, false, err } - return value, value != nil + return value, value != nil, nil case evm.EVMKeyLegacy: value, err := s.getLegacyValue(keyBytes) if err != nil { - return nil, false + return nil, false, err + } + return value, value != nil, nil + + default: + return nil, false, nil + } +} + +// GetBlockHeightModified returns the block height at which the key was last modified. +// If not found, returns (-1, false, nil). +func (s *CommitStore) GetBlockHeightModified(key []byte) (int64, bool, error) { + kind, keyBytes := evm.ParseEVMKey(key) + if kind == evm.EVMKeyUnknown { + return -1, false, nil + } + + switch kind { + case evm.EVMKeyStorage: + sd, err := s.getStorageData(keyBytes) + if err != nil { + return -1, false, err + } + if sd == nil || sd.IsDelete() { + return -1, false, nil + } + return sd.GetBlockHeight(), true, nil + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + accountData, err := s.getAccountData(keyBytes) + if err != nil { + return -1, false, err + } + if accountData == nil || accountData.IsDelete() { + return -1, false, nil + } + return accountData.GetBlockHeight(), true, nil + + case evm.EVMKeyCode: + cd, err := s.getCodeData(keyBytes) + if err != nil { + return -1, false, err + } + if cd == nil || cd.IsDelete() { + return -1, false, nil + } + return cd.GetBlockHeight(), true, nil + + case evm.EVMKeyLegacy: + ld, err := s.getLegacyData(keyBytes) + if err != nil { + return -1, false, err + } + if ld == nil || 
ld.IsDelete() { + return -1, false, nil } - return value, value != nil + return ld.GetBlockHeight(), true, nil default: - return nil, false + return -1, false, nil } } // Has reports whether the given memiavl key exists. -func (s *CommitStore) Has(key []byte) bool { - _, found := s.Get(key) - return found +func (s *CommitStore) Has(key []byte) (bool, error) { + _, found, err := s.Get(key) + if err != nil { + return false, fmt.Errorf("failed to get key %x: %w", key, err) + } + return found, nil } // Iterator returns an iterator over [start, end) in memiavl key order. @@ -181,74 +210,103 @@ func (s *CommitStore) IteratorByPrefix(prefix []byte) Iterator { // Internal Getters (used by ApplyChangeSets for LtHash computation) // ============================================================================= -func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { - pendingWrite, hasPending := s.storageWrites[string(key)] - if hasPending { - if pendingWrite.IsDelete() { +func (s *CommitStore) getAccountData(keyBytes []byte) (*vtype.AccountData, error) { + addr, ok := AddressFromBytes(keyBytes) + if !ok { + return nil, nil + } + + if accountValue, found := s.accountWrites[string(addr[:])]; found { + return accountValue, nil + } + + encoded, err := s.accountDB.Get(AccountKey(addr)) + if err != nil { + if errorutils.IsNotFound(err) { return nil, nil } - return pendingWrite.GetValue()[:], nil + return nil, fmt.Errorf("accountDB I/O error for key %x: %w", addr, err) } + return vtype.DeserializeAccountData(encoded) +} - value, err := s.storageDB.Get(key) +func (s *CommitStore) getStorageData(keyBytes []byte) (*vtype.StorageData, error) { + pendingWrite, hasPending := s.storageWrites[string(keyBytes)] + if hasPending { + return pendingWrite, nil + } + + value, err := s.storageDB.Get(keyBytes) if err != nil { if errorutils.IsNotFound(err) { return nil, nil } - return nil, fmt.Errorf("storageDB I/O error for key %x: %w", key, err) + return nil, fmt.Errorf("storageDB I/O 
error for key %x: %w", keyBytes, err) } + return vtype.DeserializeStorageData(value) +} - storageData, err := vtype.DeserializeStorageData(value) +func (s *CommitStore) getStorageValue(key []byte) ([]byte, error) { + sd, err := s.getStorageData(key) if err != nil { - return nil, fmt.Errorf("failed to deserialize storage data: %w", err) + return nil, err + } + if sd == nil || sd.IsDelete() { + return nil, nil } - return storageData.GetValue()[:], nil + return sd.GetValue()[:], nil } -func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { - pendingWrite, hasPending := s.codeWrites[string(key)] +func (s *CommitStore) getCodeData(keyBytes []byte) (*vtype.CodeData, error) { + pendingWrite, hasPending := s.codeWrites[string(keyBytes)] if hasPending { - if pendingWrite.IsDelete() { - return nil, nil - } - return pendingWrite.GetBytecode(), nil + return pendingWrite, nil } - value, err := s.codeDB.Get(key) + value, err := s.codeDB.Get(keyBytes) if err != nil { if errorutils.IsNotFound(err) { return nil, nil } - return nil, fmt.Errorf("codeDB I/O error for key %x: %w", key, err) + return nil, fmt.Errorf("codeDB I/O error for key %x: %w", keyBytes, err) } + return vtype.DeserializeCodeData(value) +} - codeData, err := vtype.DeserializeCodeData(value) +func (s *CommitStore) getCodeValue(key []byte) ([]byte, error) { + cd, err := s.getCodeData(key) if err != nil { - return nil, fmt.Errorf("failed to deserialize code data: %w", err) + return nil, err } - return codeData.GetBytecode(), nil + if cd == nil || cd.IsDelete() { + return nil, nil + } + return cd.GetBytecode(), nil } -func (s *CommitStore) getLegacyValue(key []byte) ([]byte, error) { - pendingWrite, hasPending := s.legacyWrites[string(key)] +func (s *CommitStore) getLegacyData(keyBytes []byte) (*vtype.LegacyData, error) { + pendingWrite, hasPending := s.legacyWrites[string(keyBytes)] if hasPending { - if pendingWrite.IsDelete() { - return nil, nil - } - return pendingWrite.GetValue(), nil + return 
pendingWrite, nil } - value, err := s.legacyDB.Get(key) + value, err := s.legacyDB.Get(keyBytes) if err != nil { if errorutils.IsNotFound(err) { return nil, nil } - return nil, fmt.Errorf("legacyDB I/O error for key %x: %w", key, err) + return nil, fmt.Errorf("legacyDB I/O error for key %x: %w", keyBytes, err) } + return vtype.DeserializeLegacyData(value) +} - legacyData, err := vtype.DeserializeLegacyData(value) +func (s *CommitStore) getLegacyValue(key []byte) ([]byte, error) { + ld, err := s.getLegacyData(key) if err != nil { - return nil, fmt.Errorf("failed to deserialize legacy data: %w", err) + return nil, err + } + if ld == nil || ld.IsDelete() { + return nil, nil } - return legacyData.GetValue(), nil + return ld.GetValue(), nil } diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index dc19f481a6..100fb615ac 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -1,10 +1,12 @@ package flatkv import ( + "encoding/binary" "testing" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/proto" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" iavl "github.com/sei-protocol/sei-chain/sei-iavl/proto" "github.com/stretchr/testify/require" ) @@ -23,7 +25,8 @@ func TestStoreGetPendingWrites(t *testing.T) { key := memiavlStorageKey(addr, slot) // No data initially - _, found := s.Get(key) + _, found, err := s.Get(key) + require.NoError(t, err) require.False(t, found) // Apply changeset (adds to pending writes) @@ -31,7 +34,8 @@ func TestStoreGetPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, value, got) @@ -39,7 +43,8 @@ func TestStoreGetPendingWrites(t *testing.T) { commitAndCheck(t, 
s) // Should still be readable after commit - got, found = s.Get(key) + got, found, err = s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, value, got) } @@ -58,7 +63,8 @@ func TestStoreGetPendingDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - _, found := s.Get(key) + _, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) // Apply delete (pending) @@ -66,14 +72,16 @@ func TestStoreGetPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found = s.Get(key) + _, found, err = s.Get(key) + require.NoError(t, err) require.False(t, found) // Commit delete commitAndCheck(t, s) // Still should not be found - _, found = s.Get(key) + _, found, err = s.Get(key) + require.NoError(t, err) require.False(t, found) } @@ -90,8 +98,11 @@ func TestStoreGetNonStorageKeys(t *testing.T) { evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), } + var err error + var found bool for _, key := range nonStorageKeys { - _, found := s.Get(key) + _, found, err = s.Get(key) + require.NoError(t, err) require.False(t, found, "non-storage keys should not be found before write") } } @@ -105,7 +116,9 @@ func TestStoreHas(t *testing.T) { key := memiavlStorageKey(addr, slot) // Initially not found - require.False(t, s.Has(key)) + found, err := s.Has(key) + require.NoError(t, err) + require.False(t, found) // Write and commit cs := makeChangeSet(key, padLeft32(0xAA), false) @@ -113,7 +126,9 @@ func TestStoreHas(t *testing.T) { commitAndCheck(t, s) // Now should exist - require.True(t, s.Has(key)) + found, err = s.Has(key) + require.NoError(t, err) + require.True(t, found) } // ============================================================================= @@ -128,7 +143,8 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { legacyKey := append([]byte{0x09}, addr[:]...) 
// Not found initially - _, found := s.Get(legacyKey) + _, found, err := s.Get(legacyKey) + require.NoError(t, err) require.False(t, found) // Apply changeset @@ -136,13 +152,15 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found := s.Get(legacyKey) + got, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) // Commit and still readable commitAndCheck(t, s) - got, found = s.Get(legacyKey) + got, found, err = s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) } @@ -159,7 +177,8 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) - _, found := s.Get(legacyKey) + _, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) // Apply delete (pending) @@ -167,12 +186,14 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found = s.Get(legacyKey) + _, found, err = s.Get(legacyKey) + require.NoError(t, err) require.False(t, found) // Commit delete commitAndCheck(t, s) - _, found = s.Get(legacyKey) + _, found, err = s.Get(legacyKey) + require.NoError(t, err) require.False(t, found) } @@ -194,7 +215,8 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0x77), got) @@ -204,7 +226,8 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Should not exist - _, found = s.Get(key) + _, found, err = s.Get(key) + require.NoError(t, err) require.False(t, found) } @@ -390,3 +413,236 @@ func TestStoreIteratorByPrefixAddress(t *testing.T) { } 
require.Equal(t, 2, count2, "should find 2 slots for addr2") } + +// ============================================================================= +// GetBlockHeightModified +// ============================================================================= + +func TestGetBlockHeightModified_Storage(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x01} + slot := Slot{0x02} + key := memiavlStorageKey(addr, slot) + + // Not found initially + bh, found, err := s.GetBlockHeightModified(key) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) + + // Write at version 1 + cs := makeChangeSet(key, padLeft32(0x42), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) // version 1 + + bh, found, err = s.GetBlockHeightModified(key) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) + + // Overwrite at version 2 + cs2 := makeChangeSet(key, padLeft32(0x99), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) // version 2 + + bh, found, err = s.GetBlockHeightModified(key) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(2), bh) +} + +func TestGetBlockHeightModified_Nonce(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x10} + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceVal := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonceVal, 7) + + // Not found initially + bh, found, err := s.GetBlockHeightModified(nonceKey) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) + + // Write at version 1 + cs := makeChangeSet(nonceKey, nonceVal, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + bh, found, err = s.GetBlockHeightModified(nonceKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) 
+} + +func TestGetBlockHeightModified_CodeHash(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x20} + codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) + codeHashVal := vtype.CodeHash{0xAA} + + // Not found initially + bh, found, err := s.GetBlockHeightModified(codeHashKey) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) + + // Write nonce + codehash together (account data is a single row) + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonceVal := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonceVal, 1) + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: iavl.ChangeSet{ + Pairs: []*iavl.KVPair{ + {Key: nonceKey, Value: nonceVal}, + {Key: codeHashKey, Value: codeHashVal[:]}, + }, + }, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + bh, found, err = s.GetBlockHeightModified(codeHashKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) +} + +func TestGetBlockHeightModified_Code(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x30} + codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) + bytecode := []byte{0x60, 0x80, 0x60, 0x40} + + // Not found initially + bh, found, err := s.GetBlockHeightModified(codeKey) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) + + // Write at version 1 + cs := makeChangeSet(codeKey, bytecode, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + bh, found, err = s.GetBlockHeightModified(codeKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) +} + +func TestGetBlockHeightModified_Legacy(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x40} + legacyKey := append([]byte{0x09}, addr[:]...) 
+ + // Not found initially + bh, found, err := s.GetBlockHeightModified(legacyKey) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) + + // Write at version 1 + cs := makeChangeSet(legacyKey, []byte{0xCA, 0xFE}, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) + + bh, found, err = s.GetBlockHeightModified(legacyKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) +} + +func TestGetBlockHeightModified_UnknownKey(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + bh, found, err := s.GetBlockHeightModified([]byte{0xFF, 0xFF}) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) +} + +func TestGetBlockHeightModified_DeletedKey(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x50} + slot := Slot{0x60} + key := memiavlStorageKey(addr, slot) + + // Write then delete + cs1 := makeChangeSet(key, padLeft32(0x01), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + cs2 := makeChangeSet(key, nil, true) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) + + bh, found, err := s.GetBlockHeightModified(key) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) +} + +func TestGetBlockHeightModified_PendingWrite(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := Address{0x70} + slot := Slot{0x80} + key := memiavlStorageKey(addr, slot) + + // Apply but don't commit — data is pending + cs := makeChangeSet(key, padLeft32(0x42), false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + + bh, found, err := s.GetBlockHeightModified(key) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) +} + +func TestGetBlockHeightModified_UpdateBumpsHeight(t *testing.T) { + s := setupTestStore(t) + defer s.Close() 
+ + addr := Address{0x90} + nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) + nonce1 := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonce1, 1) + nonce2 := make([]byte, vtype.NonceLen) + binary.BigEndian.PutUint64(nonce2, 2) + + // Write at version 1 + cs1 := makeChangeSet(nonceKey, nonce1, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) + commitAndCheck(t, s) + + bh, found, err := s.GetBlockHeightModified(nonceKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(1), bh) + + // Update at version 2 + cs2 := makeChangeSet(nonceKey, nonce2, false) + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) + commitAndCheck(t, s) + + bh, found, err = s.GetBlockHeightModified(nonceKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, int64(2), bh) +} diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 1b939567aa..19a71275e0 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -184,7 +184,8 @@ func TestStoreApplyAndCommit(t *testing.T) { // Apply but not commit - should be readable from pending writes require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found, "should be readable from pending writes") require.Equal(t, value, got) @@ -192,7 +193,8 @@ func TestStoreApplyAndCommit(t *testing.T) { commitAndCheck(t, s) // Still should be readable after commit - got, found = s.Get(key) + got, found, err = s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, value, got) } @@ -231,7 +233,8 @@ func TestStoreMultipleWrites(t *testing.T) { // Verify all entries for _, e := range entries { key := memiavlStorageKey(addr, e.slot) - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) 
require.Equal(t, padLeft32(e.value), got) } @@ -302,7 +305,8 @@ func TestStoreVersioning(t *testing.T) { require.Equal(t, int64(2), s.Version()) // Latest value should be from version 2 - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0x02), got) } @@ -337,7 +341,8 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, err) defer s2.Close() - got, found := s2.Get(key) + got, found, err := s2.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, value, got) @@ -693,7 +698,8 @@ func TestGetUnknownKeyReturnsNil(t *testing.T) { s := setupTestStore(t) defer s.Close() - v, ok := s.Get([]byte{0xFF, 0xFF, 0xFF}) + v, ok, err := s.Get([]byte{0xFF, 0xFF, 0xFF}) + require.NoError(t, err) require.False(t, ok) require.Nil(t, v) } @@ -741,15 +747,18 @@ func TestPersistenceAllKeyTypes(t *testing.T) { require.Equal(t, int64(1), s2.Version()) require.Equal(t, hash, s2.RootHash()) - v, ok := s2.Get(storageKey) + v, ok, err := s2.Get(storageKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) - v, ok = s2.Get(nonceKey) + v, ok, err = s2.Get(nonceKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 5}, v) - v, ok = s2.Get(codeKey) + v, ok, err = s2.Get(codeKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -777,7 +786,8 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - got, found := ro.Get(key) + got, found, err := ro.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, value, got) require.NotNil(t, ro.RootHash()) @@ -807,7 +817,8 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - got, found := ro.Get(key) + got, found, err := ro.Get(key) + require.NoError(t, err) require.True(t, found) 
require.Equal(t, value, got) } @@ -835,7 +846,8 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { defer ro.Close() require.Equal(t, int64(3), ro.Version()) - got, found := ro.Get(key) + got, found, err := ro.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(3), got) } @@ -894,7 +906,8 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { require.Equal(t, int64(3), s.Version()) require.Equal(t, int64(1), ro.Version()) - got, found := ro.Get(key) + got, found, err := ro.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(1), got) } @@ -930,8 +943,10 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { require.Equal(t, int64(4), ro1.Version()) require.Equal(t, int64(4), ro2.Version()) - g1, ok1 := ro1.Get(key) - g2, ok2 := ro2.Get(key) + g1, ok1, err := ro1.Get(key) + require.NoError(t, err) + g2, ok2, err := ro2.Get(key) + require.NoError(t, err) require.True(t, ok1) require.True(t, ok2) require.Equal(t, padLeft32(4), g1) @@ -959,7 +974,8 @@ func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), v) - got, found := s.Get(key) + got, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(2), got) } diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index b4b71d2084..1878b5bbcf 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -42,12 +42,14 @@ func TestStoreNonStorageKeys(t *testing.T) { commitAndCheck(t, s) // Nonce should be found - nonceValue, found := s.Get(nonceKey) + nonceValue, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 17}, nonceValue) // CodeHash should be found - codeHashValue, found := s.Get(codeHashKey) + codeHashValue, found, err := s.Get(codeHashKey) + 
require.NoError(t, err) require.True(t, found, "codehash should be found") require.Equal(t, codeHash[:], codeHashValue) } @@ -108,23 +110,27 @@ func TestStoreWriteAllDBs(t *testing.T) { // Verify storage data was written (via Store.Get which deserializes) storageMemiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageValue, found := s.Get(storageMemiavlKey) + storageValue, found, err := s.Get(storageMemiavlKey) + require.NoError(t, err) require.True(t, found, "Storage should be found") require.Equal(t, padLeft32(0x11, 0x22), storageValue) // Verify account and code data was written nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(nonceKey) + nonceValue, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeValue, found := s.Get(codeKey) + codeValue, found, err := s.Get(codeKey) + require.NoError(t, err) require.True(t, found, "Code should be found") require.Equal(t, []byte{0x60, 0x60, 0x60}, codeValue) // Verify legacy data persisted (via Store.Get which deserializes) - legacyVal, found := s.Get(legacyKey) + legacyVal, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found, "Legacy should be found") require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -196,23 +202,27 @@ func TestStoreWriteAccountAndCode(t *testing.T) { // Verify account data was written nonceKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]) - nonce1, found := s.Get(nonceKey1) + nonce1, found, err := s.Get(nonceKey1) + require.NoError(t, err) require.True(t, found, "Nonce1 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 1}, nonce1) nonceKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]) - nonce2, found := s.Get(nonceKey2) + nonce2, found, err := s.Get(nonceKey2) + require.NoError(t, err) 
require.True(t, found, "Nonce2 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 2}, nonce2) // Verify code data was written codeKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]) - code1, found := s.Get(codeKey1) + code1, found, err := s.Get(codeKey1) + require.NoError(t, err) require.True(t, found, "Code1 should be found") require.Equal(t, []byte{0x60, 0x80}, code1) codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]) - code2, found := s.Get(codeKey2) + code2, found, err := s.Get(codeKey2) + require.NoError(t, err) require.True(t, found, "Code2 should be found") require.Equal(t, []byte{0x60, 0xA0}, code2) @@ -284,13 +294,15 @@ func TestStoreWriteDelete(t *testing.T) { // Nonce was the only account field written (no codehash). After delete, // all fields are zero so the accountDB row is physically deleted. nonceKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(nonceKeyDel) + nonceValue, found, err := s.Get(nonceKeyDel) + require.NoError(t, err) require.False(t, found, "nonce should not be found after account row deletion") require.Nil(t, nonceValue) // Verify code is deleted codeKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s.Get(codeKeyDel) + _, found, err = s.Get(codeKeyDel) + require.NoError(t, err) require.False(t, found, "code should be deleted") requireAllLocalMetaAt(t, s, 2) @@ -344,12 +356,14 @@ func TestAccountValueStorage(t *testing.T) { // Get method should return individual fields nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found := s.Get(nonceKey) + nonceValue, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue, "Nonce should be 42") codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHashValue, found := s.Get(codeHashKey) + codeHashValue, found, err := s.Get(codeHashKey) + require.NoError(t, err) 
require.True(t, found, "CodeHash should be found") require.Equal(t, expectedCodeHash[:], codeHashValue, "CodeHash should match") } @@ -380,7 +394,8 @@ func TestStoreWriteLegacyKeys(t *testing.T) { require.Equal(t, int64(1), s.localMeta[legacyDBDir].CommittedVersion) // Verify data persisted (via Store.Get which deserializes) - got, found := s.Get(codeSizeKey) + got, found, err := s.Get(codeSizeKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, codeSizeValue, got) } @@ -427,7 +442,8 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { // Verify legacy data persisted (via Store.Get which deserializes) codeSizeKey := append([]byte{0x09}, addr[:]...) - got, found := s.Get(codeSizeKey) + got, found, err := s.Get(codeSizeKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, got) } @@ -445,7 +461,8 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found := s.Get(legacyKey) + got, found, err := s.Get(legacyKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, []byte{0x00, 0x10}, got) @@ -455,7 +472,8 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Should not be found - _, found = s.Get(legacyKey) + _, found, err = s.Get(legacyKey) + require.NoError(t, err) require.False(t, found) } @@ -535,7 +553,8 @@ func TestStoreFsyncConfig(t *testing.T) { commitAndCheck(t, store) // Data should be readable - got, found := store.Get(key) + got, found, err := store.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, padLeft32(0xCC), got) @@ -651,11 +670,13 @@ func TestMultipleApplyChangeSetsBeforeCommit(t *testing.T) { commitAndCheck(t, s) - v1, ok := s.Get(key1) + v1, ok, err := s.Get(key1) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x11), v1) - v2, ok := s.Get(key2) + v2, ok, err := s.Get(key2) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x22), 
v2) } @@ -680,11 +701,13 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) - nonceVal, ok := s.Get(nonceKey) + nonceVal, ok, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceVal, "nonce should be preserved after codehash update") - chVal, ok := s.Get(codeHashKey) + chVal, ok, err := s.Get(codeHashKey) + require.NoError(t, err) require.True(t, ok) require.Equal(t, codeHash[:], chVal) } @@ -790,7 +813,8 @@ func TestOverwriteSameKeyInSingleBlock(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - v, ok := s.Get(key) + v, ok, err := s.Get(key) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x02), v, "last write should win") } @@ -832,7 +856,8 @@ func TestStoreFsyncEnabled(t *testing.T) { commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0x01}) require.Equal(t, int64(1), s.Version()) - v, ok := s.Get(memiavlStorageKey(Address{0x01}, Slot{0x01})) + v, ok, err := s.Get(memiavlStorageKey(Address{0x01}, Slot{0x01})) + require.NoError(t, err) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) } @@ -917,23 +942,30 @@ func TestDeleteSemanticsCodehashAsymmetry(t *testing.T) { // After deleting all account fields, the row is physically deleted (Account Row GC). 
nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after all-zero account row deletion") require.Nil(t, nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found := s.Get(chKey) + chVal, found, err := s.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should not be found after row deletion") require.Nil(t, chVal) - require.False(t, s.Has(chKey), "Has(codehash) should be false after delete") - require.False(t, s.Has(nonceKey), "Has(nonce) should be false after row deletion") + hasCodeHash, err := s.Has(chKey) + require.NoError(t, err) + require.False(t, hasCodeHash, "Has(codehash) should be false after delete") + hasNonce, err := s.Has(nonceKey) + require.NoError(t, err) + require.False(t, hasNonce, "Has(nonce) should be false after row deletion") codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found = s.Get(codeKey) + _, found, err = s.Get(codeKey) + require.NoError(t, err) require.False(t, found, "code should be physically deleted") - _, err := s.accountDB.Get(AccountKey(addr)) + _, err = s.accountDB.Get(AccountKey(addr)) require.Error(t, err, "accountDB row should be physically deleted when all fields are zero") } @@ -958,7 +990,8 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - _, found := s.Get(key) + _, found, err := s.Get(key) + require.NoError(t, err) require.False(t, found, "write-then-delete: key should be gone") }) @@ -982,7 +1015,8 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found, "delete-then-write: key should exist") 
require.Equal(t, padLeft32(0xBB), val) }) @@ -1142,7 +1176,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { // With Account Row GC, nonce-only account becomes all-zero → row deleted key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := s.Get(key) + _, found, err := s.Get(key) + require.NoError(t, err) require.False(t, found, "nonce-only account should be deleted after nonce delete") }) @@ -1164,7 +1199,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found) require.Equal(t, uint64(99), bytesToNonce(val)) }) @@ -1183,7 +1219,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found := s.Get(key) + _, found, err := s.Get(key) + require.NoError(t, err) require.False(t, found, "codehash-only account: delete → all-zero → row deleted") }) @@ -1205,7 +1242,8 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - val, found := s.Get(key) + val, found, err := s.Get(key) + require.NoError(t, err) require.True(t, found, "codehash should be restored after delete-then-write") expected := codeHashN(0xBB) require.Equal(t, expected[:], val) @@ -1294,11 +1332,13 @@ func TestAccountRowDeletedWhenAllFieldsZero(t *testing.T) { _, err := s.accountDB.Get(AccountKey(addr)) require.Error(t, err, "accountDB row should be physically deleted") - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after row deletion") require.Nil(t, nonceVal) - chVal, found := s.Get(chKey) + chVal, found, err := s.Get(chKey) + require.NoError(t, err) require.False(t, found, "codehash should not be 
found after row deletion") require.Nil(t, chVal) } @@ -1325,7 +1365,8 @@ func TestAccountRowPersistsWhenPartiallyZero(t *testing.T) { require.NoError(t, err, "accountDB row should still exist after partial delete") require.NotNil(t, raw) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found, "nonce should still be readable") require.Equal(t, nonceBytes(7), nonceVal) } @@ -1359,7 +1400,8 @@ func TestAccountRowDeleteThenRecreate(t *testing.T) { require.NoError(t, err, "row should be recreated") require.NotNil(t, raw) - nonceVal, found := s.Get(nonceKey) + nonceVal, found, err := s.Get(nonceKey) + require.NoError(t, err) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) } @@ -1394,7 +1436,8 @@ func TestAccountRowGCOnWriteZero(t *testing.T) { require.Error(t, err, "accountDB row should be GC'd when write-zero makes account empty") nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found := s.Get(nonceKey) + _, found, err := s.Get(nonceKey) + require.NoError(t, err) require.False(t, found, "nonce should not be found after write-zero GC") } From 28730258301ed802113c44bc8bc15e668ed6128a Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 15:07:30 -0500 Subject: [PATCH 110/119] check for bad keys --- .../state_db/sc/flatkv/flatkv_test_config.go | 1 + sei-db/state_db/sc/flatkv/keys.go | 17 +++++++++++++ sei-db/state_db/sc/flatkv/store_apply.go | 14 ++--------- sei-db/state_db/sc/flatkv/store_read.go | 12 ++++++++-- sei-db/state_db/sc/flatkv/store_read_test.go | 22 ++++++++++++++++- sei-db/state_db/sc/flatkv/store_test.go | 24 +++++++++++++++++-- 6 files changed, 73 insertions(+), 17 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go index 4ab1b71bfa..1434850852 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -43,5 
+43,6 @@ func DefaultTestConfig(t *testing.T) *Config { ReaderThreadsPerCore: 2.0, ReaderPoolQueueSize: 1024, MiscPoolThreadsPerCore: 4.0, + StrictKeyTypeCheck: true, } } diff --git a/sei-db/state_db/sc/flatkv/keys.go b/sei-db/state_db/sc/flatkv/keys.go index d9923fe513..391763051e 100644 --- a/sei-db/state_db/sc/flatkv/keys.go +++ b/sei-db/state_db/sc/flatkv/keys.go @@ -3,6 +3,7 @@ package flatkv import ( "bytes" + "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/lthash" ) @@ -30,6 +31,22 @@ func isMetaKey(key []byte) bool { return bytes.HasPrefix(key, metaKeyPrefixBytes) } +// Supported EVM key types for FlatKV. +// TODO: add balance key when that is eventually supported +var supportedKeyTypes = map[evm.EVMKeyKind]struct{}{ + evm.EVMKeyStorage: {}, + evm.EVMKeyNonce: {}, + evm.EVMKeyCodeHash: {}, + evm.EVMKeyCode: {}, + evm.EVMKeyLegacy: {}, +} + +// IsSupportedKeyType reports whether the given key kind is handled by FlatKV. +func IsSupportedKeyType(kind evm.EVMKeyKind) bool { + _, ok := supportedKeyTypes[kind] + return ok +} + const ( AddressLen = 20 SlotLen = 32 diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index bdd22afef6..2f5e45dd5c 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -9,16 +9,6 @@ import ( "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv/vtype" ) -// Supported key types for FlatKV. -// TODO: add balance key when that is eventually supported -var supportedKeyTypes = map[evm.EVMKeyKind]struct{}{ - evm.EVMKeyStorage: {}, - evm.EVMKeyNonce: {}, - evm.EVMKeyCodeHash: {}, - evm.EVMKeyCode: {}, - evm.EVMKeyLegacy: {}, -} // TODO also use this for reads - // ApplyChangeSets buffers EVM changesets and updates LtHash. 
func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error { if s.readOnly { @@ -151,11 +141,11 @@ func sortChangeSets( for _, pair := range cs.Changeset.Pairs { kind, keyBytes := evm.ParseEVMKey(pair.Key) - if _, ok := supportedKeyTypes[kind]; !ok { + if !IsSupportedKeyType(kind) { if strict { return nil, fmt.Errorf("unsupported key type: %v", kind) } else { - logger.Warn("unsupported key type", "key", kind) + logger.Warn("unsupported key type in ApplyChangeSets", "kind", kind) continue } } diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index cfb180a25f..c7bafb9558 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -14,7 +14,11 @@ import ( // Returns (value, true, nil) if found, (nil, false, nil) if not found. func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { kind, keyBytes := evm.ParseEVMKey(key) - if kind == evm.EVMKeyUnknown { + if !IsSupportedKeyType(kind) { + if s.config.StrictKeyTypeCheck { + return nil, false, fmt.Errorf("unsupported key type: %v", kind) + } + logger.Warn("unsupported key type in Get", "kind", kind) return nil, false, nil } @@ -71,7 +75,11 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { // If not found, returns (-1, false, nil). 
func (s *CommitStore) GetBlockHeightModified(key []byte) (int64, bool, error) { kind, keyBytes := evm.ParseEVMKey(key) - if kind == evm.EVMKeyUnknown { + if !IsSupportedKeyType(kind) { + if s.config.StrictKeyTypeCheck { + return -1, false, fmt.Errorf("unsupported key type: %v", kind) + } + logger.Warn("unsupported key type in GetBlockHeightModified", "kind", kind) return -1, false, nil } diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 100fb615ac..72bc443576 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -564,7 +564,7 @@ func TestGetBlockHeightModified_Legacy(t *testing.T) { require.Equal(t, int64(1), bh) } -func TestGetBlockHeightModified_UnknownKey(t *testing.T) { +func TestGetBlockHeightModified_MissingKey(t *testing.T) { s := setupTestStore(t) defer s.Close() @@ -574,6 +574,26 @@ func TestGetBlockHeightModified_UnknownKey(t *testing.T) { require.Equal(t, int64(-1), bh) } +func TestGetBlockHeightModified_UnsupportedKeyType_Strict(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + _, _, err := s.GetBlockHeightModified([]byte{}) + require.Error(t, err) +} + +func TestGetBlockHeightModified_UnsupportedKeyType_NonStrict(t *testing.T) { + cfg := DefaultTestConfig(t) + cfg.StrictKeyTypeCheck = false + s := setupTestStoreWithConfig(t, cfg) + defer s.Close() + + bh, found, err := s.GetBlockHeightModified([]byte{}) + require.NoError(t, err) + require.False(t, found) + require.Equal(t, int64(-1), bh) +} + func TestGetBlockHeightModified_DeletedKey(t *testing.T) { s := setupTestStore(t) defer s.Close() diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 19a71275e0..8dccdea83b 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -691,10 +691,10 @@ func TestRootHashIsBlake3_256(t *testing.T) { } // 
============================================================================= -// Get returns nil for unknown keys +// Get returns nil for missing keys, errors for unsupported key types // ============================================================================= -func TestGetUnknownKeyReturnsNil(t *testing.T) { +func TestGetMissingKeyReturnsNil(t *testing.T) { s := setupTestStore(t) defer s.Close() @@ -704,6 +704,26 @@ func TestGetUnknownKeyReturnsNil(t *testing.T) { require.Nil(t, v) } +func TestGetUnsupportedKeyType_Strict(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + _, _, err := s.Get([]byte{}) + require.Error(t, err) +} + +func TestGetUnsupportedKeyType_NonStrict(t *testing.T) { + cfg := DefaultTestConfig(t) + cfg.StrictKeyTypeCheck = false + s := setupTestStoreWithConfig(t, cfg) + defer s.Close() + + v, ok, err := s.Get([]byte{}) + require.NoError(t, err) + require.False(t, ok) + require.Nil(t, v) +} + // ============================================================================= // Persistence across close/reopen // ============================================================================= From a1a46d71036b70d29a243c115ed207d935a168fc Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 15:28:54 -0500 Subject: [PATCH 111/119] better handling of variable sized data --- sei-db/state_db/sc/flatkv/vtype/code_data.go | 54 +++++++++--------- .../state_db/sc/flatkv/vtype/legacy_data.go | 55 ++++++++++--------- 2 files changed, 56 insertions(+), 53 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/vtype/code_data.go b/sei-db/state_db/sc/flatkv/vtype/code_data.go index 061d439c58..376b0bcf7a 100644 --- a/sei-db/state_db/sc/flatkv/vtype/code_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/code_data.go @@ -36,24 +36,26 @@ var _ VType = (*CodeData)(nil) // This data structure is not threadsafe. Values passed into and values received from this data structure // are not safe to modify without first copying them. 
type CodeData struct { - data []byte + version CodeDataVersion + blockHeight int64 + bytecode []byte } // Create a new CodeData with the given bytecode. func NewCodeData() *CodeData { - data := make([]byte, codeBytecodeStart) - data[codeVersionStart] = byte(CodeDataVersion0) - return &CodeData{data: data} + return &CodeData{version: CodeDataVersion0} } // Serialize the code data to a byte slice. -// -// The returned byte slice is not safe to modify without first copying it. func (c *CodeData) Serialize() []byte { if c == nil { return make([]byte, codeBytecodeStart) } - return c.data + data := make([]byte, codeBytecodeStart+len(c.bytecode)) + data[codeVersionStart] = byte(c.version) + binary.BigEndian.PutUint64(data[codeBlockHeightStart:codeBytecodeStart], uint64(c.blockHeight)) //nolint:gosec + copy(data[codeBytecodeStart:], c.bytecode) + return data } // Deserialize the code data from the given byte slice. @@ -62,21 +64,24 @@ func DeserializeCodeData(data []byte) (*CodeData, error) { return nil, errors.New("data is empty") } - codeData := &CodeData{ - data: data, - } - - serializationVersion := codeData.GetSerializationVersion() - if serializationVersion != CodeDataVersion0 { - return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + version := CodeDataVersion(data[codeVersionStart]) + if version != CodeDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", version) } if len(data) < codeBytecodeStart { return nil, fmt.Errorf("data length at version %d should be at least %d, got %d", - serializationVersion, codeBytecodeStart, len(data)) + version, codeBytecodeStart, len(data)) } - return codeData, nil + bytecode := make([]byte, len(data)-codeBytecodeStart) + copy(bytecode, data[codeBytecodeStart:]) + + return &CodeData{ + version: version, + blockHeight: int64(binary.BigEndian.Uint64(data[codeBlockHeightStart:codeBytecodeStart])), //nolint:gosec + bytecode: bytecode, + }, nil } // Get the serialization 
version for this CodeData instance. @@ -84,7 +89,7 @@ func (c *CodeData) GetSerializationVersion() CodeDataVersion { if c == nil { return CodeDataVersion0 } - return (CodeDataVersion)(c.data[codeVersionStart]) + return c.version } // Get the block height when this code was last modified. @@ -92,15 +97,15 @@ func (c *CodeData) GetBlockHeight() int64 { if c == nil { return 0 } - return int64(binary.BigEndian.Uint64(c.data[codeBlockHeightStart:codeBytecodeStart])) //nolint:gosec + return c.blockHeight } // Get the contract bytecode. func (c *CodeData) GetBytecode() []byte { if c == nil { - return make([]byte, 0) + return []byte{} } - return c.data[codeBytecodeStart:] + return c.bytecode } // Set the contract bytecode. Returns self (or a new CodeData if nil). @@ -108,10 +113,7 @@ func (c *CodeData) SetBytecode(bytecode []byte) *CodeData { if c == nil { c = NewCodeData() } - newData := make([]byte, codeBytecodeStart+len(bytecode)) - copy(newData, c.data[:codeBytecodeStart]) - copy(newData[codeBytecodeStart:], bytecode) - c.data = newData + c.bytecode = append([]byte(nil), bytecode...) return c } @@ -121,7 +123,7 @@ func (c *CodeData) IsDelete() bool { if c == nil { return true } - return len(c.data) == codeBytecodeStart // TODO verify that this is the correct semantics! + return len(c.bytecode) == 0 } // Set the block height when this code was last modified/touched. Returns self (or a new CodeData if nil). 
@@ -129,6 +131,6 @@ func (c *CodeData) SetBlockHeight(blockHeight int64) *CodeData { if c == nil { c = NewCodeData() } - binary.BigEndian.PutUint64(c.data[codeBlockHeightStart:codeBytecodeStart], uint64(blockHeight)) //nolint:gosec + c.blockHeight = blockHeight return c } diff --git a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go index 00ec0ced45..f8a500031d 100644 --- a/sei-db/state_db/sc/flatkv/vtype/legacy_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/legacy_data.go @@ -32,30 +32,31 @@ const ( var _ VType = (*LegacyData)(nil) -// TODO revisit types with variable sized fields!!! Not elegegant how we currently do this. - // Used for encapsulating and serializing legacy data in the FlatKV legacy database. // // This data structure is not threadsafe. Values passed into and values received from this data structure // are not safe to modify without first copying them. type LegacyData struct { - data []byte + version LegacyDataVersion + blockHeight int64 + value []byte } // Create a new LegacyData with the given value. func NewLegacyData() *LegacyData { - data := make([]byte, legacyHeaderLength) - return &LegacyData{data: data} + return &LegacyData{version: LegacyDataVersion0} } // Serialize the legacy data to a byte slice. -// -// The returned byte slice is not safe to modify without first copying it. func (l *LegacyData) Serialize() []byte { if l == nil { return make([]byte, legacyHeaderLength) } - return l.data + data := make([]byte, legacyHeaderLength+len(l.value)) + data[legacyVersionStart] = byte(l.version) + binary.BigEndian.PutUint64(data[legacyBlockHeightStart:legacyValueStart], uint64(l.blockHeight)) //nolint:gosec + copy(data[legacyValueStart:], l.value) + return data } // Deserialize the legacy data from the given byte slice. 
@@ -64,21 +65,24 @@ func DeserializeLegacyData(data []byte) (*LegacyData, error) { return nil, errors.New("data is empty") } - legacyData := &LegacyData{ - data: data, - } - - serializationVersion := legacyData.GetSerializationVersion() - if serializationVersion != LegacyDataVersion0 { - return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + version := LegacyDataVersion(data[legacyVersionStart]) + if version != LegacyDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", version) } if len(data) < legacyHeaderLength { return nil, fmt.Errorf("data length at version %d should be at least %d, got %d", - serializationVersion, legacyHeaderLength, len(data)) + version, legacyHeaderLength, len(data)) } - return legacyData, nil + value := make([]byte, len(data)-legacyHeaderLength) + copy(value, data[legacyValueStart:]) + + return &LegacyData{ + version: version, + blockHeight: int64(binary.BigEndian.Uint64(data[legacyBlockHeightStart:legacyValueStart])), //nolint:gosec + value: value, + }, nil } // Get the serialization version for this LegacyData instance. @@ -86,7 +90,7 @@ func (l *LegacyData) GetSerializationVersion() LegacyDataVersion { if l == nil { return LegacyDataVersion0 } - return (LegacyDataVersion)(l.data[legacyVersionStart]) + return l.version } // Get the block height when this legacy data was last modified. @@ -94,15 +98,15 @@ func (l *LegacyData) GetBlockHeight() int64 { if l == nil { return 0 } - return int64(binary.BigEndian.Uint64(l.data[legacyBlockHeightStart:legacyValueStart])) //nolint:gosec + return l.blockHeight } // Get the legacy value. func (l *LegacyData) GetValue() []byte { if l == nil { - return make([]byte, 0) + return []byte{} } - return l.data[legacyValueStart:] + return l.value } // Set the legacy value. Returns self (or a new LegacyData if nil). 
@@ -110,10 +114,7 @@ func (l *LegacyData) SetValue(value []byte) *LegacyData { if l == nil { l = NewLegacyData() } - newData := make([]byte, legacyHeaderLength+len(value)) - copy(newData, l.data[:legacyValueStart]) - copy(newData[legacyValueStart:], value) - l.data = newData + l.value = append([]byte(nil), value...) return l } @@ -123,7 +124,7 @@ func (l *LegacyData) IsDelete() bool { if l == nil { return true } - return len(l.data) == legacyHeaderLength + return len(l.value) == 0 } // Set the block height when this legacy data was last modified/touched. Returns self (or a new LegacyData if nil). @@ -131,6 +132,6 @@ func (l *LegacyData) SetBlockHeight(blockHeight int64) *LegacyData { if l == nil { l = NewLegacyData() } - binary.BigEndian.PutUint64(l.data[legacyBlockHeightStart:legacyValueStart], uint64(blockHeight)) //nolint:gosec + l.blockHeight = blockHeight return l } From 8bb29c707edab12181e05e871d955c3a2fb71344 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 15:37:12 -0500 Subject: [PATCH 112/119] Improve codehash serialization --- .../state_db/sc/flatkv/vtype/account_data.go | 52 ++++++--- .../sc/flatkv/vtype/account_data_test.go | 105 ++++++++++++++---- .../testdata/account_data_v0_compact.hex | 1 + ...t_data_v0.hex => account_data_v0_full.hex} | 0 4 files changed, 117 insertions(+), 41 deletions(-) create mode 100644 sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_compact.hex rename sei-db/state_db/sc/flatkv/vtype/testdata/{account_data_v0.hex => account_data_v0_full.hex} (100%) diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data.go b/sei-db/state_db/sc/flatkv/vtype/account_data.go index 99a43c7948..45d4556e83 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data.go @@ -17,12 +17,21 @@ const ( /* Serialization schema for AccountData version 0: +Full form (81 bytes): + | Version | Block Height | Balance | Nonce | Code Hash | 
|---------|--------------|----------|----------|-----------| | 1 byte | 8 bytes | 32 bytes | 8 bytes | 32 bytes | -Data is stored in big-endian order. +Compact form (49 bytes) — used when code hash is all zeros: + +| Version | Block Height | Balance | Nonce | +|---------|--------------|----------|----------| +| 1 byte | 8 bytes | 32 bytes | 8 bytes | +Data is stored in big-endian order. At deserialization time, the two forms +are distinguished by length. The compact form is preferred for serialization +since ~97% of accounts have no code hash. */ const ( @@ -31,6 +40,7 @@ const ( accountBalanceStart = 9 accountNonceStart = 41 accountCodeHashStart = 49 + accountCompactLength = accountCodeHashStart // 49 accountDataLength = 81 ) @@ -51,37 +61,45 @@ func NewAccountData() *AccountData { } } -// Serialize the account data to a byte slice. +// Serialize the account data to a byte slice. If the code hash is all zeros, +// the compact form (49 bytes) is returned; otherwise the full form (81 bytes). // // The returned byte slice is not safe to modify without first copying it. func (a *AccountData) Serialize() []byte { if a == nil { - return make([]byte, accountDataLength) + return make([]byte, accountCompactLength) + } + for i := accountCodeHashStart; i < accountDataLength; i++ { + if a.data[i] != 0 { + return a.data + } } - return a.data + return a.data[:accountCompactLength] } -// Deserialize the account data from the given byte slice. +// Deserialize the account data from the given byte slice. Accepts both the +// compact (49 byte) and full (81 byte) forms. 
func DeserializeAccountData(data []byte) (*AccountData, error) { if len(data) == 0 { return nil, errors.New("data is empty") } - accountData := &AccountData{ - data: data, - } - - serializationVersion := accountData.GetSerializationVersion() - if serializationVersion != AccountDataVersion0 { - return nil, fmt.Errorf("unsupported serialization version: %d", serializationVersion) + version := AccountDataVersion(data[accountVersionStart]) + if version != AccountDataVersion0 { + return nil, fmt.Errorf("unsupported serialization version: %d", version) } - if len(data) != accountDataLength { - return nil, fmt.Errorf("data length at version %d should be %d, got %d", - serializationVersion, accountDataLength, len(data)) + switch len(data) { + case accountDataLength: + return &AccountData{data: data}, nil + case accountCompactLength: + full := make([]byte, accountDataLength) + copy(full, data) + return &AccountData{data: full}, nil + default: + return nil, fmt.Errorf("data length at version %d should be %d or %d, got %d", + version, accountCompactLength, accountDataLength, len(data)) } - - return accountData, nil } // Get the serialization version for this AccountData instance. diff --git a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go index caac87a156..8c6c65d8b5 100644 --- a/sei-db/state_db/sc/flatkv/vtype/account_data_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/account_data_test.go @@ -13,34 +13,36 @@ import ( const testdataDir = "testdata" -// If the golden file does not exist it is created on the first run. -// Subsequent runs verify that serialization still matches, catching -// accidental compatibility breaks. -func TestSerializationGoldenFile_V0(t *testing.T) { - ad := NewAccountData(). - SetBlockHeight(100). - SetBalance(toBalance(leftPad32([]byte{1}))). - SetNonce(42). 
- SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))) - - serialized := ad.Serialize() - - golden := filepath.Join(testdataDir, "account_data_v0.hex") +// goldenCheck compares serialized against a golden hex file, creating it on first run. +func goldenCheck(t *testing.T, name string, serialized []byte) { + t.Helper() + golden := filepath.Join(testdataDir, name) if _, err := os.Stat(golden); os.IsNotExist(err) { require.NoError(t, os.MkdirAll(testdataDir, 0o755)) require.NoError(t, os.WriteFile(golden, []byte(hex.EncodeToString(serialized)), 0o644)) t.Logf("created golden file %s — re-run to verify", golden) return } - want, err := os.ReadFile(golden) require.NoError(t, err) wantBytes, err := hex.DecodeString(string(want)) require.NoError(t, err) - require.Equal(t, wantBytes, serialized, "serialization differs from golden file") + require.Equal(t, wantBytes, serialized, "serialization differs from golden file %s", name) +} + +// Full form: with non-zero codehash (81 bytes). +func TestSerializationGoldenFile_V0_Full(t *testing.T) { + ad := NewAccountData(). + SetBlockHeight(100). + SetBalance(toBalance(leftPad32([]byte{1}))). + SetNonce(42). + SetCodeHash(toCodeHash(bytes.Repeat([]byte{0xaa}, 32))) + + serialized := ad.Serialize() + require.Len(t, serialized, accountDataLength) + goldenCheck(t, "account_data_v0_full.hex", serialized) - // Verify round-trip from the golden bytes. - rt, err := DeserializeAccountData(wantBytes) + rt, err := DeserializeAccountData(serialized) require.NoError(t, err) require.Equal(t, int64(100), rt.GetBlockHeight()) require.Equal(t, uint64(42), rt.GetNonce()) @@ -48,6 +50,26 @@ func TestSerializationGoldenFile_V0(t *testing.T) { require.Equal(t, toCodeHash(bytes.Repeat([]byte{0xaa}, 32)), rt.GetCodeHash()) } +// Compact form: zero codehash omitted (49 bytes). +func TestSerializationGoldenFile_V0_Compact(t *testing.T) { + ad := NewAccountData(). + SetBlockHeight(100). + SetBalance(toBalance(leftPad32([]byte{1}))). 
+ SetNonce(42) + + serialized := ad.Serialize() + require.Len(t, serialized, accountCompactLength) + goldenCheck(t, "account_data_v0_compact.hex", serialized) + + rt, err := DeserializeAccountData(serialized) + require.NoError(t, err) + require.Equal(t, int64(100), rt.GetBlockHeight()) + require.Equal(t, uint64(42), rt.GetNonce()) + require.Equal(t, toBalance(leftPad32([]byte{1})), rt.GetBalance()) + var zeroHash CodeHash + require.Equal(t, &zeroHash, rt.GetCodeHash()) +} + func TestNewAccountData_ZeroInitialized(t *testing.T) { ad := NewAccountData() var zero [32]byte @@ -58,8 +80,13 @@ func TestNewAccountData_ZeroInitialized(t *testing.T) { require.Equal(t, (*CodeHash)(&zero), ad.GetCodeHash()) } -func TestSerializeLength(t *testing.T) { +func TestSerializeLength_Compact(t *testing.T) { ad := NewAccountData() + require.Len(t, ad.Serialize(), accountCompactLength) +} + +func TestSerializeLength_Full(t *testing.T) { + ad := NewAccountData().SetCodeHash(toCodeHash(bytes.Repeat([]byte{0x01}, 32))) require.Len(t, ad.Serialize(), accountDataLength) } @@ -83,7 +110,9 @@ func TestRoundTrip_AllFieldsSet(t *testing.T) { func TestRoundTrip_ZeroValues(t *testing.T) { ad := NewAccountData() - rt, err := DeserializeAccountData(ad.Serialize()) + serialized := ad.Serialize() + require.Len(t, serialized, accountCompactLength, "zero codehash should produce compact form") + rt, err := DeserializeAccountData(serialized) require.NoError(t, err) var zero [32]byte require.Equal(t, int64(0), rt.GetBlockHeight()) @@ -92,6 +121,24 @@ func TestRoundTrip_ZeroValues(t *testing.T) { require.Equal(t, (*CodeHash)(&zero), rt.GetCodeHash()) } +func TestRoundTrip_CompactWithNonZeroFields(t *testing.T) { + ad := NewAccountData(). + SetBlockHeight(500). + SetBalance(toBalance(leftPad32([]byte{0x42}))). 
+ SetNonce(77) + + serialized := ad.Serialize() + require.Len(t, serialized, accountCompactLength) + + rt, err := DeserializeAccountData(serialized) + require.NoError(t, err) + require.Equal(t, int64(500), rt.GetBlockHeight()) + require.Equal(t, uint64(77), rt.GetNonce()) + require.Equal(t, toBalance(leftPad32([]byte{0x42})), rt.GetBalance()) + var zeroHash CodeHash + require.Equal(t, &zeroHash, rt.GetCodeHash()) +} + func TestRoundTrip_MaxValues(t *testing.T) { maxBalance := toBalance(bytes.Repeat([]byte{0xff}, 32)) maxCodeHash := toCodeHash(bytes.Repeat([]byte{0xff}, 32)) @@ -152,10 +199,20 @@ func TestDeserialize_TooLong(t *testing.T) { require.Error(t, err) } +func TestDeserialize_BetweenCompactAndFull(t *testing.T) { + _, err := DeserializeAccountData(make([]byte, accountCompactLength+1)) + require.Error(t, err) +} + func TestDeserialize_UnsupportedVersion(t *testing.T) { - data := make([]byte, accountDataLength) - data[0] = 0xff - _, err := DeserializeAccountData(data) + full := make([]byte, accountDataLength) + full[0] = 0xff + _, err := DeserializeAccountData(full) + require.Error(t, err) + + compact := make([]byte, accountCompactLength) + compact[0] = 0xff + _, err = DeserializeAccountData(compact) require.Error(t, err) } @@ -193,7 +250,7 @@ func TestNilAccountData_IsDelete(t *testing.T) { func TestNilAccountData_Serialize(t *testing.T) { var ad *AccountData s := ad.Serialize() - require.Len(t, s, accountDataLength) + require.Len(t, s, accountCompactLength) for _, b := range s { require.Equal(t, byte(0), b) } @@ -211,7 +268,7 @@ func TestNilAccountData_Copy(t *testing.T) { cp := ad.Copy() require.NotNil(t, cp) require.True(t, cp.IsDelete()) - require.Len(t, cp.Serialize(), accountDataLength) + require.Len(t, cp.Serialize(), accountCompactLength) } func TestNilAccountData_SettersAutoCreate(t *testing.T) { diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_compact.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_compact.hex 
new file mode 100644 index 0000000000..953ddccd75 --- /dev/null +++ b/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_compact.hex @@ -0,0 +1 @@ +0000000000000000640000000000000000000000000000000000000000000000000000000000000001000000000000002a \ No newline at end of file diff --git a/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex b/sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_full.hex similarity index 100% rename from sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0.hex rename to sei-db/state_db/sc/flatkv/vtype/testdata/account_data_v0_full.hex From 20509218944404bc2515f492ca20f0e722e8d77c Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 1 Apr 2026 15:39:14 -0500 Subject: [PATCH 113/119] cleanup --- sei-db/state_db/sc/flatkv/vtype/base_types.go | 2 -- .../state_db/sc/flatkv/vtype/base_types_test.go | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types.go b/sei-db/state_db/sc/flatkv/vtype/base_types.go index 49cee3e3f6..660c136e8b 100644 --- a/sei-db/state_db/sc/flatkv/vtype/base_types.go +++ b/sei-db/state_db/sc/flatkv/vtype/base_types.go @@ -13,8 +13,6 @@ const ( BalanceLen = 32 ) -// TODO unit test this file!!! - // Address is an EVM address (20 bytes). 
type Address [AddressLen]byte diff --git a/sei-db/state_db/sc/flatkv/vtype/base_types_test.go b/sei-db/state_db/sc/flatkv/vtype/base_types_test.go index 2d5e60a373..5bf4772d6b 100644 --- a/sei-db/state_db/sc/flatkv/vtype/base_types_test.go +++ b/sei-db/state_db/sc/flatkv/vtype/base_types_test.go @@ -8,6 +8,23 @@ import ( "github.com/stretchr/testify/require" ) +// --- Constants and type sizes --- + +func TestConstantValues(t *testing.T) { + require.Equal(t, 20, AddressLen) + require.Equal(t, 32, CodeHashLen) + require.Equal(t, 8, NonceLen) + require.Equal(t, 32, SlotLen) + require.Equal(t, 32, BalanceLen) +} + +func TestTypeSizes(t *testing.T) { + require.Len(t, Address{}, AddressLen) + require.Len(t, CodeHash{}, CodeHashLen) + require.Len(t, Slot{}, SlotLen) + require.Len(t, Balance{}, BalanceLen) +} + // --- ParseNonce --- func TestParseNonce_Valid(t *testing.T) { From d9a981d91abfce8ba277f8dbdad84dc70f197b19 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 8 Apr 2026 13:27:11 -0500 Subject: [PATCH 114/119] fix things that broke with merge --- sei-db/state_db/sc/flatkv/exporter_test.go | 31 ++++++++-------- .../sc/flatkv/lthash_correctness_test.go | 35 +++++++++++-------- sei-db/state_db/sc/flatkv/store_read_test.go | 31 ++++++++++++---- sei-db/state_db/sc/flatkv/store_write_test.go | 4 --- 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index a7e876c3d4..2b375adaeb 100644 --- a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -751,33 +751,36 @@ func TestExporterAtHistoricalVersion(t *testing.T) { func TestExportImportLargerDataset(t *testing.T) { cfg := DefaultTestConfig(t) - cfg.SnapshotInterval = 5 s := setupTestStoreWithConfig(t, cfg) defer s.Close() - // Write multiple key types across multiple addresses. 
+ // Write multiple key types across multiple addresses in a single block + // so that all rows share the same block height. The importer commits + // everything at a single version, so block heights must match for the + // LtHash round-trip to be identical. + var allPairs []*proto.KVPair for i := byte(1); i <= 10; i++ { addr := addrN(i) - pairs := []*proto.KVPair{ + allPairs = append(allPairs, noncePair(addr, uint64(i)), - { + &proto.KVPair{ Key: evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slotN(i))), Value: padLeft32(i, i, i), }, - } + ) if i%3 == 0 { - pairs = append(pairs, + allPairs = append(allPairs, codeHashPair(addr, codeHashN(i)), codePair(addr, []byte{0x60, i}), ) } - cs := &proto.NamedChangeSet{ - Name: "evm", - Changeset: proto.ChangeSet{Pairs: pairs}, - } - require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - commitAndCheck(t, s) } + cs := &proto.NamedChangeSet{ + Name: "evm", + Changeset: proto.ChangeSet{Pairs: allPairs}, + } + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) + commitAndCheck(t, s) originalHash := s.RootHash() // Export. 
@@ -796,14 +799,14 @@ func TestExportImportLargerDataset(t *testing.T) { _, err = s2.LoadVersion(0, false) require.NoError(t, err) - imp, err := s2.Importer(10) + imp, err := s2.Importer(1) require.NoError(t, err) for _, n := range nodes { imp.AddNode(n) } require.NoError(t, imp.Close()) - require.Equal(t, int64(10), s2.Version()) + require.Equal(t, int64(1), s2.Version()) require.Equal(t, originalHash, s2.RootHash(), "imported store should have identical RootHash") require.NoError(t, s2.Close()) } diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 694c4ce794..aa46732575 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -1307,22 +1307,27 @@ func TestLtHashExportImportRoundTrip(t *testing.T) { s := setupTestStore(t) defer s.Close() - // Build state across multiple blocks + // Build state in a single block so that all rows share the same block + // height. The importer commits everything at a single version, so block + // heights must match for the LtHash round-trip to be identical. + var evmPairs []*proto.KVPair + var legacyCS []*proto.NamedChangeSet for i := byte(1); i <= 5; i++ { addr := addrN(i) + evmPairs = append(evmPairs, + noncePair(addr, uint64(i)*10), + codeHashPair(addr, codeHashN(i)), + codePair(addr, []byte{0x60, 0x80, i}), + storagePair(addr, slotN(i), []byte{i, 0xBB}), + ) legacyKey := append([]byte{0x09}, addr[:]...) 
- require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ - namedCS( - noncePair(addr, uint64(i)*10), - codeHashPair(addr, codeHashN(i)), - codePair(addr, []byte{0x60, 0x80, i}), - storagePair(addr, slotN(i), []byte{i, 0xBB}), - ), - makeChangeSet(legacyKey, []byte{i, 0xCC}, false), - })) - commitAndCheck(t, s) + legacyCS = append(legacyCS, makeChangeSet(legacyKey, []byte{i, 0xCC}, false)) } - verifyLtHashAtHeight(t, s, 5) + allCS := append([]*proto.NamedChangeSet{namedCS(evmPairs...)}, legacyCS...) + require.NoError(t, s.ApplyChangeSets(allCS)) + commitAndCheck(t, s) + + verifyLtHashAtHeight(t, s, 1) srcHash := s.RootHash() // Export @@ -1344,7 +1349,7 @@ func TestLtHashExportImportRoundTrip(t *testing.T) { // Import into fresh store s2 := setupTestStore(t) - imp, err := s2.Importer(5) + imp, err := s2.Importer(1) require.NoError(t, err) require.NoError(t, imp.AddModule(evm.EVMFlatKVStoreKey)) for _, n := range nodes { @@ -1352,10 +1357,10 @@ func TestLtHashExportImportRoundTrip(t *testing.T) { } require.NoError(t, imp.Close()) - require.Equal(t, int64(5), s2.Version()) + require.Equal(t, int64(1), s2.Version()) require.Equal(t, srcHash, s2.RootHash(), "imported store RootHash should match source") - verifyLtHashAtHeight(t, s2, 5) + verifyLtHashAtHeight(t, s2, 1) require.NoError(t, s2.Close()) } diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 00c628fc47..8e85d5180b 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -595,17 +595,15 @@ func TestGetUnknownKeyTypes(t *testing.T) { s := setupTestStore(t) defer s.Close() - cases := []struct { + // Nil and empty keys map to EVMKeyEmpty/EVMKeyUnknown, which is + // unsupported and errors under StrictKeyTypeCheck. 
+ for _, tc := range []struct { name string key []byte }{ {"nil key", nil}, {"empty key", []byte{}}, - {"single byte", []byte{0xFF}}, - {"random bytes", []byte{0xDE, 0xAD, 0xBE, 0xEF}}, - {"short nonce-like (2 bytes)", []byte{0x04, 0x01}}, - } - for _, tc := range cases { + } { t.Run(tc.name, func(t *testing.T) { _, _, err := s.Get(tc.key) require.Error(t, err) @@ -613,6 +611,27 @@ func TestGetUnknownKeyTypes(t *testing.T) { require.Error(t, err) }) } + + // Non-empty keys that don't match a known prefix are classified as + // EVMKeyLegacy, which is a supported type — Get/Has should not error. + for _, tc := range []struct { + name string + key []byte + }{ + {"single byte", []byte{0xFF}}, + {"random bytes", []byte{0xDE, 0xAD, 0xBE, 0xEF}}, + {"short nonce-like (2 bytes)", []byte{0x04, 0x01}}, + } { + t.Run(tc.name, func(t *testing.T) { + val, found, err := s.Get(tc.key) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, val) + found, err = s.Has(tc.key) + require.NoError(t, err) + require.False(t, found) + }) + } } // ============================================================================= diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index cfcfc0f5bf..65f7048563 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -1710,10 +1710,6 @@ func TestApplyChangeSetsEVMKeyEmptySkipped(t *testing.T) { s := setupTestStore(t) defer s.Close() - hashBefore := s.RootHash() - - // Only zero-length keys return EVMKeyUnknown (alias for EVMKeyEmpty). - // All non-empty keys are routed to at least EVMKeyLegacy. 
cs := &proto.NamedChangeSet{ Name: "evm", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ From 309fb33bdb9b956219c5235288c6da8287575443 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 8 Apr 2026 13:35:53 -0500 Subject: [PATCH 115/119] revert unecessary changes --- sei-db/common/utils/hash64.go | 39 ------------------- .../bench/cryptosim}/canned_random.go | 14 ++----- sei-db/state_db/bench/cryptosim/cryptosim.go | 3 +- .../bench/cryptosim/data_generator.go | 7 ++-- sei-db/state_db/bench/cryptosim/receipt.go | 5 +-- .../state_db/bench/cryptosim/receipt_test.go | 19 +++++---- sei-db/state_db/bench/cryptosim/util.go | 36 +++++++++++++++++ 7 files changed, 55 insertions(+), 68 deletions(-) delete mode 100644 sei-db/common/utils/hash64.go rename sei-db/{common/rand => state_db/bench/cryptosim}/canned_random.go (95%) diff --git a/sei-db/common/utils/hash64.go b/sei-db/common/utils/hash64.go deleted file mode 100644 index 04526131a7..0000000000 --- a/sei-db/common/utils/hash64.go +++ /dev/null @@ -1,39 +0,0 @@ -package utils - -import "math" - -// Hash64 returns a well-distributed 64-bit hash of x. -// It implements the SplitMix64 finalizer, a fast non-cryptographic mixing -// function with excellent avalanche properties. It is suitable for hash tables, -// sharding, randomized iteration, and benchmarks, but it is NOT -// cryptographically secure. -// -// The function is a bijection over uint64 (no collisions as a mapping). -// -// References: -// - Steele, Lea, Flood. 
"Fast Splittable Pseudorandom Number Generators" -// (OOPSLA 2014): https://doi.org/10.1145/2660193.2660195 -// - Public domain reference implementation: -// http://xorshift.di.unimi.it/splitmix64.c -func Hash64(x int64) int64 { - z := uint64(x) //nolint:gosec // G115 - hash function, int64->uint64 conversion intentional - z += 0x9e3779b97f4a7c15 - z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9 - z = (z ^ (z >> 27)) * 0x94d049bb133111eb - z = z ^ (z >> 31) - //nolint:gosec // G115 - hash function converts uint64 to int64, overflow intentional - return int64(z) -} - -// PositiveHash64 returns the absolute value of Hash64(x). It never returns a negative value. -// When Hash64(x) is math.MinInt64, returns math.MaxInt64 since the true absolute value does not fit in int64. -func PositiveHash64(x int64) int64 { - result := Hash64(x) - if result == math.MinInt64 { - return math.MaxInt64 - } - if result < 0 { - return -result - } - return result -} diff --git a/sei-db/common/rand/canned_random.go b/sei-db/state_db/bench/cryptosim/canned_random.go similarity index 95% rename from sei-db/common/rand/canned_random.go rename to sei-db/state_db/bench/cryptosim/canned_random.go index e5636a16f3..6e95db15e4 100644 --- a/sei-db/common/rand/canned_random.go +++ b/sei-db/state_db/bench/cryptosim/canned_random.go @@ -1,16 +1,10 @@ -package rand +package cryptosim import ( "encoding/binary" "fmt" "math" "math/rand" - - "github.com/sei-protocol/sei-chain/sei-db/common/utils" -) - -const ( - AddressLen = 20 // EVM address length ) // CannedRandom provides pre-generated randomness for benchmarking. 
@@ -75,7 +69,7 @@ func NewCannedRandom( func (cr *CannedRandom) Clone(randomizeOffset bool) *CannedRandom { index := cr.index if randomizeOffset { - index = utils.PositiveHash64(cr.Int64()) % int64(len(cr.buffer)) + index = PositiveHash64(cr.Int64()) % int64(len(cr.buffer)) } return &CannedRandom{ buffer: cr.buffer, @@ -108,7 +102,7 @@ func (cr *CannedRandom) SeededBytes(count int, seed int64) []byte { return cr.buffer } - startIndex := utils.PositiveHash64(seed) % int64(len(cr.buffer)-count) + startIndex := PositiveHash64(seed) % int64(len(cr.buffer)-count) return cr.buffer[startIndex : startIndex+int64(count)] } @@ -121,7 +115,7 @@ func (cr *CannedRandom) Int64() int64 { } base := binary.BigEndian.Uint64(buf[:]) //nolint:gosec // G115 - benchmark uses deterministic non-crypto randomness, overflow acceptable - result := utils.Hash64(int64(base) + cr.index) + result := Hash64(int64(base) + cr.index) // Add 8 to the index to skip the 8 bytes we just read. cr.index = (cr.index + 8) % bufLen diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index bc6610975e..72f0dbc14f 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -6,7 +6,6 @@ import ( "runtime" "time" - "github.com/sei-protocol/sei-chain/sei-db/common/rand" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" "golang.org/x/time/rate" ) @@ -151,7 +150,7 @@ func NewCryptoSim( // avoiding rate() spikes when restarting with a preserved DB. 
fmt.Printf("Initializing random number generator.\n") - rand := rand.NewCannedRandom(config.CannedRandomSize, config.Seed) + rand := NewCannedRandom(config.CannedRandomSize, config.Seed) consoleUpdatePeriod := time.Duration(config.ConsoleUpdateIntervalSeconds * float64(time.Second)) diff --git a/sei-db/state_db/bench/cryptosim/data_generator.go b/sei-db/state_db/bench/cryptosim/data_generator.go index c050042665..09ead76273 100644 --- a/sei-db/state_db/bench/cryptosim/data_generator.go +++ b/sei-db/state_db/bench/cryptosim/data_generator.go @@ -5,7 +5,6 @@ import ( "fmt" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/common/rand" ) const ( @@ -37,7 +36,7 @@ type DataGenerator struct { initialNextBlockNumber uint64 // The random number generator. - rand *rand.CannedRandom + rand *CannedRandom // The address of the fee account (i.e. the account that collects gas fees). This is a special account // and has account ID 0. Since we reuse this account very often, it is cached for performance. @@ -65,7 +64,7 @@ type DataGenerator struct { func NewDataGenerator( config *CryptoSimConfig, database *Database, - rand *rand.CannedRandom, + rand *CannedRandom, metrics *CryptosimMetrics, ) (*DataGenerator, error) { @@ -324,6 +323,6 @@ func (d *DataGenerator) ReportEndOfBlock() { // Get the random number generator. Note that the random number generator is not thread safe, and // so the caller is responsible for ensuring that it is not used concurrently with other calls to the data generator. 
-func (d *DataGenerator) Rand() *rand.CannedRandom { +func (d *DataGenerator) Rand() *CannedRandom { return d.rand } diff --git a/sei-db/state_db/bench/cryptosim/receipt.go b/sei-db/state_db/bench/cryptosim/receipt.go index 730e27c3e9..4ab348aa2c 100644 --- a/sei-db/state_db/bench/cryptosim/receipt.go +++ b/sei-db/state_db/bench/cryptosim/receipt.go @@ -7,7 +7,6 @@ import ( "hash" ethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/sei-protocol/sei-chain/sei-db/common/rand" evmtypes "github.com/sei-protocol/sei-chain/x/evm/types" "golang.org/x/crypto/sha3" ) @@ -44,7 +43,7 @@ var erc20TransferEventSignatureBytes = [hashLen]byte{ // BuildERC20TransferReceiptFromTxn produces a plausible successful ERC20 transfer receipt from a transaction. func BuildERC20TransferReceiptFromTxn( - crand *rand.CannedRandom, + crand *CannedRandom, feeCollectionAccount []byte, blockNumber uint64, txIndex uint32, @@ -68,7 +67,7 @@ func BuildERC20TransferReceiptFromTxn( // ERC20 balances as storage slots rather than separate account references. The caller supplies the block number and tx // index so the resulting receipt can line up with the simulated block being benchmarked. 
func BuildERC20TransferReceipt( - crand *rand.CannedRandom, + crand *CannedRandom, feeCollectionAccount []byte, srcAccount []byte, dstAccount []byte, diff --git a/sei-db/state_db/bench/cryptosim/receipt_test.go b/sei-db/state_db/bench/cryptosim/receipt_test.go index a23d4b0fbf..f1e61109fb 100644 --- a/sei-db/state_db/bench/cryptosim/receipt_test.go +++ b/sei-db/state_db/bench/cryptosim/receipt_test.go @@ -5,12 +5,11 @@ import ( ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/sei-protocol/sei-chain/sei-db/common/evm" - "github.com/sei-protocol/sei-chain/sei-db/common/rand" ) func makeTestKeys(t *testing.T) (feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract []byte) { t.Helper() - keyRand := rand.NewCannedRandom(4096, 1) + keyRand := NewCannedRandom(4096, 1) feeAccount = evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) @@ -33,7 +32,7 @@ func makeTestKeys(t *testing.T) (feeAccount, srcAccount, dstAccount, senderSlot, } func TestBuildERC20TransferReceipt(t *testing.T) { - crand := rand.NewCannedRandom(1<<20, 42) + crand := NewCannedRandom(1<<20, 42) feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract := makeTestKeys(t) receipt, err := BuildERC20TransferReceipt( @@ -69,7 +68,7 @@ func TestBuildERC20TransferReceipt(t *testing.T) { } func TestBuildERC20TransferReceipt_InvalidInputs(t *testing.T) { - crand := rand.NewCannedRandom(1<<20, 42) + crand := NewCannedRandom(1<<20, 42) feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract := makeTestKeys(t) if _, err := BuildERC20TransferReceipt(nil, feeAccount, srcAccount, dstAccount, senderSlot, receiverSlot, erc20Contract, 1_000_000, 0); err == nil { @@ -85,8 +84,8 @@ func TestBuildERC20TransferReceipt_InvalidInputs(t *testing.T) { // Regression test: account keys with EVMKeyCode prefix must still be accepted. 
func TestBuildERC20TransferReceipt_EVMKeyCodeAccounts(t *testing.T) { - keyRand := rand.NewCannedRandom(4096, 1) - crand := rand.NewCannedRandom(1<<20, 42) + crand := NewCannedRandom(1<<20, 42) + keyRand := NewCannedRandom(4096, 1) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) @@ -114,8 +113,8 @@ func TestBuildERC20TransferReceipt_EVMKeyCodeAccounts(t *testing.T) { // Regression test: uses the exact key formats produced by data_generator.go // (EVMKeyCodeHash for accounts, EVMKeyStorage with full StorageKeyLen payload). func TestBuildERC20TransferReceipt_DataGeneratorKeyFormats(t *testing.T) { - keyRand := rand.NewCannedRandom(4096, 1) - crand := rand.NewCannedRandom(1<<20, 42) + crand := NewCannedRandom(1<<20, 42) + keyRand := NewCannedRandom(4096, 1) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 1, AddressLen)) @@ -135,8 +134,8 @@ func TestBuildERC20TransferReceipt_DataGeneratorKeyFormats(t *testing.T) { } func BenchmarkBuildERC20TransferReceipt(b *testing.B) { - keyRand := rand.NewCannedRandom(4096, 1) - receiptRand := rand.NewCannedRandom(1<<20, 2) + keyRand := NewCannedRandom(4096, 1) + receiptRand := NewCannedRandom(1<<20, 2) feeAccount := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, keyRand.Address(accountPrefix, 0, AddressLen)) srcAddr := keyRand.Address(accountPrefix, 1, AddressLen) diff --git a/sei-db/state_db/bench/cryptosim/util.go b/sei-db/state_db/bench/cryptosim/util.go index cc46e24c7f..508408d61e 100644 --- a/sei-db/state_db/bench/cryptosim/util.go +++ b/sei-db/state_db/bench/cryptosim/util.go @@ -42,6 +42,42 @@ func paddedCounterKey(s string) []byte { return b } +// Hash64 returns a well-distributed 64-bit hash of x. 
+// It implements the SplitMix64 finalizer, a fast non-cryptographic mixing +// function with excellent avalanche properties. It is suitable for hash tables, +// sharding, randomized iteration, and benchmarks, but it is NOT +// cryptographically secure. +// +// The function is a bijection over uint64 (no collisions as a mapping). +// +// References: +// - Steele, Lea, Flood. "Fast Splittable Pseudorandom Number Generators" +// (OOPSLA 2014): https://doi.org/10.1145/2660193.2660195 +// - Public domain reference implementation: +// http://xorshift.di.unimi.it/splitmix64.c +func Hash64(x int64) int64 { + z := uint64(x) //nolint:gosec // G115 - hash function, int64->uint64 conversion intentional + z += 0x9e3779b97f4a7c15 + z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9 + z = (z ^ (z >> 27)) * 0x94d049bb133111eb + z = z ^ (z >> 31) + //nolint:gosec // G115 - hash function converts uint64 to int64, overflow intentional + return int64(z) +} + +// PositiveHash64 returns the absolute value of Hash64(x). It never returns a negative value. +// When Hash64(x) is math.MinInt64, returns math.MaxInt64 since the true absolute value does not fit in int64. +func PositiveHash64(x int64) int64 { + result := Hash64(x) + if result == math.MinInt64 { + return math.MaxInt64 + } + if result < 0 { + return -result + } + return result +} + // ResolveAndCreateDir expands ~ to the home directory, resolves the path to // an absolute path, and creates the directory if it doesn't exist. 
func ResolveAndCreateDir(dataDir string) (string, error) { From b9e1d2f678811fbf456a6aeeb3915b9f959e9d0e Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 8 Apr 2026 13:44:46 -0500 Subject: [PATCH 116/119] fix unit tests --- sei-db/state_db/sc/composite/store_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index b6b5df63ed..5a7042041d 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -710,7 +710,7 @@ func TestReconcileVersionsAfterCrash(t *testing.T) { Name: EVMStoreName, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }, }, }, @@ -777,7 +777,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { {Key: []byte("bal"), Value: []byte{i}}, }}}, {Name: EVMStoreName, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }}}, })) _, err = cs.Commit() @@ -807,13 +807,13 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { // Continue committing new blocks on top of the reconciled state. // Version 3 is re-created with new data (0xA3 instead of 0x03). 
for i := byte(0); i < 3; i++ { - v := []byte{0xA0 + i + 3} + v := 0xA0 + i + 3 require.NoError(t, cs2.ApplyChangeSets([]*proto.NamedChangeSet{ {Name: "bank", Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: []byte("bal"), Value: v}, + {Key: []byte("bal"), Value: []byte{v}}, }}}, {Name: EVMStoreName, Changeset: proto.ChangeSet{Pairs: []*proto.KVPair{ - {Key: storageKey, Value: v}, + {Key: storageKey, Value: padLeft32(v)}, }}}, })) ver, err := cs2.Commit() @@ -872,7 +872,7 @@ func TestReconcileVersionsCosmosAheadByMultiple(t *testing.T) { Name: EVMStoreName, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ - {Key: storageKey, Value: []byte{i}}, + {Key: storageKey, Value: padLeft32(i)}, }, }, }, From 2b0fdd7cf07ff21802e8d5d9931a2a65c9c6f8aa Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Wed, 8 Apr 2026 14:34:56 -0500 Subject: [PATCH 117/119] fix unit test --- sei-db/state_db/sc/composite/store_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index 5a7042041d..a4c6405d1b 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -210,7 +210,7 @@ func TestLatticeHashCommitInfo(t *testing.T) { Name: EVMStoreName, Changeset: proto.ChangeSet{ Pairs: []*proto.KVPair{ - {Key: evmStorageKey, Value: []byte{round}}, + {Key: evmStorageKey, Value: padLeft32(round)}, }, }, }, From ab913ba137e5fc57352e01b45108e36036470eb7 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 9 Apr 2026 08:53:17 -0500 Subject: [PATCH 118/119] made suggested changes --- sei-db/state_db/sc/flatkv/config.go | 5 --- .../state_db/sc/flatkv/flatkv_test_config.go | 1 - sei-db/state_db/sc/flatkv/store_apply.go | 35 +++++++++---------- sei-db/state_db/sc/flatkv/store_read.go | 16 +++------ sei-db/state_db/sc/flatkv/store_test.go | 1 - 5 files changed, 21 insertions(+), 37 deletions(-) diff --git a/sei-db/state_db/sc/flatkv/config.go 
b/sei-db/state_db/sc/flatkv/config.go index 8f39eef9a1..1da9f1b6e0 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -95,10 +95,6 @@ type Config struct { // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. MiscConstantThreadCount int - - // If true, FlatKV will return an error if it encounters an unsupported key type. Otherwise, - // it will log a warning and continue. - StrictKeyTypeCheck bool } // DefaultConfig returns Config with safe default values. @@ -124,7 +120,6 @@ func DefaultConfig() *Config { ReaderPoolQueueSize: 1024, MiscPoolThreadsPerCore: 4.0, MiscConstantThreadCount: 0, - StrictKeyTypeCheck: true, } cfg.AccountCacheConfig.MaxSize = unit.GB diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go index 1434850852..4ab1b71bfa 100644 --- a/sei-db/state_db/sc/flatkv/flatkv_test_config.go +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -43,6 +43,5 @@ func DefaultTestConfig(t *testing.T) *Config { ReaderThreadsPerCore: 2.0, ReaderPoolQueueSize: 1024, MiscPoolThreadsPerCore: 4.0, - StrictKeyTypeCheck: true, } } diff --git a/sei-db/state_db/sc/flatkv/store_apply.go b/sei-db/state_db/sc/flatkv/store_apply.go index 79c83bb3f9..df13fe4d3a 100644 --- a/sei-db/state_db/sc/flatkv/store_apply.go +++ b/sei-db/state_db/sc/flatkv/store_apply.go @@ -19,9 +19,8 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error // Setup // /////////// s.phaseTimer.SetPhase("apply_change_sets_prepare") - s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) 
- changesByType, err := sortChangeSets(changeSets, s.config.StrictKeyTypeCheck) + changesByType, err := sortChangeSets(changeSets) if err != nil { return fmt.Errorf("failed to sort change sets: %w", err) } @@ -73,7 +72,7 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error storeWrites(s.codeWrites, codeChanges) // Gather legacy pairs - legacyChanges, err := processLegacyChanges(changesByType[evm.EVMKeyLegacy], blockHeight) + legacyChanges, err := processLegacyChanges(changesByType[evm.EVMKeyLegacy]) if err != nil { return fmt.Errorf("failed to parse legacy changes: %w", err) } @@ -110,6 +109,13 @@ func (s *CommitStore) ApplyChangeSets(changeSets []*proto.NamedChangeSet) error } s.workingLtHash = globalHash + ////////////// + // Finalize // + ////////////// + + // Now that we've made it through the batch without errors, we can add the change sets to the pending change sets. + s.pendingChangeSets = append(s.pendingChangeSets, changeSets...) + s.phaseTimer.SetPhase("apply_change_done") return nil } @@ -126,12 +132,8 @@ func storeWrites[T vtype.VType]( } } -// Sort the change sets by type. -func sortChangeSets( - changeSets []*proto.NamedChangeSet, - // If true, returns an error if an unsupported key type is encountered. - strict bool, -) (map[evm.EVMKeyKind]map[string][]byte, error) { +// Sort the change sets by type. This method only returns an error if +func sortChangeSets(changeSets []*proto.NamedChangeSet) (map[evm.EVMKeyKind]map[string][]byte, error) { result := make(map[evm.EVMKeyKind]map[string][]byte) for _, cs := range changeSets { @@ -139,15 +141,13 @@ func sortChangeSets( continue } for _, pair := range cs.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + // evm.ParseEVMKey() should return a valid key type 100% of the time, unless we add a new key but + // forget to update IsSupportedKeyType() and associated code. This is a sanity check. 
if !IsSupportedKeyType(kind) { - if strict { - return nil, fmt.Errorf("unsupported key type: %v", kind) - } else { - logger.Warn("unsupported key type in ApplyChangeSets", "kind", kind) - continue - } + return nil, fmt.Errorf("unsupported key type: %v", kind) } keyStr := string(keyBytes) @@ -211,10 +211,7 @@ func processCodeChanges( } // Process incoming legacy changes into a form appropriate for hashing and insertion into the DB. -func processLegacyChanges( - rawChanges map[string][]byte, - blockHeight int64, -) (map[string]*vtype.LegacyData, error) { +func processLegacyChanges(rawChanges map[string][]byte) (map[string]*vtype.LegacyData, error) { result := make(map[string]*vtype.LegacyData) for keyStr, rawChange := range rawChanges { diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index b0b08fe238..8922ba5f18 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -15,11 +15,8 @@ import ( func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { kind, keyBytes := evm.ParseEVMKey(key) if !IsSupportedKeyType(kind) { - if s.config.StrictKeyTypeCheck { - return nil, false, fmt.Errorf("unsupported key type: %v", kind) - } - logger.Warn("unsupported key type in Get", "kind", kind) - return nil, false, nil + // Only possible if a new type is added to evm.ParseEVMKey() without updating code to handle that type. 
+ return nil, false, fmt.Errorf("unsupported key type: %v", kind) } switch kind { @@ -76,11 +73,8 @@ func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { func (s *CommitStore) GetBlockHeightModified(key []byte) (int64, bool, error) { kind, keyBytes := evm.ParseEVMKey(key) if !IsSupportedKeyType(kind) { - if s.config.StrictKeyTypeCheck { - return -1, false, fmt.Errorf("unsupported key type: %v", kind) - } - logger.Warn("unsupported key type in GetBlockHeightModified", "kind", kind) - return -1, false, nil + // Only possible if a new type is added to evm.ParseEVMKey() without updating code to handle that type. + return -1, false, fmt.Errorf("unsupported key type: %v", kind) } switch kind { @@ -114,7 +108,7 @@ func (s *CommitStore) GetBlockHeightModified(key []byte) (int64, bool, error) { } return cd.GetBlockHeight(), true, nil default: - return -1, false, fmt.Errorf("unsupported key type: %v", kind) + return -1, false, fmt.Errorf("block height modified not tracked for key type: %v", kind) } } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index bbc1eb5cc7..83e69e5ad4 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -713,7 +713,6 @@ func TestGetUnsupportedKeyType_Strict(t *testing.T) { func TestGetUnsupportedKeyType_NonStrict(t *testing.T) { cfg := DefaultTestConfig(t) - cfg.StrictKeyTypeCheck = false s := setupTestStoreWithConfig(t, cfg) defer s.Close() From b30f03fc50ba3a53d5ed22d3788ff613820c59a2 Mon Sep 17 00:00:00 2001 From: Cody Littley Date: Thu, 9 Apr 2026 09:41:38 -0500 Subject: [PATCH 119/119] removed err from API --- .../state_db/bench/wrappers/flatkv_wrapper.go | 3 +- sei-db/state_db/sc/composite/store_test.go | 13 +- sei-db/state_db/sc/flatkv/api.go | 6 +- .../state_db/sc/flatkv/crash_recovery_test.go | 12 +- sei-db/state_db/sc/flatkv/exporter_test.go | 51 ++-- .../sc/flatkv/lthash_correctness_test.go | 57 ++-- 
sei-db/state_db/sc/flatkv/snapshot_test.go | 99 +++---- sei-db/state_db/sc/flatkv/store_read.go | 42 ++- sei-db/state_db/sc/flatkv/store_read_test.go | 253 ++++++------------ sei-db/state_db/sc/flatkv/store_test.go | 76 ++---- sei-db/state_db/sc/flatkv/store_write_test.go | 128 +++------ 11 files changed, 254 insertions(+), 486 deletions(-) diff --git a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go index d6ee7fe7f4..7f6a270bec 100644 --- a/sei-db/state_db/bench/wrappers/flatkv_wrapper.go +++ b/sei-db/state_db/bench/wrappers/flatkv_wrapper.go @@ -61,7 +61,8 @@ func (f *flatKVWrapper) Close() error { } func (f *flatKVWrapper) Read(key []byte) (data []byte, found bool, err error) { - return f.base.Get(key) + val, ok := f.base.Get(key) + return val, ok, nil } func (f *flatKVWrapper) GetPhaseTimer() *metrics.PhaseTimer { diff --git a/sei-db/state_db/sc/composite/store_test.go b/sei-db/state_db/sc/composite/store_test.go index a4c6405d1b..3a8c1beddf 100644 --- a/sei-db/state_db/sc/composite/store_test.go +++ b/sei-db/state_db/sc/composite/store_test.go @@ -26,11 +26,11 @@ func (f *failingEVMStore) LoadVersion(int64, bool) (flatkv.Store, error) { } func (f *failingEVMStore) ApplyChangeSets([]*proto.NamedChangeSet) error { return nil } func (f *failingEVMStore) Commit() (int64, error) { return 0, nil } -func (f *failingEVMStore) Get([]byte) ([]byte, bool, error) { return nil, false, nil } +func (f *failingEVMStore) Get([]byte) ([]byte, bool) { return nil, false } func (f *failingEVMStore) GetBlockHeightModified([]byte) (int64, bool, error) { return -1, false, nil } -func (f *failingEVMStore) Has([]byte) (bool, error) { return false, nil } +func (f *failingEVMStore) Has([]byte) bool { return false } func (f *failingEVMStore) Iterator(_, _ []byte) flatkv.Iterator { return nil } func (f *failingEVMStore) IteratorByPrefix([]byte) flatkv.Iterator { return nil } func (f *failingEVMStore) RootHash() []byte { return nil } @@ 
-580,13 +580,11 @@ func TestExportImportSplitWrite(t *testing.T) { // Verify FlatKV data require.NotNil(t, dst.evmCommitter) - got, found, err := dst.evmCommitter.Get(storageKey) - require.NoError(t, err) + got, found := dst.evmCommitter.Get(storageKey) require.True(t, found, "storage key should exist in FlatKV after import") require.Equal(t, storageVal, got) - got, found, err = dst.evmCommitter.Get(nonceKey) - require.NoError(t, err) + got, found = dst.evmCommitter.Get(nonceKey) require.True(t, found, "nonce key should exist in FlatKV after import") require.Equal(t, nonceVal, got) } @@ -838,8 +836,7 @@ func TestReconcileVersionsThenContinueCommitting(t *testing.T) { bankStore := cs3.GetChildStoreByName("bank") require.Equal(t, []byte{0xA5}, bankStore.Get([]byte("bal"))) - got, found, err := cs3.evmCommitter.Get(storageKey) - require.NoError(t, err) + got, found := cs3.evmCommitter.Get(storageKey) require.True(t, found) require.Equal(t, padLeft32(0xA5), got) } diff --git a/sei-db/state_db/sc/flatkv/api.go b/sei-db/state_db/sc/flatkv/api.go index 3293881e47..4c4537603c 100644 --- a/sei-db/state_db/sc/flatkv/api.go +++ b/sei-db/state_db/sc/flatkv/api.go @@ -33,15 +33,15 @@ type Store interface { // Commit persists buffered writes and advances the version. Commit() (int64, error) - // Get returns the value for the x/evm memiavl key. If not found, returns (nil, false, nil). - Get(key []byte) (value []byte, found bool, err error) + // Get returns the value for the x/evm memiavl key. If not found, returns (nil, false). + Get(key []byte) (value []byte, found bool) // GetBlockHeightModified returns the block height at which the key was last modified. // If not found, returns (-1, false, nil). GetBlockHeightModified(key []byte) (int64, bool, error) // Has reports whether the x/evm memiavl key exists. - Has(key []byte) (bool, error) + Has(key []byte) bool // Iterator returns an iterator over [start, end) in memiavl key order. // Pass nil for unbounded. 
diff --git a/sei-db/state_db/sc/flatkv/crash_recovery_test.go b/sei-db/state_db/sc/flatkv/crash_recovery_test.go index 1e05e2377e..d9849c3bfa 100644 --- a/sei-db/state_db/sc/flatkv/crash_recovery_test.go +++ b/sei-db/state_db/sc/flatkv/crash_recovery_test.go @@ -122,8 +122,7 @@ func TestCrashRecoveryGlobalMetadataAheadOfDataDBs(t *testing.T) { for i := 1; i <= 5; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slotN(byte(i)))) - val, found, err := s2.Get(key) - require.NoError(t, err) + val, found := s2.Get(key) require.True(t, found, "slot %d should exist after recovery", i) require.Equal(t, padLeft32(byte(i*11)), val) } @@ -165,8 +164,7 @@ func TestCrashRecoveryWALReplayLargeGap(t *testing.T) { // All 20 storage slots should be readable. for i := 1; i <= 20; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slotN(byte(i)))) - val, found, err := s2.Get(key) - require.NoError(t, err) + val, found := s2.Get(key) require.True(t, found, "slot %d should exist", i) require.Equal(t, padLeft32(byte(i)), val) } @@ -207,8 +205,7 @@ func TestCrashRecoveryEmptyWALAfterSnapshot(t *testing.T) { require.Equal(t, expectedVersion, s2.Version()) require.Equal(t, expectedHash, s2.RootHash()) - val, found, err := s2.Get(key) - require.NoError(t, err) + val, found := s2.Get(key) require.True(t, found) require.Equal(t, padLeft32(0xAA), val) @@ -301,8 +298,7 @@ func TestCrashRecoveryCrashAfterWALBeforeDBCommit(t *testing.T) { require.Equal(t, int64(2), s2.Version()) require.NotEqual(t, hashAfterV1, s2.RootHash(), "hash should differ after v2 replay") - val, found, err := s2.Get(key) - require.NoError(t, err) + val, found := s2.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x22), val, "v2 value should be present after catchup") verifyLtHashConsistency(t, s2) diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index 2b375adaeb..8a804deb8a 100644 --- 
a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -202,23 +202,19 @@ func TestExporterRoundTrip(t *testing.T) { // --- Verify round-trip --- require.Equal(t, int64(1), s2.Version()) - got, found, err := s2.Get(storageKey) - require.NoError(t, err) + got, found := s2.Get(storageKey) require.True(t, found, "storage key should exist after import") require.Equal(t, storageVal, got) - got, found, err = s2.Get(nonceKey) - require.NoError(t, err) + got, found = s2.Get(nonceKey) require.True(t, found, "nonce key should exist after import") require.Equal(t, nonceVal, got) - got, found, err = s2.Get(codeKey) - require.NoError(t, err) + got, found = s2.Get(codeKey) require.True(t, found, "code key should exist after import") require.Equal(t, codeVal, got) - got, found, err = s2.Get(codeHashKey) - require.NoError(t, err) + got, found = s2.Get(codeHashKey) require.True(t, found, "codehash key should exist after import") require.Equal(t, codeHashVal, got) @@ -329,13 +325,11 @@ func TestImportSurvivesReopen(t *testing.T) { require.Equal(t, int64(1), s2.Version()) - got, found, err := s2.Get(storageKey) - require.NoError(t, err) + got, found := s2.Get(storageKey) require.True(t, found, "storage key must survive reopen") require.Equal(t, storageVal, got) - got, found, err = s2.Get(nonceKey) - require.NoError(t, err) + got, found = s2.Get(nonceKey) require.True(t, found, "nonce key must survive reopen") require.Equal(t, nonceVal, got) @@ -400,8 +394,7 @@ func TestImportPurgesStaleData(t *testing.T) { var found bool for _, k := range staleKeys { - _, found, err = s.Get(k) - require.NoError(t, err) + _, found = s.Get(k) require.True(t, found, "pre-import: key should exist") } @@ -449,29 +442,24 @@ func TestImportPurgesStaleData(t *testing.T) { // --- Phase 4: verify stale keys are gone across all DB types --- var got []byte - got, found, err = s.Get(storageA) - require.NoError(t, err) + got, found = s.Get(storageA) require.True(t, found, 
"storage key A should exist") require.Equal(t, newStorageVal, got) - got, found, err = s.Get(nonceA) - require.NoError(t, err) + got, found = s.Get(nonceA) require.True(t, found, "nonce key A should exist") require.Equal(t, newNonceVal, got) - got, found, err = s.Get(codeB) - require.NoError(t, err) + got, found = s.Get(codeB) require.True(t, found, "code key B should exist") require.Equal(t, newCodeVal, got) - got, found, err = s.Get(codeHashB) - require.NoError(t, err) + got, found = s.Get(codeHashB) require.True(t, found, "codehash key B should exist") require.Equal(t, newCodeHashVal, got) for _, k := range staleKeys { - _, found, err = s.Get(k) - require.NoError(t, err) + _, found = s.Get(k) require.False(t, found, "stale key should NOT exist after import") } @@ -487,8 +475,7 @@ func TestImportPurgesStaleData(t *testing.T) { require.Equal(t, int64(1), s.Version()) for _, k := range staleKeys { - _, found, err = s.Get(k) - require.NoError(t, err) + _, found = s.Get(k) require.False(t, found, "stale key must remain absent after reopen") } require.Equal(t, srcHash, s.RootHash()) @@ -570,8 +557,7 @@ func TestImporterHeightNonZeroSkipped(t *testing.T) { // Data should NOT have been imported. key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x01), slotN(0x01))) - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found, "height != 0 node should be skipped") require.NoError(t, s.Close()) } @@ -673,8 +659,7 @@ func TestImporterDoubleImport(t *testing.T) { require.NoError(t, imp1.Close()) key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x01), slotN(0x01))) - val, found, err := s.Get(key1) - require.NoError(t, err) + val, found := s.Get(key1) require.True(t, found) require.Equal(t, padLeft32(0x11), val) @@ -690,13 +675,11 @@ func TestImporterDoubleImport(t *testing.T) { require.Equal(t, int64(2), s.Version()) // Data from first import should be gone. 
- _, found, err = s.Get(key1) - require.NoError(t, err) + _, found = s.Get(key1) require.False(t, found, "first import data should be wiped by second import") key2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x02), slotN(0x02))) - val, found, err = s.Get(key2) - require.NoError(t, err) + val, found = s.Get(key2) require.True(t, found) require.Equal(t, padLeft32(0x22), val) require.NoError(t, s.Close()) diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index aa46732575..e78ac3370b 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -735,8 +735,7 @@ func TestLtHashCrossApplyAccountSameFieldOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, uint64(20), binary.BigEndian.Uint64(val)) } @@ -770,8 +769,7 @@ func TestLtHashCrossApplyStorageOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x33), val) } @@ -808,8 +806,7 @@ func TestLtHashCrossApplyCodeOverwrite(t *testing.T) { // Verify final value key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, []byte{0x60, 0x40, 0x02, 0x03}, val) } @@ -841,8 +838,7 @@ func TestLtHashCrossApplyLegacyOverwrite(t *testing.T) { verifyLtHashAtHeight(t, s, 2) // Verify final value - val, found, err := s.Get(legacyKey) - require.NoError(t, err) + val, found := s.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x30}, val) } @@ -902,32 +898,27 @@ func 
TestLtHashCrossApplyMixedOverwrite(t *testing.T) { // Verify all final values nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found) require.Equal(t, uint64(100), binary.BigEndian.Uint64(nonceVal)) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found, err := s.Get(chKey) - require.NoError(t, err) + chVal, found := s.Get(chKey) require.True(t, found) expected := codeHashN(0x30) require.Equal(t, expected[:], chVal) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeVal, found, err := s.Get(codeKey) - require.NoError(t, err) + codeVal, found := s.Get(codeKey) require.True(t, found) require.Equal(t, []byte{0x60, 0x60, 0x01}, codeVal) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageVal, found, err := s.Get(storageKey) - require.NoError(t, err) + storageVal, found := s.Get(storageKey) require.True(t, found) require.Equal(t, padLeft32(0x33), storageVal) - legacyVal, found, err := s.Get(legacyKey) - require.NoError(t, err) + legacyVal, found := s.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -989,14 +980,12 @@ func TestLtHashAccountDeleteThenRecreate(t *testing.T) { verifyLtHashAtHeight(t, s, 2) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found, err = s.Get(chKey) - require.NoError(t, err) + _, found = s.Get(chKey) require.False(t, found, "codehash should be zero (EOA)") raw, err := s.accountDB.Get(AccountKey(addr)) @@ -1056,13 +1045,11 @@ func TestAccountPendingReadPartialDelete(t *testing.T) { })) // Pending reads before commit - nonceVal, found, err := s.Get(nonceKey) 
- require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(42), nonceVal) - chVal, found, err := s.Get(chKey) - require.NoError(t, err) + chVal, found := s.Get(chKey) require.False(t, found, "codehash should be not-found after pending delete") require.Nil(t, chVal) @@ -1088,13 +1075,11 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both fields are readable before commit - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found, "nonce should be readable from pending writes") require.Equal(t, nonceBytes(10), nonceVal) - chVal, found, err := s.Get(chKey) - require.NoError(t, err) + chVal, found := s.Get(chKey) require.True(t, found, "codehash should be readable from pending writes") expected := codeHashN(0xEE) require.Equal(t, expected[:], chVal) @@ -1105,21 +1090,17 @@ func TestAccountRowDeleteGetBeforeCommit(t *testing.T) { })) // Verify both fields return not-found BEFORE commit (the core semantic change) - nonceVal, found, err = s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found = s.Get(nonceKey) require.False(t, found, "nonce should not be found after pending full-delete") require.Nil(t, nonceVal) - chVal, found, err = s.Get(chKey) - require.NoError(t, err) + chVal, found = s.Get(chKey) require.False(t, found, "codehash should not be found after pending full-delete") require.Nil(t, chVal) - hasNonce, err := s.Has(nonceKey) - require.NoError(t, err) + hasNonce := s.Has(nonceKey) require.False(t, hasNonce, "Has(nonce) should be false after pending full-delete") - hasCodeHash, err := s.Has(chKey) - require.NoError(t, err) + hasCodeHash := s.Has(chKey) require.False(t, hasCodeHash, "Has(codehash) should be false after pending full-delete") // Verify isDelete is set diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go 
b/sei-db/state_db/sc/flatkv/snapshot_test.go index 4981c3809c..6e0c861d0b 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -121,12 +121,10 @@ func TestOpenFromSnapshot(t *testing.T) { // Verify data from all 3 versions is present key1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x10}, Slot{0x01})) key3 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x10}, Slot{0x03})) - v, ok, err := s2.Get(key1) - require.NoError(t, err) + v, ok := s2.Get(key1) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) - v, ok, err = s2.Get(key3) - require.NoError(t, err) + v, ok = s2.Get(key3) require.True(t, ok) require.Equal(t, padLeft32(0x03), v) } @@ -194,14 +192,12 @@ func TestRollbackRewindsState(t *testing.T) { // v5's data should not exist (WAL truncated, snapshot pruned) key5 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x30}, Slot{0x05})) - _, ok, err := s.Get(key5) - require.NoError(t, err) + _, ok := s.Get(key5) require.False(t, ok, "v5 data should be gone after rollback to v4") // v4's data should still exist key4 := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x30}, Slot{0x04})) - v, ok, err := s.Get(key4) - require.NoError(t, err) + v, ok := s.Get(key4) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) @@ -478,8 +474,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) // Record baseline value at v2 for the same key. 
- vAtV2, ok, err := s1.Get(key) - require.NoError(t, err) + vAtV2, ok := s1.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x01), vAtV2) @@ -497,8 +492,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, err) _, err = s2.LoadVersion(2, false) require.NoError(t, err) - gotV2, ok, err := s2.Get(key) - require.NoError(t, err) + gotV2, ok := s2.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x01), gotV2, "snapshot baseline should remain stable") require.NoError(t, s2.Close()) @@ -513,8 +507,7 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { defer s3.Close() require.Equal(t, int64(4), s3.Version()) - gotLatest, ok, err := s3.Get(key) - require.NoError(t, err) + gotLatest, ok := s3.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x04), gotLatest) } @@ -554,8 +547,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), s1.Version()) require.Equal(t, hashAtV2, s1.RootHash()) - v, ok, err := s1.Get(key) - require.NoError(t, err) + v, ok := s1.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s1.Close()) @@ -569,8 +561,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(4), s2.Version()) require.Equal(t, hashAtV4, s2.RootHash()) - v, ok, err = s2.Get(key) - require.NoError(t, err) + v, ok = s2.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x04), v) require.NoError(t, s2.Close()) @@ -584,8 +575,7 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") require.Equal(t, int64(2), s3.Version()) require.Equal(t, hashAtV2, s3.RootHash()) - v, ok, err = s3.Get(key) - require.NoError(t, err) + v, ok = s3.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v) require.NoError(t, s3.Close()) @@ -1268,20 +1258,17 @@ func TestSnapshotPreservesAllKeyTypes(t 
*testing.T) { require.Equal(t, hash, s2.RootHash()) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - v, ok, err := s2.Get(storageKey) - require.NoError(t, err) + v, ok := s2.Get(storageKey) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - v, ok, err = s2.Get(nonceKey) - require.NoError(t, err) + v, ok = s2.Get(nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 7}, v) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - v, ok, err = s2.Get(codeKey) - require.NoError(t, err) + v, ok = s2.Get(codeKey) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -1382,25 +1369,21 @@ func TestReopenAfterDeletes(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash()) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - _, found, err := s2.Get(storageKey) - require.NoError(t, err) + _, found := s2.Get(storageKey) require.False(t, found, "storage should stay deleted after reopen") codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found, err = s2.Get(codeKey2) - require.NoError(t, err) + _, found = s2.Get(codeKey2) require.False(t, found, "code should stay deleted after reopen") // With Account Row GC, all-zero account row is physically deleted. 
nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found, err := s2.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s2.Get(nonceKey) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found, err := s2.Get(chKey) - require.NoError(t, err) + chVal, found := s2.Get(chKey) require.False(t, found, "codehash should not be found after reopen (row deleted)") require.Nil(t, chVal) } @@ -1431,9 +1414,7 @@ func TestWALTruncationThenRollback(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) var val []byte var found bool - var loopErr error - val, found, loopErr = s.Get(key) - require.NoError(t, loopErr) + val, found = s.Get(key) require.True(t, found, "key at block %d should exist after rollback to v5", i) require.Equal(t, padLeft32(byte(i)), val) } @@ -1441,9 +1422,7 @@ func TestWALTruncationThenRollback(t *testing.T) { for i := 6; i <= 10; i++ { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) var found bool - var loopErr error - _, found, loopErr = s.Get(key) - require.NoError(t, loopErr) + _, found = s.Get(key) require.False(t, found, "key at block %d should NOT exist after rollback to v5", i) } @@ -1485,9 +1464,7 @@ func TestReopenAfterSnapshotAndTruncation(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(byte(i)), slotN(byte(i)))) var val []byte var found bool - var loopErr error - val, found, loopErr = s2.Get(key) - require.NoError(t, loopErr) + val, found = s2.Get(key) require.True(t, found, "key at block %d should exist after reopen", i) require.Equal(t, padLeft32(byte(i)), val) } @@ -1613,8 +1590,7 @@ func TestWALDirectoryDeleted(t *testing.T) { require.Equal(t, int64(3), s2.Version()) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(Address{0x03}, Slot{0x03})) - 
val, found, err := s2.Get(key) - require.NoError(t, err) + val, found := s2.Get(key) require.True(t, found) require.Equal(t, padLeft32(0xCC), val) } @@ -1777,8 +1753,7 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { require.Equal(t, hashBefore, s2.RootHash(), "LtHash should match after reopen") - nonceVal, found, err := s2.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s2.Get(nonceKey) require.False(t, found, "nonce should not be found after reopen (row deleted)") require.Nil(t, nonceVal) } @@ -1840,8 +1815,7 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { require.Equal(t, hashAtV2, s2.RootHash(), "LtHash should match after WAL replay") nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found, err := s2.Get(nonceKey) - require.NoError(t, err) + _, found := s2.Get(nonceKey) require.False(t, found, "nonce should not be found after WAL replay (row deleted)") } @@ -1870,8 +1844,7 @@ func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { _, err = s.Commit() // v1 (snapshot taken) require.NoError(t, err) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) @@ -1885,16 +1858,14 @@ func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { _, err = s.Commit() // v2 (row deleted, snapshot taken) require.NoError(t, err) - _, found, err = s.Get(nonceKey) - require.NoError(t, err) + _, found = s.Get(nonceKey) require.False(t, found, "nonce should be gone at v2") // Rollback to v1: row should be restored require.NoError(t, s.Rollback(1)) require.Equal(t, int64(1), s.Version()) - nonceVal, found, err = s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found = s.Get(nonceKey) require.True(t, found, "nonce should be restored after rollback to v1") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 3}, nonceVal) @@ -1940,8 +1911,7 @@ func TestRollbackToCurrentVersion(t *testing.T) { 
require.Equal(t, int64(1), s.Version()) require.Equal(t, hashV1, s.RootHash()) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x22), val) } @@ -1983,13 +1953,11 @@ func TestRollbackDiscardsUncommittedPendingWrites(t *testing.T) { require.NoError(t, s.Rollback(1)) require.Equal(t, int64(1), s.Version()) - val, found, err := s.Get(key1) - require.NoError(t, err) + val, found := s.Get(key1) require.True(t, found) require.Equal(t, padLeft32(0x44), val) - _, found, err = s.Get(key2) - require.NoError(t, err) + _, found = s.Get(key2) require.False(t, found, "uncommitted pending write should be discarded after rollback") } @@ -2020,8 +1988,7 @@ func TestRollbackThenNewTimeline(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), v) // Version 2 in the new timeline. - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0xFF), val) } @@ -2117,8 +2084,7 @@ func TestWriteSnapshotWhileReadOnlyCloneActive(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) // RO clone should still work. - val, found, err := ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x07), slotN(0x01)))) - require.NoError(t, err) + val, found := ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x07), slotN(0x01)))) require.True(t, found) require.Equal(t, padLeft32(0x77), val) require.NoError(t, s.Close()) @@ -2139,8 +2105,7 @@ func TestWriteSnapshotDirParameterIgnored(t *testing.T) { require.NoError(t, s.WriteSnapshot("/tmp/this-should-be-ignored")) // Verify snapshot was created in the correct location (not the passed dir). 
- val, found, err := s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x08), slotN(0x01)))) - require.NoError(t, err) + val, found := s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0x08), slotN(0x01)))) require.True(t, found) require.Equal(t, padLeft32(0x88), val) } diff --git a/sei-db/state_db/sc/flatkv/store_read.go b/sei-db/state_db/sc/flatkv/store_read.go index 8922ba5f18..c562731b70 100644 --- a/sei-db/state_db/sc/flatkv/store_read.go +++ b/sei-db/state_db/sc/flatkv/store_read.go @@ -11,60 +11,60 @@ import ( ) // Get returns the value for the given memiavl key. -// Returns (value, true, nil) if found, (nil, false, nil) if not found. -func (s *CommitStore) Get(key []byte) ([]byte, bool, error) { +// Returns (value, true) if found, (nil, false) if not found. +// Panics on I/O errors or unsupported key types. +func (s *CommitStore) Get(key []byte) ([]byte, bool) { kind, keyBytes := evm.ParseEVMKey(key) if !IsSupportedKeyType(kind) { - // Only possible if a new type is added to evm.ParseEVMKey() without updating code to handle that type. 
- return nil, false, fmt.Errorf("unsupported key type: %v", kind) + panic(fmt.Sprintf("flatkv: unsupported key type: %v", kind)) } switch kind { case evm.EVMKeyStorage: value, err := s.getStorageValue(keyBytes) if err != nil { - return nil, false, err + panic(fmt.Sprintf("flatkv: Get storage key %x: %v", key, err)) } - return value, value != nil, nil + return value, value != nil case evm.EVMKeyNonce, evm.EVMKeyCodeHash: accountData, err := s.getAccountData(keyBytes) if err != nil { - return nil, false, err + panic(fmt.Sprintf("flatkv: Get account key %x: %v", key, err)) } if accountData == nil || accountData.IsDelete() { - return nil, false, nil + return nil, false } if kind == evm.EVMKeyNonce { nonceBytes := make([]byte, vtype.NonceLen) binary.BigEndian.PutUint64(nonceBytes, accountData.GetNonce()) - return nonceBytes, true, nil + return nonceBytes, true } // CodeHash codeHash := accountData.GetCodeHash() var zeroCodeHash vtype.CodeHash if *codeHash == zeroCodeHash { - return nil, false, nil + return nil, false } - return codeHash[:], true, nil + return codeHash[:], true case evm.EVMKeyCode: value, err := s.getCodeValue(keyBytes) if err != nil { - return nil, false, err + panic(fmt.Sprintf("flatkv: Get code key %x: %v", key, err)) } - return value, value != nil, nil + return value, value != nil case evm.EVMKeyLegacy: value, err := s.getLegacyValue(keyBytes) if err != nil { - return nil, false, err + panic(fmt.Sprintf("flatkv: Get legacy key %x: %v", key, err)) } - return value, value != nil, nil + return value, value != nil default: - return nil, false, nil + return nil, false } } @@ -113,12 +113,10 @@ func (s *CommitStore) GetBlockHeightModified(key []byte) (int64, bool, error) { } // Has reports whether the given memiavl key exists. 
-func (s *CommitStore) Has(key []byte) (bool, error) { - _, found, err := s.Get(key) - if err != nil { - return false, fmt.Errorf("failed to get key %x: %w", key, err) - } - return found, nil +// Panics on I/O errors or unsupported key types. +func (s *CommitStore) Has(key []byte) bool { + _, found := s.Get(key) + return found } // Iterator returns an iterator over [start, end) in memiavl key order. diff --git a/sei-db/state_db/sc/flatkv/store_read_test.go b/sei-db/state_db/sc/flatkv/store_read_test.go index 8e85d5180b..39a056fd2f 100644 --- a/sei-db/state_db/sc/flatkv/store_read_test.go +++ b/sei-db/state_db/sc/flatkv/store_read_test.go @@ -26,8 +26,7 @@ func TestStoreGetPendingWrites(t *testing.T) { key := memiavlStorageKey(addr, slot) // No data initially - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found) // Apply changeset (adds to pending writes) @@ -35,8 +34,7 @@ func TestStoreGetPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, value, got) @@ -44,8 +42,7 @@ func TestStoreGetPendingWrites(t *testing.T) { commitAndCheck(t, s) // Should still be readable after commit - got, found, err = s.Get(key) - require.NoError(t, err) + got, found = s.Get(key) require.True(t, found) require.Equal(t, value, got) } @@ -64,8 +61,7 @@ func TestStoreGetPendingDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.True(t, found) // Apply delete (pending) @@ -73,16 +69,14 @@ func TestStoreGetPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found, err = s.Get(key) - require.NoError(t, err) + _, found = s.Get(key) require.False(t, 
found) // Commit delete commitAndCheck(t, s) // Still should not be found - _, found, err = s.Get(key) - require.NoError(t, err) + _, found = s.Get(key) require.False(t, found) } @@ -99,11 +93,9 @@ func TestStoreGetNonStorageKeys(t *testing.T) { evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]), } - var err error var found bool for _, key := range nonStorageKeys { - _, found, err = s.Get(key) - require.NoError(t, err) + _, found = s.Get(key) require.False(t, found, "non-storage keys should not be found before write") } } @@ -117,8 +109,7 @@ func TestStoreHas(t *testing.T) { key := memiavlStorageKey(addr, slot) // Initially not found - found, err := s.Has(key) - require.NoError(t, err) + found := s.Has(key) require.False(t, found) // Write and commit @@ -127,8 +118,7 @@ func TestStoreHas(t *testing.T) { commitAndCheck(t, s) // Now should exist - found, err = s.Has(key) - require.NoError(t, err) + found = s.Has(key) require.True(t, found) } @@ -144,8 +134,7 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { legacyKey := append([]byte{0x09}, addr[:]...) 
// Not found initially - _, found, err := s.Get(legacyKey) - require.NoError(t, err) + _, found := s.Get(legacyKey) require.False(t, found) // Apply changeset @@ -153,15 +142,13 @@ func TestStoreGetLegacyPendingWrites(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) // Should be readable from pending writes - got, found, err := s.Get(legacyKey) - require.NoError(t, err) + got, found := s.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) // Commit and still readable commitAndCheck(t, s) - got, found, err = s.Get(legacyKey) - require.NoError(t, err) + got, found = s.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x40}, got) } @@ -178,8 +165,7 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs1})) commitAndCheck(t, s) - _, found, err := s.Get(legacyKey) - require.NoError(t, err) + _, found := s.Get(legacyKey) require.True(t, found) // Apply delete (pending) @@ -187,14 +173,12 @@ func TestStoreGetLegacyPendingDelete(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) // Should not be found (pending delete) - _, found, err = s.Get(legacyKey) - require.NoError(t, err) + _, found = s.Get(legacyKey) require.False(t, found) // Commit delete commitAndCheck(t, s) - _, found, err = s.Get(legacyKey) - require.NoError(t, err) + _, found = s.Get(legacyKey) require.False(t, found) } @@ -216,8 +200,7 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x77), got) @@ -227,8 +210,7 @@ func TestStoreDelete(t *testing.T) { commitAndCheck(t, s) // Should not exist - _, found, err = s.Get(key) - require.NoError(t, err) + _, found = s.Get(key) require.False(t, found) } @@ -443,50 +425,40 @@ func TestGetAllKeyTypesFromCommittedDB(t *testing.T) { 
commitAndCheck(t, s) // Storage - got, found, err := s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) - require.NoError(t, err) + got, found := s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) require.True(t, found, "storage should be found") require.Equal(t, padLeft32(0x42), got) // Nonce - got, found, err = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.NoError(t, err) + got, found = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) require.True(t, found, "nonce should be found") require.Equal(t, uint64(7), binary.BigEndian.Uint64(got)) // CodeHash - got, found, err = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.NoError(t, err) + got, found = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) require.True(t, found, "codehash should be found") require.Equal(t, ch[:], got) // Code - got, found, err = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.NoError(t, err) + got, found = s.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) require.True(t, found, "code should be found") require.Equal(t, bytecode, got) // Legacy - got, found, err = s.Get(legacyKey) - require.NoError(t, err) + got, found = s.Get(legacyKey) require.True(t, found, "legacy should be found") require.Equal(t, legacyVal, got) // Has should match - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) require.True(t, found) - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) require.True(t, found) - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) require.True(t, found) - found, err = 
s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) require.True(t, found) - found, err = s.Has(legacyKey) - require.NoError(t, err) + found = s.Has(legacyKey) require.True(t, found) } @@ -503,20 +475,16 @@ func TestGetNonceFromCommittedEOA(t *testing.T) { })) commitAndCheck(t, s) - got, found, err := s.Get(nonceKey) - require.NoError(t, err) + got, found := s.Get(nonceKey) require.True(t, found, "nonce should be found for EOA") require.Equal(t, uint64(42), binary.BigEndian.Uint64(got)) - _, found, err = s.Get(chKey) - require.NoError(t, err) + _, found = s.Get(chKey) require.False(t, found, "codehash should NOT be found for EOA") - found, err = s.Has(nonceKey) - require.NoError(t, err) + found = s.Has(nonceKey) require.True(t, found) - found, err = s.Has(chKey) - require.NoError(t, err) + found = s.Has(chKey) require.False(t, found) } @@ -534,21 +502,17 @@ func TestGetCodeHashFromCommittedContract(t *testing.T) { })) commitAndCheck(t, s) - got, found, err := s.Get(chKey) - require.NoError(t, err) + got, found := s.Get(chKey) require.True(t, found, "codehash should be found for contract") require.Equal(t, ch[:], got) - got, found, err = s.Get(nonceKey) - require.NoError(t, err) + got, found = s.Get(nonceKey) require.True(t, found) require.Equal(t, uint64(1), binary.BigEndian.Uint64(got)) - found, err = s.Has(chKey) - require.NoError(t, err) + found = s.Has(chKey) require.True(t, found) - found, err = s.Has(nonceKey) - require.NoError(t, err) + found = s.Has(nonceKey) require.True(t, found) } @@ -564,16 +528,14 @@ func TestGetCodeFromCommittedDB(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(codePair(addr, bytecode)), })) - got, found, err := s.Get(codeKey) - require.NoError(t, err) + got, found := s.Get(codeKey) require.True(t, found, "pending code write should be visible") require.Equal(t, bytecode, got) commitAndCheck(t, s) // Still 
visible after commit - got, found, err = s.Get(codeKey) - require.NoError(t, err) + got, found = s.Get(codeKey) require.True(t, found) require.Equal(t, bytecode, got) @@ -581,13 +543,11 @@ func TestGetCodeFromCommittedDB(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ namedCS(codeDeletePair(addr)), })) - _, found, err = s.Get(codeKey) - require.NoError(t, err) + _, found = s.Get(codeKey) require.False(t, found, "pending code delete should hide the entry") commitAndCheck(t, s) - _, found, err = s.Get(codeKey) - require.NoError(t, err) + _, found = s.Get(codeKey) require.False(t, found, "code should be gone after commit") } @@ -596,7 +556,7 @@ func TestGetUnknownKeyTypes(t *testing.T) { defer s.Close() // Nil and empty keys map to EVMKeyEmpty/EVMKeyUnknown, which is - // unsupported and errors under StrictKeyTypeCheck. + // unsupported and panics under StrictKeyTypeCheck. for _, tc := range []struct { name string key []byte @@ -605,15 +565,13 @@ func TestGetUnknownKeyTypes(t *testing.T) { {"empty key", []byte{}}, } { t.Run(tc.name, func(t *testing.T) { - _, _, err := s.Get(tc.key) - require.Error(t, err) - _, err = s.Has(tc.key) - require.Error(t, err) + require.Panics(t, func() { s.Get(tc.key) }) + require.Panics(t, func() { s.Has(tc.key) }) }) } // Non-empty keys that don't match a known prefix are classified as - // EVMKeyLegacy, which is a supported type — Get/Has should not error. + // EVMKeyLegacy, which is a supported type — Get/Has should not panic. 
for _, tc := range []struct { name string key []byte @@ -623,12 +581,10 @@ func TestGetUnknownKeyTypes(t *testing.T) { {"short nonce-like (2 bytes)", []byte{0x04, 0x01}}, } { t.Run(tc.name, func(t *testing.T) { - val, found, err := s.Get(tc.key) - require.NoError(t, err) + val, found := s.Get(tc.key) require.False(t, found) require.Nil(t, val) - found, err = s.Has(tc.key) - require.NoError(t, err) + found = s.Has(tc.key) require.False(t, found) }) } @@ -658,20 +614,15 @@ func TestGetAccountAfterFullDeletePending(t *testing.T) { namedCS(nonceDeletePair(addr), codeHashDeletePair(addr)), })) - _, nonceFound, err := s.Get(nonceKey) - require.NoError(t, err) + _, nonceFound := s.Get(nonceKey) require.False(t, nonceFound, "nonce should not be found after full delete (isDelete=true)") - _, chFound, err := s.Get(chKey) - require.NoError(t, err) + _, chFound := s.Get(chKey) require.False(t, chFound, "codehash should not be found after full delete (isDelete=true)") - var found bool - found, err = s.Has(nonceKey) - require.NoError(t, err) + found := s.Has(nonceKey) require.False(t, found) - found, err = s.Has(chKey) - require.NoError(t, err) + found = s.Has(chKey) require.False(t, found) } @@ -695,20 +646,15 @@ func TestGetAccountAfterFullDeleteCommitted(t *testing.T) { // After full delete + commit, the account row is physically deleted from // accountDB (batch.Delete in commitBatches). Both fields return not-found. 
- _, nonceFound, err := s.Get(nonceKey) - require.NoError(t, err) + _, nonceFound := s.Get(nonceKey) require.False(t, nonceFound, "nonce should not be found after full delete + commit") - _, chFound, err := s.Get(chKey) - require.NoError(t, err) + _, chFound := s.Get(chKey) require.False(t, chFound, "codehash should not be found after full delete + commit") - var found bool - found, err = s.Has(nonceKey) - require.NoError(t, err) + found := s.Has(nonceKey) require.False(t, found) - found, err = s.Has(chKey) - require.NoError(t, err) + found = s.Has(chKey) require.False(t, found) } @@ -731,13 +677,11 @@ func TestGetAccountAfterPartialDelete(t *testing.T) { })) commitAndCheck(t, s) - got, found, err := s.Get(nonceKey) - require.NoError(t, err) + got, found := s.Get(nonceKey) require.True(t, found, "nonce should survive partial delete") require.Equal(t, uint64(99), binary.BigEndian.Uint64(got)) - _, found, err = s.Get(chKey) - require.NoError(t, err) + _, found = s.Get(chKey) require.False(t, found, "codehash should be gone after delete") // Account row should still exist (EOA encoding) @@ -764,8 +708,7 @@ func TestGetAfterOverwrite(t *testing.T) { })) commitAndCheck(t, s) - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x11), got) @@ -774,8 +717,7 @@ func TestGetAfterOverwrite(t *testing.T) { })) commitAndCheck(t, s) - got, found, err = s.Get(key) - require.NoError(t, err) + got, found = s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x22, 0x33), got, "should return v2 value after overwrite") } @@ -800,8 +742,7 @@ func TestGetAfterDeleteAndRecreate(t *testing.T) { })) commitAndCheck(t, s) - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found, "should not be found after delete") // v3: re-create with different value @@ -810,8 +751,7 @@ func TestGetAfterDeleteAndRecreate(t *testing.T) { })) commitAndCheck(t, s) - got, 
found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0xBB, 0xCC), got, "should return v3 value after re-create") } @@ -856,28 +796,23 @@ func TestGetAfterReopenAllKeyTypes(t *testing.T) { require.NoError(t, err) defer s2.Close() - got, found, err := s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) - require.NoError(t, err) + got, found := s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) require.True(t, found, "storage should survive reopen") require.Equal(t, padLeft32(0x42), got) - got, found, err = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.NoError(t, err) + got, found = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) require.True(t, found, "nonce should survive reopen") require.Equal(t, uint64(100), binary.BigEndian.Uint64(got)) - got, found, err = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.NoError(t, err) + got, found = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) require.True(t, found, "codehash should survive reopen") require.Equal(t, ch[:], got) - got, found, err = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.NoError(t, err) + got, found = s2.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) require.True(t, found, "code should survive reopen") require.Equal(t, bytecode, got) - got, found, err = s2.Get(legacyKey) - require.NoError(t, err) + got, found = s2.Get(legacyKey) require.True(t, found, "legacy should survive reopen") require.Equal(t, []byte{0x77}, got) } @@ -1321,28 +1256,23 @@ func TestReadOnlyGetAllKeyTypes(t *testing.T) { require.NoError(t, err) defer ro.Close() - got, found, err := ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) - require.NoError(t, err) + got, found := ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) require.True(t, found) require.Equal(t, padLeft32(0x42), got) 
- got, found, err = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.NoError(t, err) + got, found = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) require.True(t, found) require.Equal(t, uint64(50), binary.BigEndian.Uint64(got)) - got, found, err = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.NoError(t, err) + got, found = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) require.True(t, found) require.Equal(t, ch[:], got) - got, found, err = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.NoError(t, err) + got, found = ro.Get(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) require.True(t, found) require.Equal(t, bytecode, got) - got, found, err = ro.Get(legacyKey) - require.NoError(t, err) + got, found = ro.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x77}, got) } @@ -1403,30 +1333,26 @@ func TestGetNilKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - _, _, err := s.Get(nil) - require.Error(t, err) + require.Panics(t, func() { s.Get(nil) }) } func TestGetEmptyKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - _, _, err := s.Get([]byte{}) - require.Error(t, err) + require.Panics(t, func() { s.Get([]byte{}) }) } func TestHasNilKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - _, err := s.Has(nil) - require.Error(t, err) + require.Panics(t, func() { s.Has(nil) }) } func TestHasEmptyKey(t *testing.T) { s := setupTestStore(t) defer s.Close() - _, err := s.Has([]byte{}) - require.Error(t, err) + require.Panics(t, func() { s.Has([]byte{}) }) } func TestHasForAllKeyTypes(t *testing.T) { @@ -1450,17 +1376,13 @@ func TestHasForAllKeyTypes(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - found, err := s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) - require.NoError(t, err) + found := s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot))) 
require.True(t, found) - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:])) require.True(t, found) - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:])) require.True(t, found) - found, err = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) - require.NoError(t, err) + found = s.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:])) require.True(t, found) } @@ -1475,14 +1397,12 @@ func TestHasOnPendingDeletes(t *testing.T) { cs := makeChangeSet(key, padLeft32(0xAA), false) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - found, err := s.Has(key) - require.NoError(t, err) + found := s.Has(key) require.True(t, found) delCS := makeChangeSet(key, nil, true) require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{delCS})) - found, err = s.Has(key) - require.NoError(t, err) + found = s.Has(key) require.False(t, found, "Has should return false for pending-deleted key") } @@ -1501,11 +1421,9 @@ func TestHasOnReadOnlyStore(t *testing.T) { require.NoError(t, err) defer ro.Close() - found, err := ro.Has(key) - require.NoError(t, err) + found := ro.Has(key) require.True(t, found) - found, err = ro.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0xFF), slotN(0xFF)))) - require.NoError(t, err) + found = ro.Has(evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addrN(0xFF), slotN(0xFF)))) require.False(t, found) require.NoError(t, s.Close()) } @@ -1546,16 +1464,14 @@ func TestGetAfterRollback(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs3})) commitAndCheck(t, s) // v3 - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x33), val) require.NoError(t, s.Rollback(2)) require.Equal(t, 
int64(2), s.Version()) - _, found, err = s.Get(key) - require.NoError(t, err) + _, found = s.Get(key) require.False(t, found, "key should be deleted at v2") } @@ -1566,8 +1482,7 @@ func TestGetWithTruncatedEVMKey(t *testing.T) { // A key with a valid storage prefix but too short to be parsed. statePrefix := evm.StateKeyPrefix() truncatedKey := append(statePrefix, 0x01, 0x02) - val, found, err := s.Get(truncatedKey) - require.NoError(t, err) + val, found := s.Get(truncatedKey) require.False(t, found) require.Nil(t, val) } diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 83e69e5ad4..59157c87e6 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -183,8 +183,7 @@ func TestStoreApplyAndCommit(t *testing.T) { // Apply but not commit - should be readable from pending writes require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found, "should be readable from pending writes") require.Equal(t, value, got) @@ -192,8 +191,7 @@ func TestStoreApplyAndCommit(t *testing.T) { commitAndCheck(t, s) // Still should be readable after commit - got, found, err = s.Get(key) - require.NoError(t, err) + got, found = s.Get(key) require.True(t, found) require.Equal(t, value, got) } @@ -232,8 +230,7 @@ func TestStoreMultipleWrites(t *testing.T) { // Verify all entries for _, e := range entries { key := memiavlStorageKey(addr, e.slot) - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(e.value), got) } @@ -304,8 +301,7 @@ func TestStoreVersioning(t *testing.T) { require.Equal(t, int64(2), s.Version()) // Latest value should be from version 2 - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x02), got) } @@ -340,8 
+336,7 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, err) defer s2.Close() - got, found, err := s2.Get(key) - require.NoError(t, err) + got, found := s2.Get(key) require.True(t, found) require.Equal(t, value, got) @@ -697,8 +692,7 @@ func TestGetMissingKeyReturnsNil(t *testing.T) { s := setupTestStore(t) defer s.Close() - v, ok, err := s.Get([]byte{0xFF, 0xFF, 0xFF}) - require.NoError(t, err) + v, ok := s.Get([]byte{0xFF, 0xFF, 0xFF}) require.False(t, ok) require.Nil(t, v) } @@ -707,8 +701,7 @@ func TestGetUnsupportedKeyType_Strict(t *testing.T) { s := setupTestStore(t) defer s.Close() - _, _, err := s.Get([]byte{}) - require.Error(t, err) + require.Panics(t, func() { s.Get([]byte{}) }) } func TestGetUnsupportedKeyType_NonStrict(t *testing.T) { @@ -716,10 +709,7 @@ func TestGetUnsupportedKeyType_NonStrict(t *testing.T) { s := setupTestStoreWithConfig(t, cfg) defer s.Close() - v, ok, err := s.Get([]byte{}) - require.NoError(t, err) - require.False(t, ok) - require.Nil(t, v) + require.Panics(t, func() { s.Get([]byte{}) }) } // ============================================================================= @@ -765,18 +755,15 @@ func TestPersistenceAllKeyTypes(t *testing.T) { require.Equal(t, int64(1), s2.Version()) require.Equal(t, hash, s2.RootHash()) - v, ok, err := s2.Get(storageKey) - require.NoError(t, err) + v, ok := s2.Get(storageKey) require.True(t, ok) require.Equal(t, padLeft32(0x11), v) - v, ok, err = s2.Get(nonceKey) - require.NoError(t, err) + v, ok = s2.Get(nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 5}, v) - v, ok, err = s2.Get(codeKey) - require.NoError(t, err) + v, ok = s2.Get(codeKey) require.True(t, ok) require.Equal(t, []byte{0x60, 0x80}, v) } @@ -804,8 +791,7 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - got, found, err := ro.Get(key) - require.NoError(t, err) + got, found := ro.Get(key) require.True(t, found) require.Equal(t, value, 
got) require.NotNil(t, ro.RootHash()) @@ -835,8 +821,7 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { defer ro.Close() require.Equal(t, int64(1), ro.Version()) - got, found, err := ro.Get(key) - require.NoError(t, err) + got, found := ro.Get(key) require.True(t, found) require.Equal(t, value, got) } @@ -864,8 +849,7 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { defer ro.Close() require.Equal(t, int64(3), ro.Version()) - got, found, err := ro.Get(key) - require.NoError(t, err) + got, found := ro.Get(key) require.True(t, found) require.Equal(t, padLeft32(3), got) } @@ -924,8 +908,7 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { require.Equal(t, int64(3), s.Version()) require.Equal(t, int64(1), ro.Version()) - got, found, err := ro.Get(key) - require.NoError(t, err) + got, found := ro.Get(key) require.True(t, found) require.Equal(t, padLeft32(1), got) } @@ -961,10 +944,8 @@ func TestReadOnlyConcurrentInstances(t *testing.T) { require.Equal(t, int64(4), ro1.Version()) require.Equal(t, int64(4), ro2.Version()) - g1, ok1, err := ro1.Get(key) - require.NoError(t, err) - g2, ok2, err := ro2.Get(key) - require.NoError(t, err) + g1, ok1 := ro1.Get(key) + g2, ok2 := ro2.Get(key) require.True(t, ok1) require.True(t, ok2) require.Equal(t, padLeft32(4), g1) @@ -992,8 +973,7 @@ func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), v) - got, found, err := s.Get(key) - require.NoError(t, err) + got, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(2), got) } @@ -1088,8 +1068,7 @@ func TestLoadVersionReload(t *testing.T) { require.Equal(t, int64(1), s.Version()) require.Equal(t, expectedHash, s.RootHash()) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x11), val) require.NoError(t, s.Close()) @@ -1110,8 +1089,7 @@ func TestLoadVersionReadOnlyVersion0(t *testing.T) { defer ro.Close() 
require.Equal(t, int64(1), ro.Version()) - val, found, err := ro.Get(key) - require.NoError(t, err) + val, found := ro.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x22), val) require.NoError(t, s.Close()) @@ -1136,13 +1114,11 @@ func TestLoadVersionReadOnlyDoesNotSeePending(t *testing.T) { require.NoError(t, err) defer ro.Close() - _, found, err := ro.Get(key2) - require.NoError(t, err) + _, found := ro.Get(key2) require.False(t, found, "read-only store should not see uncommitted data") // But committed data should be visible. - val, found, err := ro.Get(key) - require.NoError(t, err) + val, found := ro.Get(key) require.True(t, found) require.Equal(t, padLeft32(0x33), val) require.NoError(t, s.Close()) @@ -1198,12 +1174,11 @@ func TestCloseWithPendingUncommittedWrites(t *testing.T) { require.Equal(t, int64(1), s2.Version()) - val, found, err := s2.Get(key) + val, found := s2.Get(key) require.True(t, found, "committed data should persist") require.Equal(t, padLeft32(0x11), val) - _, found, err = s2.Get(key2) - require.NoError(t, err) + _, found = s2.Get(key2) require.False(t, found, "uncommitted data should be lost") } @@ -1223,8 +1198,7 @@ func TestCloseDuringConcurrentReadOnlyClone(t *testing.T) { require.NoError(t, s.Close()) // RO should still function. 
- val, found, err := ro.Get(key) - require.NoError(t, err) + val, found := ro.Get(key) require.True(t, found, "RO clone should remain functional after parent close") require.Equal(t, padLeft32(0xAA), val) diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 65f7048563..25240466a7 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -41,14 +41,12 @@ func TestStoreNonStorageKeys(t *testing.T) { commitAndCheck(t, s) // Nonce should be found - nonceValue, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceValue, found := s.Get(nonceKey) require.True(t, found, "nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 17}, nonceValue) // CodeHash should be found - codeHashValue, found, err := s.Get(codeHashKey) - require.NoError(t, err) + codeHashValue, found := s.Get(codeHashKey) require.True(t, found, "codehash should be found") require.Equal(t, codeHash[:], codeHashValue) } @@ -109,27 +107,23 @@ func TestStoreWriteAllDBs(t *testing.T) { // Verify storage data was written (via Store.Get which deserializes) storageMemiavlKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - storageValue, found, err := s.Get(storageMemiavlKey) - require.NoError(t, err) + storageValue, found := s.Get(storageMemiavlKey) require.True(t, found, "Storage should be found") require.Equal(t, padLeft32(0x11, 0x22), storageValue) // Verify account and code data was written nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceValue, found := s.Get(nonceKey) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue) codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - codeValue, found, err := s.Get(codeKey) - require.NoError(t, err) + codeValue, found := s.Get(codeKey) require.True(t, found, 
"Code should be found") require.Equal(t, []byte{0x60, 0x60, 0x60}, codeValue) // Verify legacy data persisted (via Store.Get which deserializes) - legacyVal, found, err := s.Get(legacyKey) - require.NoError(t, err) + legacyVal, found := s.Get(legacyKey) require.True(t, found, "Legacy should be found") require.Equal(t, []byte{0x00, 0x03}, legacyVal) } @@ -201,27 +195,23 @@ func TestStoreWriteAccountAndCode(t *testing.T) { // Verify account data was written nonceKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr1[:]) - nonce1, found, err := s.Get(nonceKey1) - require.NoError(t, err) + nonce1, found := s.Get(nonceKey1) require.True(t, found, "Nonce1 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 1}, nonce1) nonceKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr2[:]) - nonce2, found, err := s.Get(nonceKey2) - require.NoError(t, err) + nonce2, found := s.Get(nonceKey2) require.True(t, found, "Nonce2 should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 2}, nonce2) // Verify code data was written codeKey1 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr1[:]) - code1, found, err := s.Get(codeKey1) - require.NoError(t, err) + code1, found := s.Get(codeKey1) require.True(t, found, "Code1 should be found") require.Equal(t, []byte{0x60, 0x80}, code1) codeKey2 := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr2[:]) - code2, found, err := s.Get(codeKey2) - require.NoError(t, err) + code2, found := s.Get(codeKey2) require.True(t, found, "Code2 should be found") require.Equal(t, []byte{0x60, 0xA0}, code2) @@ -293,15 +283,13 @@ func TestStoreWriteDelete(t *testing.T) { // Nonce was the only account field written (no codehash). After delete, // all fields are zero so the accountDB row is physically deleted. 
nonceKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found, err := s.Get(nonceKeyDel) - require.NoError(t, err) + nonceValue, found := s.Get(nonceKeyDel) require.False(t, found, "nonce should not be found after account row deletion") require.Nil(t, nonceValue) // Verify code is deleted codeKeyDel := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found, err = s.Get(codeKeyDel) - require.NoError(t, err) + _, found = s.Get(codeKeyDel) require.False(t, found, "code should be deleted") requireAllLocalMetaAt(t, s, 2) @@ -355,14 +343,12 @@ func TestAccountValueStorage(t *testing.T) { // Get method should return individual fields nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceValue, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceValue, found := s.Get(nonceKey) require.True(t, found, "Nonce should be found") require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceValue, "Nonce should be 42") codeHashKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - codeHashValue, found, err := s.Get(codeHashKey) - require.NoError(t, err) + codeHashValue, found := s.Get(codeHashKey) require.True(t, found, "CodeHash should be found") require.Equal(t, expectedCodeHash[:], codeHashValue, "CodeHash should match") } @@ -393,8 +379,7 @@ func TestStoreWriteLegacyKeys(t *testing.T) { require.Equal(t, int64(1), s.localMeta[legacyDBDir].CommittedVersion) // Verify data persisted (via Store.Get which deserializes) - got, found, err := s.Get(codeSizeKey) - require.NoError(t, err) + got, found := s.Get(codeSizeKey) require.True(t, found) require.Equal(t, codeSizeValue, got) } @@ -441,8 +426,7 @@ func TestStoreWriteLegacyAndOptimizedKeys(t *testing.T) { // Verify legacy data persisted (via Store.Get which deserializes) codeSizeKey := append([]byte{0x09}, addr[:]...) 
- got, found, err := s.Get(codeSizeKey) - require.NoError(t, err) + got, found := s.Get(codeSizeKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x03}, got) } @@ -460,8 +444,7 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Verify exists - got, found, err := s.Get(legacyKey) - require.NoError(t, err) + got, found := s.Get(legacyKey) require.True(t, found) require.Equal(t, []byte{0x00, 0x10}, got) @@ -471,8 +454,7 @@ func TestStoreWriteDeleteLegacyKey(t *testing.T) { commitAndCheck(t, s) // Should not be found - _, found, err = s.Get(legacyKey) - require.NoError(t, err) + _, found = s.Get(legacyKey) require.False(t, found) } @@ -552,8 +534,7 @@ func TestStoreFsyncConfig(t *testing.T) { commitAndCheck(t, store) // Data should be readable - got, found, err := store.Get(key) - require.NoError(t, err) + got, found := store.Get(key) require.True(t, found) require.Equal(t, padLeft32(0xCC), got) @@ -669,13 +650,11 @@ func TestMultipleApplyChangeSetsBeforeCommit(t *testing.T) { commitAndCheck(t, s) - v1, ok, err := s.Get(key1) - require.NoError(t, err) + v1, ok := s.Get(key1) require.True(t, ok) require.Equal(t, padLeft32(0x11), v1) - v2, ok, err := s.Get(key2) - require.NoError(t, err) + v2, ok := s.Get(key2) require.True(t, ok) require.Equal(t, padLeft32(0x22), v2) } @@ -700,13 +679,11 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{cs2})) commitAndCheck(t, s) - nonceVal, ok, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, ok := s.Get(nonceKey) require.True(t, ok) require.Equal(t, []byte{0, 0, 0, 0, 0, 0, 0, 42}, nonceVal, "nonce should be preserved after codehash update") - chVal, ok, err := s.Get(codeHashKey) - require.NoError(t, err) + chVal, ok := s.Get(codeHashKey) require.True(t, ok) require.Equal(t, codeHash[:], chVal) } @@ -812,8 +789,7 @@ func TestOverwriteSameKeyInSingleBlock(t *testing.T) { require.NoError(t, 
s.ApplyChangeSets([]*proto.NamedChangeSet{cs})) commitAndCheck(t, s) - v, ok, err := s.Get(key) - require.NoError(t, err) + v, ok := s.Get(key) require.True(t, ok) require.Equal(t, padLeft32(0x02), v, "last write should win") } @@ -855,8 +831,7 @@ func TestStoreFsyncEnabled(t *testing.T) { commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0x01}) require.Equal(t, int64(1), s.Version()) - v, ok, err := s.Get(memiavlStorageKey(Address{0x01}, Slot{0x01})) - require.NoError(t, err) + v, ok := s.Get(memiavlStorageKey(Address{0x01}, Slot{0x01})) require.True(t, ok) require.Equal(t, padLeft32(0x01), v) } @@ -941,30 +916,25 @@ func TestDeleteSemanticsCodehashAsymmetry(t *testing.T) { // After deleting all account fields, the row is physically deleted (Account Row GC). nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.False(t, found, "nonce should not be found after all-zero account row deletion") require.Nil(t, nonceVal) chKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - chVal, found, err := s.Get(chKey) - require.NoError(t, err) + chVal, found := s.Get(chKey) require.False(t, found, "codehash should not be found after row deletion") require.Nil(t, chVal) - hasCodeHash, err := s.Has(chKey) - require.NoError(t, err) + hasCodeHash := s.Has(chKey) require.False(t, hasCodeHash, "Has(codehash) should be false after delete") - hasNonce, err := s.Has(nonceKey) - require.NoError(t, err) + hasNonce := s.Has(nonceKey) require.False(t, hasNonce, "Has(nonce) should be false after row deletion") codeKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyCode, addr[:]) - _, found, err = s.Get(codeKey) - require.NoError(t, err) + _, found = s.Get(codeKey) require.False(t, found, "code should be physically deleted") - _, err = s.accountDB.Get(AccountKey(addr)) + _, err := s.accountDB.Get(AccountKey(addr)) require.Error(t, err, "accountDB row should be physically 
deleted when all fields are zero") } @@ -989,8 +959,7 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found, "write-then-delete: key should be gone") }) @@ -1014,8 +983,7 @@ func TestCrossApplyChangeSetsOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found, "delete-then-write: key should exist") require.Equal(t, padLeft32(0xBB), val) }) @@ -1175,8 +1143,7 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { // With Account Row GC, nonce-only account becomes all-zero → row deleted key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found, "nonce-only account should be deleted after nonce delete") }) @@ -1198,8 +1165,7 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) require.True(t, found) require.Equal(t, uint64(99), bytesToNonce(val)) }) @@ -1218,8 +1184,7 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - _, found, err := s.Get(key) - require.NoError(t, err) + _, found := s.Get(key) require.False(t, found, "codehash-only account: delete → all-zero → row deleted") }) @@ -1241,8 +1206,7 @@ func TestCrossApplyChangeSetsAccountOrdering(t *testing.T) { commitAndCheck(t, s) key := evm.BuildMemIAVLEVMKey(evm.EVMKeyCodeHash, addr[:]) - val, found, err := s.Get(key) - require.NoError(t, err) + val, found := s.Get(key) 
require.True(t, found, "codehash should be restored after delete-then-write") expected := codeHashN(0xBB) require.Equal(t, expected[:], val) @@ -1331,13 +1295,11 @@ func TestAccountRowDeletedWhenAllFieldsZero(t *testing.T) { _, err := s.accountDB.Get(AccountKey(addr)) require.Error(t, err, "accountDB row should be physically deleted") - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.False(t, found, "nonce should not be found after row deletion") require.Nil(t, nonceVal) - chVal, found, err := s.Get(chKey) - require.NoError(t, err) + chVal, found := s.Get(chKey) require.False(t, found, "codehash should not be found after row deletion") require.Nil(t, chVal) } @@ -1364,8 +1326,7 @@ func TestAccountRowPersistsWhenPartiallyZero(t *testing.T) { require.NoError(t, err, "accountDB row should still exist after partial delete") require.NotNil(t, raw) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found, "nonce should still be readable") require.Equal(t, nonceBytes(7), nonceVal) } @@ -1399,8 +1360,7 @@ func TestAccountRowDeleteThenRecreate(t *testing.T) { require.NoError(t, err, "row should be recreated") require.NotNil(t, raw) - nonceVal, found, err := s.Get(nonceKey) - require.NoError(t, err) + nonceVal, found := s.Get(nonceKey) require.True(t, found) require.Equal(t, nonceBytes(99), nonceVal) } @@ -1435,8 +1395,7 @@ func TestAccountRowGCOnWriteZero(t *testing.T) { require.Error(t, err, "accountDB row should be GC'd when write-zero makes account empty") nonceKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyNonce, addr[:]) - _, found, err := s.Get(nonceKey) - require.NoError(t, err) + _, found := s.Get(nonceKey) require.False(t, found, "nonce should not be found after write-zero GC") } @@ -1602,8 +1561,7 @@ func TestApplyChangeSetsMixedEVMAndNonEVM(t *testing.T) { require.Len(t, s.storageWrites, 1) // The EVM value should be readable via pending 
writes. - val, found, err := s.Get(storageKey) - require.NoError(t, err) + val, found := s.Get(storageKey) require.True(t, found) require.Equal(t, padLeft32(0x42), val) }