From 59355d2ecc3e897663889e920058dcfb1dbe5381 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Fri, 9 May 2025 16:17:40 +0200 Subject: [PATCH 01/24] feat: impementing single node consensus --- cl/cmd/redisapp/main.go | 20 - cl/cmd/singlenode/main.go | 1 + cl/mocks/mock_state.go | 358 ++++++++++++++++-- cl/redisapp/blockbuilder/blockbuilder.go | 134 +++++-- cl/redisapp/blockbuilder/blockbuilder_test.go | 212 ++--------- cl/redisapp/leaderfollower/leaderfollower.go | 150 ++++++-- .../leaderfollower/leaderfollower_test.go | 29 +- cl/redisapp/rapp.go | 44 ++- cl/redisapp/state/state.go | 325 +++++++++------- 9 files changed, 832 insertions(+), 441 deletions(-) create mode 100644 cl/cmd/singlenode/main.go diff --git a/cl/cmd/redisapp/main.go b/cl/cmd/redisapp/main.go index b70118d5b..d760b68a4 100644 --- a/cl/cmd/redisapp/main.go +++ b/cl/cmd/redisapp/main.go @@ -83,22 +83,6 @@ var ( }, }) - genesisBlockHashFlag = altsrc.NewStringFlag(&cli.StringFlag{ - Name: "genesis-block-hash", - Usage: "Genesis block hash", - EnvVars: []string{"RAPP_GENESIS_BLOCK_HASH"}, - Value: "dfc7fa546e1268f5bb65b9ec67759307d2435ad1bf609307c7c306e9bb0edcde", - Action: func(_ *cli.Context, s string) error { - if len(s) != 64 { - return fmt.Errorf("invalid genesis-block-hash: must be 64 hex characters") - } - if _, err := hex.DecodeString(s); err != nil { - return fmt.Errorf("invalid genesis-block-hash: %v", err) - } - return nil - }, - }) - redisAddrFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "redis-addr", Usage: "Redis address", @@ -177,7 +161,6 @@ type Config struct { InstanceID string EthClientURL string JWTSecret string - GenesisBlockHash string RedisAddr string EVMBuildDelay time.Duration EVMBuildDelayEmptyBlocks time.Duration @@ -190,7 +173,6 @@ func main() { instanceIDFlag, ethClientURLFlag, jwtSecretFlag, - genesisBlockHashFlag, redisAddrFlag, logFmtFlag, logLevelFlag, @@ -245,7 +227,6 @@ func startApplication(c *cli.Context) error { InstanceID: c.String(instanceIDFlag.Name), 
EthClientURL: c.String(ethClientURLFlag.Name), JWTSecret: c.String(jwtSecretFlag.Name), - GenesisBlockHash: c.String(genesisBlockHashFlag.Name), RedisAddr: c.String(redisAddrFlag.Name), EVMBuildDelay: c.Duration(evmBuildDelayFlag.Name), EVMBuildDelayEmptyBlocks: c.Duration(evmBuildDelayEmptyBlockFlag.Name), @@ -259,7 +240,6 @@ func startApplication(c *cli.Context) error { cfg.InstanceID, cfg.EthClientURL, cfg.JWTSecret, - cfg.GenesisBlockHash, cfg.RedisAddr, cfg.PriorityFeeReceipt, log, diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go new file mode 100644 index 000000000..06ab7d0f9 --- /dev/null +++ b/cl/cmd/singlenode/main.go @@ -0,0 +1 @@ +package main diff --git a/cl/mocks/mock_state.go b/cl/mocks/mock_state.go index f3803b80f..85604c9b4 100644 --- a/cl/mocks/mock_state.go +++ b/cl/mocks/mock_state.go @@ -10,6 +10,7 @@ import ( time "time" gomock "github.com/golang/mock/gomock" + state "github.com/primev/mev-commit/cl/redisapp/state" types "github.com/primev/mev-commit/cl/redisapp/types" redis "github.com/redis/go-redis/v9" ) @@ -7345,8 +7346,147 @@ func (m *MockStateManager) EXPECT() *MockStateManagerMockRecorder { return m.recorder } +// ExecuteTransaction mocks base method. +func (m *MockStateManager) ExecuteTransaction(ctx context.Context, ops ...state.PipelineOperation) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range ops { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecuteTransaction", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExecuteTransaction indicates an expected call of ExecuteTransaction. +func (mr *MockStateManagerMockRecorder) ExecuteTransaction(ctx interface{}, ops ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, ops...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTransaction", reflect.TypeOf((*MockStateManager)(nil).ExecuteTransaction), varargs...) +} + +// GetBlockBuildState mocks base method. 
+func (m *MockStateManager) GetBlockBuildState(ctx context.Context) types.BlockBuildState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockBuildState", ctx) + ret0, _ := ret[0].(types.BlockBuildState) + return ret0 +} + +// GetBlockBuildState indicates an expected call of GetBlockBuildState. +func (mr *MockStateManagerMockRecorder) GetBlockBuildState(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBuildState", reflect.TypeOf((*MockStateManager)(nil).GetBlockBuildState), ctx) +} + +// LoadExecutionHead mocks base method. +func (m *MockStateManager) LoadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadExecutionHead", ctx) + ret0, _ := ret[0].(*types.ExecutionHead) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LoadExecutionHead indicates an expected call of LoadExecutionHead. +func (mr *MockStateManagerMockRecorder) LoadExecutionHead(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadExecutionHead", reflect.TypeOf((*MockStateManager)(nil).LoadExecutionHead), ctx) +} + +// LoadOrInitializeBlockState mocks base method. +func (m *MockStateManager) LoadOrInitializeBlockState(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LoadOrInitializeBlockState", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// LoadOrInitializeBlockState indicates an expected call of LoadOrInitializeBlockState. +func (mr *MockStateManagerMockRecorder) LoadOrInitializeBlockState(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrInitializeBlockState", reflect.TypeOf((*MockStateManager)(nil).LoadOrInitializeBlockState), ctx) +} + +// ResetBlockState mocks base method. 
+func (m *MockStateManager) ResetBlockState(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetBlockState", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// ResetBlockState indicates an expected call of ResetBlockState. +func (mr *MockStateManagerMockRecorder) ResetBlockState(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBlockState", reflect.TypeOf((*MockStateManager)(nil).ResetBlockState), ctx) +} + +// SaveBlockState mocks base method. +func (m *MockStateManager) SaveBlockState(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveBlockState", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveBlockState indicates an expected call of SaveBlockState. +func (mr *MockStateManagerMockRecorder) SaveBlockState(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveBlockState", reflect.TypeOf((*MockStateManager)(nil).SaveBlockState), ctx) +} + +// SaveExecutionHead mocks base method. +func (m *MockStateManager) SaveExecutionHead(ctx context.Context, head *types.ExecutionHead) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SaveExecutionHead", ctx, head) + ret0, _ := ret[0].(error) + return ret0 +} + +// SaveExecutionHead indicates an expected call of SaveExecutionHead. +func (mr *MockStateManagerMockRecorder) SaveExecutionHead(ctx, head interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveExecutionHead", reflect.TypeOf((*MockStateManager)(nil).SaveExecutionHead), ctx, head) +} + +// Stop mocks base method. +func (m *MockStateManager) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. 
+func (mr *MockStateManagerMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockStateManager)(nil).Stop)) +} + +// MockStreamManager is a mock of StreamManager interface. +type MockStreamManager struct { + ctrl *gomock.Controller + recorder *MockStreamManagerMockRecorder +} + +// MockStreamManagerMockRecorder is the mock recorder for MockStreamManager. +type MockStreamManagerMockRecorder struct { + mock *MockStreamManager +} + +// NewMockStreamManager creates a new mock instance. +func NewMockStreamManager(ctrl *gomock.Controller) *MockStreamManager { + mock := &MockStreamManager{ctrl: ctrl} + mock.recorder = &MockStreamManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStreamManager) EXPECT() *MockStreamManagerMockRecorder { + return m.recorder +} + // AckMessage mocks base method. -func (m *MockStateManager) AckMessage(ctx context.Context, messageID string) error { +func (m *MockStreamManager) AckMessage(ctx context.Context, messageID string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AckMessage", ctx, messageID) ret0, _ := ret[0].(error) @@ -7354,13 +7494,13 @@ func (m *MockStateManager) AckMessage(ctx context.Context, messageID string) err } // AckMessage indicates an expected call of AckMessage. -func (mr *MockStateManagerMockRecorder) AckMessage(ctx, messageID interface{}) *gomock.Call { +func (mr *MockStreamManagerMockRecorder) AckMessage(ctx, messageID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AckMessage", reflect.TypeOf((*MockStateManager)(nil).AckMessage), ctx, messageID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AckMessage", reflect.TypeOf((*MockStreamManager)(nil).AckMessage), ctx, messageID) } // CreateConsumerGroup mocks base method. 
-func (m *MockStateManager) CreateConsumerGroup(ctx context.Context) error { +func (m *MockStreamManager) CreateConsumerGroup(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CreateConsumerGroup", ctx) ret0, _ := ret[0].(error) @@ -7368,13 +7508,143 @@ func (m *MockStateManager) CreateConsumerGroup(ctx context.Context) error { } // CreateConsumerGroup indicates an expected call of CreateConsumerGroup. -func (mr *MockStateManagerMockRecorder) CreateConsumerGroup(ctx interface{}) *gomock.Call { +func (mr *MockStreamManagerMockRecorder) CreateConsumerGroup(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateConsumerGroup", reflect.TypeOf((*MockStreamManager)(nil).CreateConsumerGroup), ctx) +} + +// ExecuteTransaction mocks base method. +func (m *MockStreamManager) ExecuteTransaction(ctx context.Context, ops ...state.PipelineOperation) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range ops { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecuteTransaction", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExecuteTransaction indicates an expected call of ExecuteTransaction. +func (mr *MockStreamManagerMockRecorder) ExecuteTransaction(ctx interface{}, ops ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, ops...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTransaction", reflect.TypeOf((*MockStreamManager)(nil).ExecuteTransaction), varargs...) +} + +// PublishToStream mocks base method. +func (m *MockStreamManager) PublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishToStream", ctx, bsState) + ret0, _ := ret[0].(error) + return ret0 +} + +// PublishToStream indicates an expected call of PublishToStream. 
+func (mr *MockStreamManagerMockRecorder) PublishToStream(ctx, bsState interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateConsumerGroup", reflect.TypeOf((*MockStateManager)(nil).CreateConsumerGroup), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishToStream", reflect.TypeOf((*MockStreamManager)(nil).PublishToStream), ctx, bsState) +} + +// ReadMessagesFromStream mocks base method. +func (m *MockStreamManager) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadMessagesFromStream", ctx, msgType) + ret0, _ := ret[0].([]redis.XStream) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadMessagesFromStream indicates an expected call of ReadMessagesFromStream. +func (mr *MockStreamManagerMockRecorder) ReadMessagesFromStream(ctx, msgType interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMessagesFromStream", reflect.TypeOf((*MockStreamManager)(nil).ReadMessagesFromStream), ctx, msgType) +} + +// Stop mocks base method. +func (m *MockStreamManager) Stop() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop") +} + +// Stop indicates an expected call of Stop. +func (mr *MockStreamManagerMockRecorder) Stop() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockStreamManager)(nil).Stop)) +} + +// MockCoordinator is a mock of Coordinator interface. +type MockCoordinator struct { + ctrl *gomock.Controller + recorder *MockCoordinatorMockRecorder +} + +// MockCoordinatorMockRecorder is the mock recorder for MockCoordinator. +type MockCoordinatorMockRecorder struct { + mock *MockCoordinator +} + +// NewMockCoordinator creates a new mock instance. 
+func NewMockCoordinator(ctrl *gomock.Controller) *MockCoordinator { + mock := &MockCoordinator{ctrl: ctrl} + mock.recorder = &MockCoordinatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCoordinator) EXPECT() *MockCoordinatorMockRecorder { + return m.recorder +} + +// AckMessage mocks base method. +func (m *MockCoordinator) AckMessage(ctx context.Context, messageID string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AckMessage", ctx, messageID) + ret0, _ := ret[0].(error) + return ret0 +} + +// AckMessage indicates an expected call of AckMessage. +func (mr *MockCoordinatorMockRecorder) AckMessage(ctx, messageID interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AckMessage", reflect.TypeOf((*MockCoordinator)(nil).AckMessage), ctx, messageID) +} + +// CreateConsumerGroup mocks base method. +func (m *MockCoordinator) CreateConsumerGroup(ctx context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateConsumerGroup", ctx) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateConsumerGroup indicates an expected call of CreateConsumerGroup. +func (mr *MockCoordinatorMockRecorder) CreateConsumerGroup(ctx interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateConsumerGroup", reflect.TypeOf((*MockCoordinator)(nil).CreateConsumerGroup), ctx) +} + +// ExecuteTransaction mocks base method. +func (m *MockCoordinator) ExecuteTransaction(ctx context.Context, ops ...state.PipelineOperation) error { + m.ctrl.T.Helper() + varargs := []interface{}{ctx} + for _, a := range ops { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecuteTransaction", varargs...) + ret0, _ := ret[0].(error) + return ret0 +} + +// ExecuteTransaction indicates an expected call of ExecuteTransaction. 
+func (mr *MockCoordinatorMockRecorder) ExecuteTransaction(ctx interface{}, ops ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx}, ops...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteTransaction", reflect.TypeOf((*MockCoordinator)(nil).ExecuteTransaction), varargs...) } // GetBlockBuildState mocks base method. -func (m *MockStateManager) GetBlockBuildState(ctx context.Context) types.BlockBuildState { +func (m *MockCoordinator) GetBlockBuildState(ctx context.Context) types.BlockBuildState { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetBlockBuildState", ctx) ret0, _ := ret[0].(types.BlockBuildState) @@ -7382,13 +7652,13 @@ func (m *MockStateManager) GetBlockBuildState(ctx context.Context) types.BlockBu } // GetBlockBuildState indicates an expected call of GetBlockBuildState. -func (mr *MockStateManagerMockRecorder) GetBlockBuildState(ctx interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) GetBlockBuildState(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBuildState", reflect.TypeOf((*MockStateManager)(nil).GetBlockBuildState), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockBuildState", reflect.TypeOf((*MockCoordinator)(nil).GetBlockBuildState), ctx) } // LoadExecutionHead mocks base method. -func (m *MockStateManager) LoadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) { +func (m *MockCoordinator) LoadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LoadExecutionHead", ctx) ret0, _ := ret[0].(*types.ExecutionHead) @@ -7397,13 +7667,13 @@ func (m *MockStateManager) LoadExecutionHead(ctx context.Context) (*types.Execut } // LoadExecutionHead indicates an expected call of LoadExecutionHead. 
-func (mr *MockStateManagerMockRecorder) LoadExecutionHead(ctx interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) LoadExecutionHead(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadExecutionHead", reflect.TypeOf((*MockStateManager)(nil).LoadExecutionHead), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadExecutionHead", reflect.TypeOf((*MockCoordinator)(nil).LoadExecutionHead), ctx) } // LoadOrInitializeBlockState mocks base method. -func (m *MockStateManager) LoadOrInitializeBlockState(ctx context.Context) error { +func (m *MockCoordinator) LoadOrInitializeBlockState(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LoadOrInitializeBlockState", ctx) ret0, _ := ret[0].(error) @@ -7411,13 +7681,27 @@ func (m *MockStateManager) LoadOrInitializeBlockState(ctx context.Context) error } // LoadOrInitializeBlockState indicates an expected call of LoadOrInitializeBlockState. -func (mr *MockStateManagerMockRecorder) LoadOrInitializeBlockState(ctx interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) LoadOrInitializeBlockState(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrInitializeBlockState", reflect.TypeOf((*MockStateManager)(nil).LoadOrInitializeBlockState), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LoadOrInitializeBlockState", reflect.TypeOf((*MockCoordinator)(nil).LoadOrInitializeBlockState), ctx) +} + +// PublishToStream mocks base method. +func (m *MockCoordinator) PublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublishToStream", ctx, bsState) + ret0, _ := ret[0].(error) + return ret0 +} + +// PublishToStream indicates an expected call of PublishToStream. 
+func (mr *MockCoordinatorMockRecorder) PublishToStream(ctx, bsState interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublishToStream", reflect.TypeOf((*MockCoordinator)(nil).PublishToStream), ctx, bsState) } // ReadMessagesFromStream mocks base method. -func (m *MockStateManager) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { +func (m *MockCoordinator) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadMessagesFromStream", ctx, msgType) ret0, _ := ret[0].([]redis.XStream) @@ -7426,13 +7710,13 @@ func (m *MockStateManager) ReadMessagesFromStream(ctx context.Context, msgType t } // ReadMessagesFromStream indicates an expected call of ReadMessagesFromStream. -func (mr *MockStateManagerMockRecorder) ReadMessagesFromStream(ctx, msgType interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) ReadMessagesFromStream(ctx, msgType interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMessagesFromStream", reflect.TypeOf((*MockStateManager)(nil).ReadMessagesFromStream), ctx, msgType) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMessagesFromStream", reflect.TypeOf((*MockCoordinator)(nil).ReadMessagesFromStream), ctx, msgType) } // ResetBlockState mocks base method. -func (m *MockStateManager) ResetBlockState(ctx context.Context) error { +func (m *MockCoordinator) ResetBlockState(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResetBlockState", ctx) ret0, _ := ret[0].(error) @@ -7440,13 +7724,13 @@ func (m *MockStateManager) ResetBlockState(ctx context.Context) error { } // ResetBlockState indicates an expected call of ResetBlockState. 
-func (mr *MockStateManagerMockRecorder) ResetBlockState(ctx interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) ResetBlockState(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBlockState", reflect.TypeOf((*MockStateManager)(nil).ResetBlockState), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetBlockState", reflect.TypeOf((*MockCoordinator)(nil).ResetBlockState), ctx) } // SaveBlockState mocks base method. -func (m *MockStateManager) SaveBlockState(ctx context.Context) error { +func (m *MockCoordinator) SaveBlockState(ctx context.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SaveBlockState", ctx) ret0, _ := ret[0].(error) @@ -7454,13 +7738,13 @@ func (m *MockStateManager) SaveBlockState(ctx context.Context) error { } // SaveBlockState indicates an expected call of SaveBlockState. -func (mr *MockStateManagerMockRecorder) SaveBlockState(ctx interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) SaveBlockState(ctx interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveBlockState", reflect.TypeOf((*MockStateManager)(nil).SaveBlockState), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveBlockState", reflect.TypeOf((*MockCoordinator)(nil).SaveBlockState), ctx) } // SaveBlockStateAndPublishToStream mocks base method. 
-func (m *MockStateManager) SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { +func (m *MockCoordinator) SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SaveBlockStateAndPublishToStream", ctx, bsState) ret0, _ := ret[0].(error) @@ -7468,13 +7752,13 @@ func (m *MockStateManager) SaveBlockStateAndPublishToStream(ctx context.Context, } // SaveBlockStateAndPublishToStream indicates an expected call of SaveBlockStateAndPublishToStream. -func (mr *MockStateManagerMockRecorder) SaveBlockStateAndPublishToStream(ctx, bsState interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) SaveBlockStateAndPublishToStream(ctx, bsState interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveBlockStateAndPublishToStream", reflect.TypeOf((*MockStateManager)(nil).SaveBlockStateAndPublishToStream), ctx, bsState) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveBlockStateAndPublishToStream", reflect.TypeOf((*MockCoordinator)(nil).SaveBlockStateAndPublishToStream), ctx, bsState) } // SaveExecutionHead mocks base method. -func (m *MockStateManager) SaveExecutionHead(ctx context.Context, head *types.ExecutionHead) error { +func (m *MockCoordinator) SaveExecutionHead(ctx context.Context, head *types.ExecutionHead) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SaveExecutionHead", ctx, head) ret0, _ := ret[0].(error) @@ -7482,13 +7766,13 @@ func (m *MockStateManager) SaveExecutionHead(ctx context.Context, head *types.Ex } // SaveExecutionHead indicates an expected call of SaveExecutionHead. 
-func (mr *MockStateManagerMockRecorder) SaveExecutionHead(ctx, head interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) SaveExecutionHead(ctx, head interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveExecutionHead", reflect.TypeOf((*MockStateManager)(nil).SaveExecutionHead), ctx, head) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveExecutionHead", reflect.TypeOf((*MockCoordinator)(nil).SaveExecutionHead), ctx, head) } // SaveExecutionHeadAndAck mocks base method. -func (m *MockStateManager) SaveExecutionHeadAndAck(ctx context.Context, head *types.ExecutionHead, messageID string) error { +func (m *MockCoordinator) SaveExecutionHeadAndAck(ctx context.Context, head *types.ExecutionHead, messageID string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SaveExecutionHeadAndAck", ctx, head, messageID) ret0, _ := ret[0].(error) @@ -7496,19 +7780,19 @@ func (m *MockStateManager) SaveExecutionHeadAndAck(ctx context.Context, head *ty } // SaveExecutionHeadAndAck indicates an expected call of SaveExecutionHeadAndAck. -func (mr *MockStateManagerMockRecorder) SaveExecutionHeadAndAck(ctx, head, messageID interface{}) *gomock.Call { +func (mr *MockCoordinatorMockRecorder) SaveExecutionHeadAndAck(ctx, head, messageID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveExecutionHeadAndAck", reflect.TypeOf((*MockStateManager)(nil).SaveExecutionHeadAndAck), ctx, head, messageID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveExecutionHeadAndAck", reflect.TypeOf((*MockCoordinator)(nil).SaveExecutionHeadAndAck), ctx, head, messageID) } // Stop mocks base method. -func (m *MockStateManager) Stop() { +func (m *MockCoordinator) Stop() { m.ctrl.T.Helper() m.ctrl.Call(m, "Stop") } // Stop indicates an expected call of Stop. 
-func (mr *MockStateManagerMockRecorder) Stop() *gomock.Call { +func (mr *MockCoordinatorMockRecorder) Stop() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockStateManager)(nil).Stop)) -} + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockCoordinator)(nil).Stop)) +} \ No newline at end of file diff --git a/cl/redisapp/blockbuilder/blockbuilder.go b/cl/redisapp/blockbuilder/blockbuilder.go index 27356e112..b12a74161 100644 --- a/cl/redisapp/blockbuilder/blockbuilder.go +++ b/cl/redisapp/blockbuilder/blockbuilder.go @@ -2,9 +2,11 @@ package blockbuilder import ( "context" + "encoding/base64" "errors" "fmt" "log/slog" + "math/big" "regexp" "strconv" "strings" @@ -14,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" etypes "github.com/ethereum/go-ethereum/core/types" - "github.com/primev/mev-commit/cl/redisapp/state" "github.com/primev/mev-commit/cl/redisapp/types" "github.com/primev/mev-commit/cl/redisapp/util" "github.com/vmihailenco/msgpack/v5" @@ -30,10 +31,18 @@ type EngineClient interface { payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) GetPayloadV3(ctx context.Context, payloadID engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) + + HeaderByNumber(ctx context.Context, number *big.Int) (*etypes.Header, error) +} + +type stateManager interface { + SaveBlockStateAndPublishToStream(ctx context.Context, state *types.BlockBuildState) error + GetBlockBuildState(ctx context.Context) types.BlockBuildState + ResetBlockState(ctx context.Context) error } type BlockBuilder struct { - stateManager state.StateManager + stateManager stateManager engineCl EngineClient logger *slog.Logger buildDelay time.Duration @@ -42,10 +51,17 @@ type BlockBuilder struct { LastCallTime time.Time lastBlockTime time.Time feeRecipient common.Address - ctx context.Context + executionHead 
*types.ExecutionHead } -func NewBlockBuilder(stateManager state.StateManager, engineCl EngineClient, logger *slog.Logger, buildDelay, buildDelayEmptyBlocks time.Duration, feeReceipt string) *BlockBuilder { +func NewBlockBuilder( + stateManager stateManager, + engineCl EngineClient, + logger *slog.Logger, + buildDelay, + buildDelayEmptyBlocks time.Duration, + feeReceipt string, +) *BlockBuilder { return &BlockBuilder{ stateManager: stateManager, engineCl: engineCl, @@ -58,6 +74,13 @@ func NewBlockBuilder(stateManager state.StateManager, engineCl EngineClient, log } } +func NewMemberBlockBuilder(engineCL EngineClient, logger *slog.Logger) *BlockBuilder { + return &BlockBuilder{ + engineCl: engineCL, + logger: logger, + } +} + func (bb *BlockBuilder) SetLastCallTimeToZero() { bb.LastCallTime = time.Time{} } @@ -95,7 +118,7 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { currentCallTime := time.Now() // Load execution head to get previous block timestamp - head, err := bb.stateManager.LoadExecutionHead(ctx) + head, err := bb.loadExecutionHead(ctx) if err != nil { return fmt.Errorf("latest execution block: %w", err) } @@ -132,7 +155,10 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { err = util.RetryWithBackoff(ctx, maxAttempts, bb.logger, func() error { response, err := bb.startBuild(ctx, head, ts) if err != nil { - bb.logger.Warn("Failed to build new EVM payload, will retry", "error", err) + bb.logger.Warn( + "Failed to build new EVM payload, will retry", + "error", err, + ) return err // Will retry } else if response.PayloadStatus.Status != engine.VALID { return backoff.Permanent(fmt.Errorf("invalid payload status: %s", response.PayloadStatus.Status)) @@ -170,7 +196,10 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { if isUnknownPayload(err) { return backoff.Permanent(err) } else if err != nil { - bb.logger.Warn("Failed to get payload, retrying...", "error", err) + bb.logger.Warn( + "Failed to get payload, 
retrying...", + "error", err, + ) return err // Will retry } @@ -185,7 +214,10 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { now := time.Now() timeSinceLastBlock := now.Sub(bb.lastBlockTime) if !hasTransactions && timeSinceLastBlock < bb.buildEmptyBlocksDelay { - bb.logger.Info("Leader: Skipping empty block", "timeSinceLastBlock", timeSinceLastBlock) + bb.logger.Info( + "Leader: Skipping empty block", + "timeSinceLastBlock", timeSinceLastBlock, + ) return nil } @@ -194,18 +226,23 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { return fmt.Errorf("failed to marshal payload: %w", err) } + encodedPayload := base64.StdEncoding.EncodeToString(payloadData) + payloadIDStr := payloadID.String() err = bb.stateManager.SaveBlockStateAndPublishToStream(ctx, &types.BlockBuildState{ CurrentStep: types.StepFinalizeBlock, PayloadID: payloadIDStr, - ExecutionPayload: string(payloadData), + ExecutionPayload: encodedPayload, }) if err != nil { return fmt.Errorf("failed to save state after GetPayload: %w", err) } - bb.logger.Info("Leader: BuildBlock completed and block is distributed", "PayloadID", payloadIDStr) + bb.logger.Info( + "Leader: BuildBlock completed and block is distributed", + "PayloadID", payloadIDStr, + ) bb.lastBlockTime = now return nil @@ -246,16 +283,27 @@ func (bb *BlockBuilder) ProcessLastPayload(ctx context.Context) error { bb.logger.Warn("Follower: Block already pushed to EVM, resetting state to StepBuildBlock") return nil // Success } else { - bb.logger.Warn("Follower: Invalid block height, exit", "invalid_height", invalidHeight, "expected_height", expectedHeight) + bb.logger.Warn( + "Follower: Invalid block height, exit", + "invalid_height", invalidHeight, + "expected_height", expectedHeight, + ) return backoff.Permanent(err) } } else { // Impossible to reach, unless geth changes the error message response - bb.logger.Warn("Conversion error", "error1", err1, "error2", err2) + bb.logger.Warn( + "Conversion error", + 
"error1", err1, + "error2", err2, + ) return backoff.Permanent(fmt.Errorf("conversion error1: %w, error2: %w", err1, err2)) } } else { - bb.logger.Warn("Follower: Failed to finalize block, retrying...", "error", err) + bb.logger.Warn( + "Follower: Failed to finalize block, retrying...", + "error", err, + ) return err // Will retry } } @@ -272,7 +320,10 @@ func (bb *BlockBuilder) ProcessLastPayload(ctx context.Context) error { bb.logger.Info("Follower: Resetting state to StepBuildBlock for next block") err := bb.stateManager.ResetBlockState(ctx) if err != nil { - bb.logger.Warn("Follower: Failed to reset block state, retrying...", "error", err) + bb.logger.Warn( + "Follower: Failed to reset block state, retrying...", + "error", err, + ) return err // Will retry } return nil // Success @@ -325,12 +376,16 @@ func (bb *BlockBuilder) FinalizeBlock(ctx context.Context, payloadIDStr, executi return errors.New("PayloadID or ExecutionPayload is missing in build state") } + executionPayloadBytes, err := base64.StdEncoding.DecodeString(executionPayloadStr) + if err != nil { + return fmt.Errorf("failed to decode ExecutionPayload: %w", err) + } + var executionPayload engine.ExecutableData - if err := msgpack.Unmarshal([]byte(executionPayloadStr), &executionPayload); err != nil { + if err := msgpack.Unmarshal(executionPayloadBytes, &executionPayload); err != nil { return fmt.Errorf("failed to deserialize ExecutionPayload: %w", err) } - - head, err := bb.stateManager.LoadExecutionHead(ctx) + head, err := bb.loadExecutionHead(ctx) if err != nil { return fmt.Errorf("failed to load execution head: %w", err) } @@ -347,25 +402,18 @@ func (bb *BlockBuilder) FinalizeBlock(ctx context.Context, payloadIDStr, executi } fcs := engine.ForkchoiceStateV1{ - HeadBlockHash: hash, - SafeBlockHash: hash, - FinalizedBlockHash: hash, + HeadBlockHash: executionPayload.BlockHash, + SafeBlockHash: executionPayload.BlockHash, + FinalizedBlockHash: executionPayload.BlockHash, } if err := 
bb.updateForkChoice(ctx, fcs, retryFunc); err != nil { return fmt.Errorf("failed to finalize fork choice update: %w", err) } - executionHead := &types.ExecutionHead{ - BlockHeight: executionPayload.Number, - BlockHash: executionPayload.BlockHash[:], - BlockTime: executionPayload.Timestamp, - } - - if err := bb.saveExecutionHead(ctx, executionHead, msgID); err != nil { + if err := bb.saveExecutionHead(executionPayload); err != nil { return fmt.Errorf("failed to save execution head: %w", err) } - return nil } @@ -435,9 +483,31 @@ func (bb *BlockBuilder) updateForkChoice(ctx context.Context, fcs engine.Forkcho }) } -func (bb *BlockBuilder) saveExecutionHead(ctx context.Context, executionHead *types.ExecutionHead, msgID string) error { - if msgID == "" { - return bb.stateManager.SaveExecutionHead(ctx, executionHead) +func (bb *BlockBuilder) loadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) { + if bb.executionHead != nil { + return bb.executionHead, nil + } + + header, err := bb.engineCl.HeaderByNumber(ctx, nil) // nil for the latest block + if err != nil { + return nil, fmt.Errorf("failed to get the latest block header: %w", err) + } + + bb.executionHead = &types.ExecutionHead{ + BlockHeight: header.Number.Uint64(), + BlockHash: header.Hash().Bytes(), + BlockTime: header.Time, } - return bb.stateManager.SaveExecutionHeadAndAck(ctx, executionHead, msgID) + + return bb.executionHead, nil +} + +func (bb *BlockBuilder) saveExecutionHead(executionPayload engine.ExecutableData) error { + bb.executionHead = &types.ExecutionHead{ + BlockHeight: executionPayload.Number, + BlockHash: executionPayload.BlockHash[:], + BlockTime: executionPayload.Timestamp, + } + + return nil } diff --git a/cl/redisapp/blockbuilder/blockbuilder_test.go b/cl/redisapp/blockbuilder/blockbuilder_test.go index e40c1919f..28349256c 100644 --- a/cl/redisapp/blockbuilder/blockbuilder_test.go +++ b/cl/redisapp/blockbuilder/blockbuilder_test.go @@ -2,6 +2,7 @@ package blockbuilder import ( 
"context" + "encoding/base64" "encoding/json" "errors" "log/slog" @@ -52,6 +53,11 @@ func (m *MockEngineClient) NewPayloadV3(ctx context.Context, executionPayload en return args.Get(0).(engine.PayloadStatusV1), args.Error(1) } +func (m *MockEngineClient) HeaderByNumber(ctx context.Context, number *big.Int) (*etypes.Header, error) { + args := m.Called(ctx, number) + return args.Get(0).(*etypes.Header), args.Error(1) +} + func TestBlockBuilder_startBuild(t *testing.T) { ctx := context.Background() @@ -64,7 +70,7 @@ func TestBlockBuilder_startBuild(t *testing.T) { BlockTime: uint64(time.Now().UnixMilli()) - 10, } - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) @@ -74,7 +80,6 @@ func TestBlockBuilder_startBuild(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } timestamp := time.Now() @@ -117,25 +122,18 @@ func TestBlockBuilder_getPayload(t *testing.T) { BlockHeight: 100, BlockTime: uint64(timestamp.UnixMilli()), } - executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) mockRedisClient.EXPECT(). XGroupCreateMkStream(gomock.Any(), "mevcommit_block_stream", "mevcommit_consumer_group:instanceID123", "0").Return(redis.NewStatusCmd(ctx)) - mockRedisClient.EXPECT(). - Get(gomock.Any(), executionHeadKey). - Return(redis.NewStringResult(string(executionHeadData), nil)). 
- Times(1) - - mockRedisClient.EXPECT().Pipeline().Return(mockPipeliner) + mockRedisClient.EXPECT().TxPipeline().Return(mockPipeliner) mockPipeliner.EXPECT().Set(ctx, "blockBuildState:instanceID123", gomock.Any(), time.Duration(0)).Return(redis.NewStatusCmd(ctx)) mockPipeliner.EXPECT().XAdd(ctx, gomock.Any()).Return(redis.NewStringCmd(ctx, "result")) mockPipeliner.EXPECT().Exec(ctx).Return([]redis.Cmder{}, nil) - stateManager, err := state.NewRedisStateManager("instanceID123", mockRedisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", mockRedisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) @@ -146,7 +144,6 @@ func TestBlockBuilder_getPayload(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } hash := common.BytesToHash(executionHead.BlockHash) @@ -178,6 +175,7 @@ func TestBlockBuilder_getPayload(t *testing.T) { } mockEngineClient.On("GetPayloadV3", mock.Anything, *payloadID).Return(executionPayload, nil) + blockBuilder.executionHead = executionHead err = blockBuilder.GetPayload(ctx) require.NoError(t, err) @@ -188,8 +186,12 @@ func TestBlockBuilder_getPayload(t *testing.T) { func TestBlockBuilder_FinalizeBlock(t *testing.T) { ctx := context.Background() - redisClient, redisMock := redismock.NewClientMock() - redisMock.ExpectXGroupCreateMkStream("mevcommit_block_stream", "mevcommit_consumer_group:instanceID123", "0").SetVal("OK") + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + mockRedisClient := mocks.NewMockRedisClient(ctrl) + + mockRedisClient.EXPECT().XGroupCreateMkStream(ctx, "mevcommit_block_stream", "mevcommit_consumer_group:instanceID123", "0").Return(redis.NewStatusCmd(ctx)) timestamp := uint64(1728051707) // 0x66fff9fb executionHead := &types.ExecutionHead{ @@ -197,11 +199,8 @@ func TestBlockBuilder_FinalizeBlock(t *testing.T) { BlockHeight: 2, BlockTime: timestamp - 10, } - executionHeadKey := 
"executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", mockRedisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) @@ -211,7 +210,6 @@ func TestBlockBuilder_FinalizeBlock(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } payloadIDStr := "payloadID123" @@ -246,12 +244,15 @@ func TestBlockBuilder_FinalizeBlock(t *testing.T) { msgpackData, err := msgpack.Marshal(executionPayload) require.NoError(t, err) + encodedPayload := base64.StdEncoding.EncodeToString(msgpackData) + payloadStatus := engine.PayloadStatusV1{ Status: engine.VALID, } + mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) - hash := common.BytesToHash(executionHead.BlockHash) + hash := executionPayload.BlockHash fcs := engine.ForkchoiceStateV1{ HeadBlockHash: hash, SafeBlockHash: hash, @@ -262,20 +263,12 @@ func TestBlockBuilder_FinalizeBlock(t *testing.T) { } mockEngineClient.On("ForkchoiceUpdatedV3", mock.Anything, fcs, (*engine.PayloadAttributes)(nil)).Return(forkChoiceResponse, nil) - executionHeadUpdate := &types.ExecutionHead{ - BlockHash: executionPayload.BlockHash.Bytes(), - BlockHeight: executionPayload.Number, - BlockTime: executionPayload.Timestamp, - } - executionHeadDataUpdated, _ := msgpack.Marshal(executionHeadUpdate) - redisMock.ExpectSet(executionHeadKey, executionHeadDataUpdated, 0).SetVal("OK") - - err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, string(msgpackData), msgID) + blockBuilder.executionHead = executionHead + err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, encodedPayload, msgID) require.NoError(t, err) 
mockEngineClient.AssertExpectations(t) - require.NoError(t, redisMock.ExpectationsWereMet()) } func TestBlockBuilder_startBuild_ForkchoiceUpdatedError(t *testing.T) { @@ -289,7 +282,7 @@ func TestBlockBuilder_startBuild_ForkchoiceUpdatedError(t *testing.T) { BlockTime: uint64(time.Now().UnixMilli()) - 10, } - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) @@ -299,7 +292,6 @@ func TestBlockBuilder_startBuild_ForkchoiceUpdatedError(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } timestamp := time.Now() @@ -334,7 +326,7 @@ func TestBlockBuilder_startBuild_InvalidPayloadStatus(t *testing.T) { BlockTime: uint64(time.Now().UnixMilli()) - 10, } - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) @@ -344,7 +336,6 @@ func TestBlockBuilder_startBuild_InvalidPayloadStatus(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } timestamp := time.Now() @@ -373,34 +364,6 @@ func TestBlockBuilder_startBuild_InvalidPayloadStatus(t *testing.T) { require.NoError(t, redisMock.ExpectationsWereMet()) } -func TestBlockBuilder_getPayload_startBuildFails(t *testing.T) { - ctx := context.Background() - redisClient, redisMock := redismock.NewClientMock() - redisMock.ExpectXGroupCreateMkStream("mevcommit_block_stream", "mevcommit_consumer_group:instanceID123", "0").SetVal("OK") - - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") - require.NoError(t, err) - mockEngineClient := new(MockEngineClient) - blockBuilder := &BlockBuilder{ - 
stateManager: stateManager, - engineCl: mockEngineClient, - buildDelay: buildDelay, - buildDelayMs: uint64(buildDelay.Milliseconds()), - logger: stLog, - ctx: ctx, - } - - executionHeadKey := "executionHead:instanceID123" - redisMock.ExpectGet(executionHeadKey).SetErr(errors.New("redis error")) - - err = blockBuilder.GetPayload(ctx) - - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to retrieve") - - require.NoError(t, redisMock.ExpectationsWereMet()) -} - func TestBlockBuilder_getPayload_GetPayloadUnknownPayload(t *testing.T) { ctx := context.Background() redisClient, redisMock := redismock.NewClientMock() @@ -412,11 +375,8 @@ func TestBlockBuilder_getPayload_GetPayloadUnknownPayload(t *testing.T) { BlockHeight: 100, BlockTime: uint64(timestamp.UnixMilli()) - 10, } - executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "010203") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) blockBuilder := &BlockBuilder{ @@ -424,7 +384,6 @@ func TestBlockBuilder_getPayload_GetPayloadUnknownPayload(t *testing.T) { engineCl: mockEngineClient, buildDelay: time.Duration(1 * time.Second), logger: stLog, - ctx: ctx, } hash := common.BytesToHash(executionHead.BlockHash) @@ -446,6 +405,7 @@ func TestBlockBuilder_getPayload_GetPayloadUnknownPayload(t *testing.T) { mockEngineClient.On("GetPayloadV3", mock.Anything, *payloadID).Return(&engine.ExecutionPayloadEnvelope{}, errors.New("Unknown payload")) + blockBuilder.executionHead = executionHead err = blockBuilder.GetPayload(ctx) require.Error(t, err) @@ -466,11 +426,8 @@ func TestBlockBuilder_FinalizeBlock_InvalidBlockHeight(t *testing.T) { BlockHeight: 100, BlockTime: uint64(timestamp.UnixMilli()) - 10, } - 
executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "000000") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) blockBuilder := &BlockBuilder{ @@ -479,7 +436,6 @@ func TestBlockBuilder_FinalizeBlock_InvalidBlockHeight(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } payloadIDStr := "payloadID123" @@ -504,7 +460,9 @@ func TestBlockBuilder_FinalizeBlock_InvalidBlockHeight(t *testing.T) { } executionPayloadData, _ := msgpack.Marshal(executionPayload) - err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, string(executionPayloadData), "") + executionPayloadEncoded := base64.StdEncoding.EncodeToString(executionPayloadData) + blockBuilder.executionHead = executionHead + err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, executionPayloadEncoded, "") require.Error(t, err) assert.Contains(t, err.Error(), "invalid block height") @@ -523,11 +481,8 @@ func TestBlockBuilder_FinalizeBlock_NewPayloadInvalidStatus(t *testing.T) { BlockHeight: 2, BlockTime: timestamp - 10, } - executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "000000") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) blockBuilder := &BlockBuilder{ @@ -536,7 +491,6 @@ func TestBlockBuilder_FinalizeBlock_NewPayloadInvalidStatus(t *testing.T) { buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, 
- ctx: ctx, } payloadIDStr := "payloadID123" @@ -561,13 +515,13 @@ func TestBlockBuilder_FinalizeBlock_NewPayloadInvalidStatus(t *testing.T) { } executionPayloadData, _ := msgpack.Marshal(executionPayload) - + executionPayloadEncoded := base64.StdEncoding.EncodeToString(executionPayloadData) payloadStatus := engine.PayloadStatusV1{ Status: "INVALID", } mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) - - err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, string(executionPayloadData), "") + blockBuilder.executionHead = executionHead + err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, executionPayloadEncoded, "") require.Error(t, err) assert.Contains(t, err.Error(), "failed to push new payload") @@ -587,11 +541,8 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) BlockHeight: 2, BlockTime: timestamp - 10, } - executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "000000") + stateManager, err := state.NewRedisCoordinator("instanceID123", redisClient, nil) require.NoError(t, err) mockEngineClient := new(MockEngineClient) blockBuilder := &BlockBuilder{ @@ -600,7 +551,6 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) buildDelay: buildDelay, buildDelayMs: uint64(buildDelay.Milliseconds()), logger: stLog, - ctx: ctx, } payloadIDStr := "payloadID123" @@ -624,16 +574,16 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) ExcessBlobGas: new(uint64), } executionPayloadData, _ := msgpack.Marshal(executionPayload) - + executionPayloadEncoded := base64.StdEncoding.EncodeToString(executionPayloadData) payloadStatus := engine.PayloadStatusV1{ Status: engine.VALID, } 
mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) fcs := engine.ForkchoiceStateV1{ - HeadBlockHash: executionPayload.ParentHash, - SafeBlockHash: executionPayload.ParentHash, - FinalizedBlockHash: executionPayload.ParentHash, + HeadBlockHash: executionPayload.BlockHash, + SafeBlockHash: executionPayload.BlockHash, + FinalizedBlockHash: executionPayload.BlockHash, } forkChoiceResponse := engine.ForkChoiceResponse{ PayloadStatus: engine.PayloadStatusV1{ @@ -642,7 +592,8 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) } mockEngineClient.On("ForkchoiceUpdatedV3", ctx, fcs, (*engine.PayloadAttributes)(nil)).Return(forkChoiceResponse, nil) - err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, string(executionPayloadData), "") + blockBuilder.executionHead = executionHead + err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, executionPayloadEncoded, "") require.Error(t, err) assert.Contains(t, err.Error(), "failed to finalize fork choice update") @@ -651,85 +602,6 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) require.NoError(t, redisMock.ExpectationsWereMet()) } -func TestBlockBuilder_FinalizeBlock_SaveExecutionHeadError(t *testing.T) { - ctx := context.Background() - redisClient, redisMock := redismock.NewClientMock() - redisMock.ExpectXGroupCreateMkStream("mevcommit_block_stream", "mevcommit_consumer_group:instanceID123", "0").SetVal("OK") - - timestamp := uint64(1728051707) // 0x66fff9fb - executionHead := &types.ExecutionHead{ - BlockHash: []byte{0xb, 0xf3, 0x9b, 0xc1, 0x8b, 0xe0, 0x59, 0xc1, 0xdc, 0xb8, 0x72, 0xac, 0x8c, 0xb, 0xc, 0x84, 0x56, 0x55, 0xa0, 0x1c, 0x2b, 0x7d, 0x8f, 0xd0, 0x1c, 0x4b, 0xec, 0xde, 0x6b, 0x3f, 0x93, 0xd7}, - BlockHeight: 2, - BlockTime: timestamp - 10, - } - executionHeadKey := "executionHead:instanceID123" - executionHeadData, _ := msgpack.Marshal(executionHead) - 
redisMock.ExpectGet(executionHeadKey).SetVal(string(executionHeadData)) - - stateManager, err := state.NewRedisStateManager("instanceID123", redisClient, nil, "000000") - require.NoError(t, err) - mockEngineClient := new(MockEngineClient) - blockBuilder := &BlockBuilder{ - stateManager: stateManager, - engineCl: mockEngineClient, - buildDelay: buildDelay, - buildDelayMs: uint64(buildDelay.Milliseconds()), - logger: stLog, - ctx: ctx, - } - - payloadIDStr := "payloadID123" - executionPayload := engine.ExecutableData{ - ParentHash: common.HexToHash("0x0bf39bc18be059c1dcb872ac8c0b0c845655a01c2b7d8fd01c4becde6b3f93d7"), - FeeRecipient: common.HexToAddress("0x0000000000000000000000000000000000000000"), - StateRoot: common.HexToHash("0xcdc166a6c2e7f8b873889a7256873144e61121f9fc1f027d79b8fa310b91ff0f"), - ReceiptsRoot: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"), - LogsBloom: common.FromHex("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), - Random: common.HexToHash("0x0bf39bc18be059c1dcb872ac8c0b0c845655a01c2b7d8fd01c4becde6b3f93d7"), - Number: 3, - GasLimit: 30000000, - GasUsed: 0, - Timestamp: 0x66fff9fb, - ExtraData: common.FromHex("0xd983010e08846765746888676f312e32322e368664617277696e"), - BaseFeePerGas: big.NewInt(0x27ee3253), - BlockHash: common.HexToHash("0x9a9b2f7e98934f8544c22cdcb00526f48886170b15c4e4e96bd43af189b5aac4"), - Transactions: [][]byte{}, - Withdrawals: []*etypes.Withdrawal{}, - BlobGasUsed: new(uint64), - ExcessBlobGas: new(uint64), - } - 
executionPayloadData, _ := msgpack.Marshal(executionPayload) - - payloadStatus := engine.PayloadStatusV1{ - Status: engine.VALID, - } - mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) - fcs := engine.ForkchoiceStateV1{ - HeadBlockHash: executionPayload.ParentHash, - SafeBlockHash: executionPayload.ParentHash, - FinalizedBlockHash: executionPayload.ParentHash, - } - mockEngineClient.On("ForkchoiceUpdatedV3", mock.Anything, fcs, (*engine.PayloadAttributes)(nil)).Return(engine.ForkChoiceResponse{ - PayloadStatus: payloadStatus, - }, nil) - - executionHeadUpdate := &types.ExecutionHead{ - BlockHash: executionPayload.BlockHash.Bytes(), - BlockHeight: executionPayload.Number, - BlockTime: executionPayload.Timestamp, - } - executionHeadDataUpdated, _ := msgpack.Marshal(executionHeadUpdate) - redisMock.ExpectSet(executionHeadKey, executionHeadDataUpdated, time.Duration(0)).SetErr(errors.New("redis error")) - - err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, string(executionPayloadData), "") - - require.Error(t, err) - assert.Contains(t, err.Error(), "failed to save execution head") - - mockEngineClient.AssertExpectations(t) - require.NoError(t, redisMock.ExpectationsWereMet()) -} - func matchPayloadAttributes(expectedHash common.Hash, executionHeadTime uint64) func(*engine.PayloadAttributes) bool { return func(attrs *engine.PayloadAttributes) bool { if attrs == nil { diff --git a/cl/redisapp/leaderfollower/leaderfollower.go b/cl/redisapp/leaderfollower/leaderfollower.go index 67c6c16ff..fed4bfd2e 100644 --- a/cl/redisapp/leaderfollower/leaderfollower.go +++ b/cl/redisapp/leaderfollower/leaderfollower.go @@ -9,7 +9,6 @@ import ( "time" "github.com/heyvito/go-leader/leader" - "github.com/primev/mev-commit/cl/redisapp/state" "github.com/primev/mev-commit/cl/redisapp/types" "github.com/primev/mev-commit/cl/redisapp/util" "github.com/redis/go-redis/v9" @@ -18,8 +17,8 @@ import ( type 
LeaderFollowerManager struct { isLeader atomic.Bool isFollowerInitialized atomic.Bool - stateManager state.StateManager - blockBuilder BlockBuilder + stateManager stateManager + blockBuilder blockBuilder leaderProc leader.Leader logger *slog.Logger instanceID string @@ -30,7 +29,7 @@ type LeaderFollowerManager struct { erroredCh <-chan error } -type BlockBuilder interface { +type blockBuilder interface { // Retrieves the latest payload and ensures it meets necessary conditions GetPayload(ctx context.Context) error @@ -44,12 +43,23 @@ type BlockBuilder interface { SetLastCallTimeToZero() } +// todo: work with block state through block builder, not directly +type stateManager interface { + // state related methods + GetBlockBuildState(ctx context.Context) types.BlockBuildState + ResetBlockState(ctx context.Context) error + + // stream related methods + AckMessage(ctx context.Context, messageID string) error + ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) +} + func NewLeaderFollowerManager( instanceID string, logger *slog.Logger, redisClient *redis.Client, - stateManager state.StateManager, - blockBuilder BlockBuilder, + stateManager stateManager, + blockBuilder blockBuilder, ) (*LeaderFollowerManager, error) { // Initialize leader election leaderOpts := leader.Opts{ @@ -83,7 +93,10 @@ func (lfm *LeaderFollowerManager) handleLeaderElection(ctx context.Context) { defer func() { err := lfm.leaderProc.Stop() if err != nil { - lfm.logger.Error("Error stopping leader election", "error", err) + lfm.logger.Error( + "Error stopping leader election", + "error", err, + ) } }() @@ -99,7 +112,10 @@ func (lfm *LeaderFollowerManager) handleLeaderElection(ctx context.Context) { lfm.isLeader.Store(false) lfm.logger.Info("Node demoted from leader") case err := <-lfm.erroredCh: - lfm.logger.Error("Leader election error", "error", err) + lfm.logger.Error( + "Leader election error", + "error", err, + ) } } } @@ -131,18 +147,27 @@ func 
(lfm *LeaderFollowerManager) run(ctx context.Context) { lfm.isFollowerInitialized.Store(false) lfm.logger.Info("Leader: Starting leader work") if err := lfm.leaderWork(ctx); err != nil { - lfm.logger.Error("Error in leader work", "error", err) + lfm.logger.Error( + "Error in leader work", + "error", err, + ) } } else { if !lfm.isFollowerInitialized.Load() { if err := lfm.blockBuilder.ProcessLastPayload(ctx); err != nil { - lfm.logger.Error("Error processing last payload", "error", err) + lfm.logger.Error( + "Error processing last payload", + "error", err, + ) continue } lfm.isFollowerInitialized.Store(true) } if err := lfm.followerWork(ctx); err != nil { - lfm.logger.Error("Error in follower work", "error", err) + lfm.logger.Error( + "Error in follower work", + "error", err, + ) } // will do nothing, in case election already started @@ -164,7 +189,10 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) error { lfm.logger.Info("Leader: State is not synchronized, waiting for follower to catch up") err := lfm.leaderProc.Stop() if err != nil { - lfm.logger.Error("Leader: Failed to stop leader election", "error", err) + lfm.logger.Error( + "Leader: Failed to stop leader election", + "error", err, + ) return err } return nil @@ -183,9 +211,15 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) error { case types.StepBuildBlock: lfm.logger.Info("Leader: StepBuildBlock") if err := lfm.blockBuilder.GetPayload(ctx); err != nil { - lfm.logger.Error("Leader: GetPayload failed", "error", err) + lfm.logger.Error( + "Leader: GetPayload failed", + "error", err, + ) if resetErr := lfm.stateManager.ResetBlockState(ctx); resetErr != nil { - lfm.logger.Error("Leader: Failed to reset block state", "error", resetErr) + lfm.logger.Error( + "Leader: Failed to reset block state", + "error", resetErr, + ) } return err @@ -193,17 +227,26 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) error { case types.StepFinalizeBlock: 
lfm.logger.Info("Leader: StepFinalizeBlock") if err := lfm.blockBuilder.FinalizeBlock(ctx, bbState.PayloadID, bbState.ExecutionPayload, ""); err != nil { - lfm.logger.Error("Leader: FinalizeBlock failed", "error", err) + lfm.logger.Error( + "Leader: FinalizeBlock failed", + "error", err, + ) return err } if err := lfm.stateManager.ResetBlockState(ctx); err != nil { - lfm.logger.Error("Leader: Failed to reset block state", "error", err) + lfm.logger.Error( + "Leader: Failed to reset block state", + "error", err, + ) return err } default: lfm.logger.Warn("Leader: Unknown current step", "current_step", currentStep.String()) if err := lfm.stateManager.ResetBlockState(ctx); err != nil { - lfm.logger.Error("Leader: Failed to reset block state", "error", err) + lfm.logger.Error( + "Leader: Failed to reset block state", + "error", err, + ) return err } } @@ -217,13 +260,19 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) error { // todo: refactor to generate timestamp outside blockbuilder lfm.blockBuilder.SetLastCallTimeToZero() if stopElecErr != nil { - lfm.logger.Error("Leader: Failed to stop leader election", "error", stopElecErr) + lfm.logger.Error( + "Leader: Failed to stop leader election", + "error", stopElecErr, + ) return stopElecErr } return err } // otherwise there is a problem with redis/payload, so we just log it and continue - lfm.logger.Error("Leader: Error in leader work", "error", err) + lfm.logger.Error( + "Leader: Error in leader work", + "error", err, + ) } } } @@ -240,7 +289,10 @@ func (lfm *LeaderFollowerManager) followerWork(ctx context.Context) error { messages, err := lfm.readMessages(ctx) if err != nil { - lfm.logger.Error("Follower: Error reading messages", "error", err) + lfm.logger.Error( + "Follower: Error reading messages", + "error", err, + ) continue } @@ -260,30 +312,61 @@ func (lfm *LeaderFollowerManager) followerWork(ctx context.Context) error { lfm.logger.Error("Follower: Invalid message format") // Acknowledge the 
message to avoid reprocessing if ackErr := lfm.stateManager.AckMessage(ctx, field.ID); ackErr != nil { - lfm.logger.Error("Follower: Failed to acknowledge message", "error", ackErr) + lfm.logger.Error( + "Follower: Failed to acknowledge message", + "error", ackErr, + ) } continue } // Ignore messages sent by self if senderInstanceID == lfm.instanceID { - lfm.logger.Info("Follower: Ignoring own message", "PayloadID", payloadIDStr) + lfm.logger.Info( + "Follower: Ignoring own message", + "PayloadID", payloadIDStr, + ) if ackErr := lfm.stateManager.AckMessage(ctx, field.ID); ackErr != nil { - lfm.logger.Error("Follower: Failed to acknowledge own message", "error", ackErr) + lfm.logger.Error( + "Follower: Failed to acknowledge own message", + "error", ackErr, + ) } continue } - lfm.logger.Info("Follower: Processing message", "PayloadID", payloadIDStr) + lfm.logger.Info( + "Follower: Processing message", + "PayloadID", payloadIDStr, + ) // Finalize block // msg will be acknowledged inside of state manager with execution head saved if err := lfm.blockBuilder.FinalizeBlock(ctx, payloadIDStr, executionPayloadStr, field.ID); err != nil { - lfm.logger.Error("Follower: Failed to finalize block", "error", err) + lfm.logger.Error( + "Follower: Failed to finalize block", + "error", err, + ) continue } - lfm.logger.Info("Follower: Successfully finalized block", "PayloadID", payloadIDStr) + err = lfm.stateManager.AckMessage(ctx, field.ID) + if err != nil { + lfm.logger.Error( + "Follower: Failed to acknowledge message", + "error", err, + ) + } else { + lfm.logger.Info( + "Follower: Successfully acknowledged message", + "PayloadID", payloadIDStr, + ) + } + + lfm.logger.Info( + "Follower: Successfully finalized block", + "PayloadID", payloadIDStr, + ) } } } @@ -294,7 +377,10 @@ func (lfm *LeaderFollowerManager) readMessages(ctx context.Context) ([]redis.XSt // Try to read pending messages first messages, err := lfm.stateManager.ReadMessagesFromStream(ctx, types.RedisMsgTypePending) 
if err != nil { - lfm.logger.Error("Follower: Error reading pending messages", "error", err) + lfm.logger.Error( + "Follower: Error reading pending messages", + "error", err, + ) return nil, err } @@ -302,7 +388,10 @@ func (lfm *LeaderFollowerManager) readMessages(ctx context.Context) ([]redis.XSt if len(messages) == 0 || len(messages[0].Messages) == 0 { messages, err = lfm.stateManager.ReadMessagesFromStream(ctx, types.RedisMsgTypeNew) if err != nil { - lfm.logger.Error("Follower: Error reading new messages", "error", err) + lfm.logger.Error( + "Follower: Error reading new messages", + "error", err, + ) return nil, err } } @@ -329,7 +418,10 @@ func (lfm *LeaderFollowerManager) WaitForGoroutinesToStop() error { func (lfm *LeaderFollowerManager) HaveMessagesToProcess(ctx context.Context) bool { messages, err := lfm.readMessages(ctx) if err != nil { - lfm.logger.Error("Error reading messages", "error", err) + lfm.logger.Error( + "Error reading messages", + "error", err, + ) return false } if len(messages) == 0 || len(messages[0].Messages) == 0 { diff --git a/cl/redisapp/leaderfollower/leaderfollower_test.go b/cl/redisapp/leaderfollower/leaderfollower_test.go index c35703afd..66917fbd8 100644 --- a/cl/redisapp/leaderfollower/leaderfollower_test.go +++ b/cl/redisapp/leaderfollower/leaderfollower_test.go @@ -42,7 +42,7 @@ func TestNewLeaderFollowerManager(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - stateManager := mocks.NewMockStateManager(ctrl) + stateManager := mocks.NewMockCoordinator(ctrl) blockBuilder := mocks.NewMockBlockBuilder(ctrl) // Execute @@ -62,7 +62,7 @@ func TestHaveMessagesToProcess(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSM := mocks.NewMockStateManager(ctrl) + mockSM := mocks.NewMockCoordinator(ctrl) // Prepare mock state manager to return some messages messages := []redis.XStream{ @@ -104,7 +104,7 @@ func TestHaveMessagesToProcess_NoMessages(t *testing.T) { ctrl := gomock.NewController(t) 
defer ctrl.Finish() - mockSM := mocks.NewMockStateManager(ctrl) + mockSM := mocks.NewMockCoordinator(ctrl) // Set up expectations gomock.InOrder( @@ -130,7 +130,7 @@ func TestLeaderWork_StepBuildBlock(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSM := mocks.NewMockStateManager(ctrl) + mockSM := mocks.NewMockCoordinator(ctrl) mockBB := mocks.NewMockBlockBuilder(ctrl) lfm := &LeaderFollowerManager{ @@ -176,7 +176,7 @@ func TestFollowerWork_NoMessages(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSM := mocks.NewMockStateManager(ctrl) + mockSM := mocks.NewMockCoordinator(ctrl) mockBB := mocks.NewMockBlockBuilder(ctrl) lfm := &LeaderFollowerManager{ @@ -212,7 +212,7 @@ func TestFollowerWork_WithMessages(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - mockSM := mocks.NewMockStateManager(ctrl) + mockSM := mocks.NewMockCoordinator(ctrl) mockBB := mocks.NewMockBlockBuilder(ctrl) lfm := &LeaderFollowerManager{ @@ -222,7 +222,6 @@ func TestFollowerWork_WithMessages(t *testing.T) { instanceID: "test-instance", } - // Prepare messages to return messages := []redis.XStream{ { Stream: "test-stream", @@ -239,26 +238,30 @@ func TestFollowerWork_WithMessages(t *testing.T) { }, } - // Set up expectations gomock.InOrder( mockSM.EXPECT().ReadMessagesFromStream(ctx, types.RedisMsgTypePending).Return([]redis.XStream{}, nil), mockSM.EXPECT().ReadMessagesFromStream(ctx, types.RedisMsgTypeNew).Return(messages, nil), mockBB.EXPECT().FinalizeBlock(ctx, "test-payload-id", "test-execution-payload", "1-0").Return(nil), + mockSM.EXPECT().AckMessage(ctx, "1-0").Return(nil).Do(func(ctx context.Context, msgID string) { + cancel() + }), ) mockSM.EXPECT().ReadMessagesFromStream(ctx, gomock.Any()).AnyTimes().Return([]redis.XStream{}, nil) - // Run followerWork in a separate goroutine to allow context cancellation done := make(chan error) go func() { err := lfm.followerWork(ctx) done <- err }() - // Wait for the function to 
return - err := <-done - if err != nil { - t.Errorf("followerWork returned error: %v", err) + select { + case err := <-done: + if err != nil { + t.Errorf("followerWork returned error: %v", err) + } + case <-time.After(5 * time.Second): + t.Errorf("followerWork timed out") } } diff --git a/cl/redisapp/rapp.go b/cl/redisapp/rapp.go index 8b1327e51..b997990a0 100644 --- a/cl/redisapp/rapp.go +++ b/cl/redisapp/rapp.go @@ -24,9 +24,11 @@ type MevCommitChain struct { lfm *leaderfollower.LeaderFollowerManager } -func NewMevCommitChain(instanceID, ecURL, jwtSecret, genesisBlockHash, redisAddr, feeReceipt string, +func NewMevCommitChain( + instanceID, ecURL, jwtSecret, redisAddr, feeReceipt string, logger *slog.Logger, - buildDelay, buildDelayEmptyBlocks time.Duration) (*MevCommitChain, error) { + buildDelay, buildDelayEmptyBlocks time.Duration, +) (*MevCommitChain, error) { // Create a context for cancellation ctx, cancel := context.WithCancel(context.Background()) @@ -34,14 +36,20 @@ func NewMevCommitChain(instanceID, ecURL, jwtSecret, genesisBlockHash, redisAddr bytes, err := hex.DecodeString(jwtSecret) if err != nil { cancel() - logger.Error("Error decoding JWT secret", "error", err) + logger.Error( + "Error decoding JWT secret", + "error", err, + ) return nil, err } engineCL, err := ethclient.NewAuthClient(ctx, ecURL, bytes) if err != nil { cancel() - logger.Error("Error creating engine client", "error", err) + logger.Error( + "Error creating engine client", + "error", err, + ) return nil, err } @@ -52,32 +60,41 @@ func NewMevCommitChain(instanceID, ecURL, jwtSecret, genesisBlockHash, redisAddr err = redisClient.ConfigSet(ctx, "min-replicas-to-write", "1").Err() if err != nil { cancel() - logger.Error("Error setting min-replicas-to-write", "error", err) + logger.Error( + "Error setting min-replicas-to-write", + "error", err, + ) return nil, err } - stateManager, err := state.NewRedisStateManager(instanceID, redisClient, logger, genesisBlockHash) + coordinator, err := 
state.NewRedisCoordinator(instanceID, redisClient, logger) if err != nil { cancel() - logger.Error("Error creating state manager", "error", err) + logger.Error( + "Error creating state manager", + "error", err, + ) return nil, err } - blockBuilder := blockbuilder.NewBlockBuilder(stateManager, engineCL, logger, buildDelay, buildDelayEmptyBlocks, feeReceipt) + blockBuilder := blockbuilder.NewBlockBuilder(coordinator, engineCL, logger, buildDelay, buildDelayEmptyBlocks, feeReceipt) lfm, err := leaderfollower.NewLeaderFollowerManager( instanceID, logger, redisClient, - stateManager, + coordinator, blockBuilder, ) if err != nil { cancel() - logger.Error("Error creating lfm", "error", err) + logger.Error( + "Error creating lfm", + "error", err, + ) return nil, err } app := &MevCommitChain{ - stateManager: stateManager, + stateManager: coordinator, blockBuilder: blockBuilder, logger: logger, cancel: cancel, @@ -98,7 +115,10 @@ func (app *MevCommitChain) Stop() { app.stateManager.Stop() err := app.lfm.WaitForGoroutinesToStop() if err != nil { - app.logger.Error("Error waiting for goroutines to stop", "error", err) + app.logger.Error( + "Error waiting for goroutines to stop", + "error", err, + ) } app.logger.Info("MevCommitChain stopped gracefully") } diff --git a/cl/redisapp/state/state.go b/cl/redisapp/state/state.go index 07e34e00e..2a9edd9ea 100644 --- a/cl/redisapp/state/state.go +++ b/cl/redisapp/state/state.go @@ -2,7 +2,6 @@ package state import ( "context" - "encoding/hex" "errors" "fmt" "log/slog" @@ -21,97 +20,118 @@ type RedisClient interface { Close() error } +type PipelineOperation func(redis.Pipeliner) error + type StateManager interface { - SaveExecutionHead(ctx context.Context, head *types.ExecutionHead) error - LoadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) - LoadOrInitializeBlockState(ctx context.Context) error - SaveBlockState(ctx context.Context) error ResetBlockState(ctx context.Context) error - SaveExecutionHeadAndAck(ctx 
context.Context, head *types.ExecutionHead, messageID string) error - SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error GetBlockBuildState(ctx context.Context) types.BlockBuildState - CreateConsumerGroup(ctx context.Context) error + Stop() +} + +type StreamManager interface { ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) AckMessage(ctx context.Context, messageID string) error Stop() } +type Coordinator interface { + StreamManager + StateManager + SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error + Stop() +} + type RedisStateManager struct { - instanceID string - redisClient RedisClient - logger *slog.Logger - genesisBlockHash string - groupName string - consumerName string + instanceID string + redisClient RedisClient + logger *slog.Logger blockStateKey string blockBuildState *types.BlockBuildState } +type RedisStreamManager struct { + instanceID string + redisClient RedisClient + logger *slog.Logger + + groupName string + consumerName string +} + +type RedisCoordinator struct { + stateMgr *RedisStateManager + streamMgr *RedisStreamManager + redisClient RedisClient // added to close the client + logger *slog.Logger +} + func NewRedisStateManager( instanceID string, redisClient RedisClient, logger *slog.Logger, - genesisBlockHash string, -) (StateManager, error) { - rsm := &RedisStateManager{ - instanceID: instanceID, - redisClient: redisClient, - logger: logger, - genesisBlockHash: genesisBlockHash, - blockStateKey: fmt.Sprintf("blockBuildState:%s", instanceID), - groupName: fmt.Sprintf("mevcommit_consumer_group:%s", instanceID), - consumerName: fmt.Sprintf("follower:%s", instanceID), +) *RedisStateManager { + return &RedisStateManager{ + instanceID: instanceID, + redisClient: redisClient, + logger: logger, + blockStateKey: fmt.Sprintf("blockBuildState:%s", instanceID), } - if err := rsm.CreateConsumerGroup(context.Background()); 
err != nil { - return nil, err +} + +func NewRedisStreamManager( + instanceID string, + redisClient RedisClient, + logger *slog.Logger, +) *RedisStreamManager { + return &RedisStreamManager{ + instanceID: instanceID, + redisClient: redisClient, + logger: logger, + groupName: fmt.Sprintf("mevcommit_consumer_group:%s", instanceID), + consumerName: fmt.Sprintf("follower:%s", instanceID), } - return rsm, nil } -func (s *RedisStateManager) SaveExecutionHead(ctx context.Context, head *types.ExecutionHead) error { - data, err := msgpack.Marshal(head) - if err != nil { - return fmt.Errorf("failed to serialize execution head: %w", err) +func NewRedisCoordinator( + instanceID string, + redisClient RedisClient, + logger *slog.Logger, +) (*RedisCoordinator, error) { + stateMgr := NewRedisStateManager(instanceID, redisClient, logger) + streamMgr := NewRedisStreamManager(instanceID, redisClient, logger) + + coordinator := &RedisCoordinator{ + stateMgr: stateMgr, + streamMgr: streamMgr, + redisClient: redisClient, + logger: logger, } - key := fmt.Sprintf("executionHead:%s", s.instanceID) - if err := s.redisClient.Set(ctx, key, data, 0).Err(); err != nil { - return fmt.Errorf("failed to save execution head to Redis: %w", err) + if err := streamMgr.createConsumerGroup(context.Background()); err != nil { + return nil, fmt.Errorf("failed to create consumer group: %w", err) } - return nil + return coordinator, nil } -func (s *RedisStateManager) LoadExecutionHead(ctx context.Context) (*types.ExecutionHead, error) { - key := fmt.Sprintf("executionHead:%s", s.instanceID) - data, err := s.redisClient.Get(ctx, key).Result() - if err != nil { - if errors.Is(err, redis.Nil) { - s.logger.Info("executionHead not found in Redis, initializing with default values") - hashBytes, decodeErr := hex.DecodeString(s.genesisBlockHash) - if decodeErr != nil { - s.logger.Error("Error decoding genesis block hash", "error", decodeErr) - return nil, decodeErr - } - head := &types.ExecutionHead{BlockHash: 
hashBytes, BlockTime: uint64(time.Now().UnixMilli())} - if saveErr := s.SaveExecutionHead(ctx, head); saveErr != nil { - return nil, saveErr - } - return head, nil +func (s *RedisStateManager) executeTransaction(ctx context.Context, ops ...PipelineOperation) error { + pipe := s.redisClient.TxPipeline() + + for _, op := range ops { + if err := op(pipe); err != nil { + return fmt.Errorf("failed to execute operation: %w", err) } - return nil, fmt.Errorf("failed to retrieve execution head: %w", err) } - var head types.ExecutionHead - if err := msgpack.Unmarshal([]byte(data), &head); err != nil { - return nil, fmt.Errorf("failed to deserialize execution head: %w", err) + if _, err := pipe.Exec(ctx); err != nil { + return fmt.Errorf("state transaction failed: %w", err) } - return &head, nil + return nil } -func (s *RedisStateManager) LoadOrInitializeBlockState(ctx context.Context) error { +func (s *RedisStateManager) loadOrInitializeBlockState(ctx context.Context) error { data, err := s.redisClient.Get(ctx, s.blockStateKey).Result() if err != nil { if errors.Is(err, redis.Nil) { @@ -119,7 +139,7 @@ func (s *RedisStateManager) LoadOrInitializeBlockState(ctx context.Context) erro CurrentStep: types.StepBuildBlock, } s.logger.Info("Leader block build state not found in Redis, initializing with default values") - return s.SaveBlockState(ctx) + return s.saveBlockState(ctx) } return fmt.Errorf("failed to retrieve leader block build state: %w", err) } @@ -129,22 +149,28 @@ func (s *RedisStateManager) LoadOrInitializeBlockState(ctx context.Context) erro return fmt.Errorf("failed to deserialize leader block build state: %w", err) } - s.logger.Info("Loaded leader block build state", "CurrentStep", state.CurrentStep.String()) + s.logger.Info( + "Loaded leader block build state", + "CurrentStep", state.CurrentStep.String(), + ) s.blockBuildState = &state return nil } -func (s *RedisStateManager) SaveBlockState(ctx context.Context) error { - data, err := 
msgpack.Marshal(s.blockBuildState) - if err != nil { - return fmt.Errorf("failed to serialize leader block build state: %w", err) - } +func (s *RedisStateManager) saveBlockState(ctx context.Context) error { + return s.executeTransaction(ctx, s.saveBlockStateFunc(ctx, s.blockBuildState)) +} - if err := s.redisClient.Set(ctx, s.blockStateKey, data, 0).Err(); err != nil { - return fmt.Errorf("failed to save leader block build state to Redis: %w", err) - } +func (s *RedisStateManager) saveBlockStateFunc(ctx context.Context, bsState *types.BlockBuildState) PipelineOperation { + return func(pipe redis.Pipeliner) error { + data, err := msgpack.Marshal(bsState) + if err != nil { + return fmt.Errorf("failed to serialize block build state: %w", err) + } - return nil + pipe.Set(ctx, s.blockStateKey, data, 0) + return nil + } } func (s *RedisStateManager) ResetBlockState(ctx context.Context) error { @@ -152,82 +178,61 @@ func (s *RedisStateManager) ResetBlockState(ctx context.Context) error { CurrentStep: types.StepBuildBlock, } - if err := s.SaveBlockState(ctx); err != nil { + if err := s.saveBlockState(ctx); err != nil { return fmt.Errorf("failed to reset leader state: %w", err) } return nil } -func (s *RedisStateManager) SaveExecutionHeadAndAck(ctx context.Context, head *types.ExecutionHead, messageID string) error { - data, err := msgpack.Marshal(head) - if err != nil { - return fmt.Errorf("failed to serialize execution head: %w", err) +func (s *RedisStateManager) GetBlockBuildState(ctx context.Context) types.BlockBuildState { + if s.blockBuildState == nil { + s.logger.Error("Leader blockBuildState is not initialized") + if err := s.loadOrInitializeBlockState(ctx); err != nil { + s.logger.Warn( + "Failed to load/init state", + "error", err, + ) + return types.BlockBuildState{} + } } - key := fmt.Sprintf("executionHead:%s", s.instanceID) - pipe := s.redisClient.TxPipeline() - - pipe.Set(ctx, key, data, 0) - pipe.XAck(ctx, blockStreamName, s.groupName, messageID) - - if _, 
err := pipe.Exec(ctx); err != nil { - return fmt.Errorf("transaction failed: %w", err) + if s.blockBuildState == nil { + s.logger.Error("Leader blockBuildState is still not initialized") + return types.BlockBuildState{} } - s.logger.Info("Follower: Execution head saved and message acknowledged", "MessageID", messageID) - return nil + s.logger.Info( + "Leader blockBuildState retrieved", + "CurrentStep", s.blockBuildState.CurrentStep.String(), + ) + // Return a copy of the state to prevent external modification + return *s.blockBuildState } -func (s *RedisStateManager) SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { - s.blockBuildState = bsState - data, err := msgpack.Marshal(bsState) - if err != nil { - return fmt.Errorf("failed to serialize leader block build state: %w", err) +func (s *RedisStateManager) Stop() { + if err := s.redisClient.Close(); err != nil { + s.logger.Error("Error closing Redis client in StateManager", "error", err) } +} - pipe := s.redisClient.Pipeline() - pipe.Set(ctx, s.blockStateKey, data, 0) +func (s *RedisStreamManager) executeTransaction(ctx context.Context, ops ...PipelineOperation) error { + pipe := s.redisClient.TxPipeline() - message := map[string]interface{}{ - "payload_id": bsState.PayloadID, - "execution_payload": bsState.ExecutionPayload, - "timestamp": time.Now().UnixNano(), - "sender_instance_id": s.instanceID, + for _, op := range ops { + if err := op(pipe); err != nil { + return fmt.Errorf("failed to execute operation: %w", err) + } } - pipe.XAdd(ctx, &redis.XAddArgs{ - Stream: blockStreamName, - Values: message, - }) - if _, err := pipe.Exec(ctx); err != nil { - return fmt.Errorf("pipeline failed: %w", err) + return fmt.Errorf("stream transaction failed: %w", err) } return nil } -func (s *RedisStateManager) GetBlockBuildState(ctx context.Context) types.BlockBuildState { - if s.blockBuildState == nil { - s.logger.Error("Leader blockBuildState is not initialized") - if err := 
s.LoadOrInitializeBlockState(ctx); err != nil { - s.logger.Warn("Failed to load/init state", "error", err) - return types.BlockBuildState{} - } - } - - if s.blockBuildState == nil { - s.logger.Error("Leader blockBuildState is still not initialized") - return types.BlockBuildState{} - } - - s.logger.Info("Leader blockBuildState retrieved", "CurrentStep", s.blockBuildState.CurrentStep.String()) - // Return a copy of the state to prevent external modification - return *s.blockBuildState -} - -func (s *RedisStateManager) CreateConsumerGroup(ctx context.Context) error { +func (s *RedisStreamManager) createConsumerGroup(ctx context.Context) error { if err := s.redisClient.XGroupCreateMkStream(ctx, blockStreamName, s.groupName, "0").Err(); err != nil { if !strings.Contains(err.Error(), "BUSYGROUP") { return fmt.Errorf("failed to create consumer group '%s': %w", s.groupName, err) @@ -236,7 +241,7 @@ func (s *RedisStateManager) CreateConsumerGroup(ctx context.Context) error { return nil } -func (s *RedisStateManager) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { +func (s *RedisStreamManager) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { args := &redis.XReadGroupArgs{ Group: s.groupName, Consumer: s.consumerName, @@ -253,15 +258,79 @@ func (s *RedisStateManager) ReadMessagesFromStream(ctx context.Context, msgType return messages, nil } -func (s *RedisStateManager) AckMessage(ctx context.Context, messageID string) error { - if err := s.redisClient.XAck(ctx, blockStreamName, s.groupName, messageID).Err(); err != nil { - return fmt.Errorf("failed to acknowledge message: %w", err) +func (s *RedisStreamManager) AckMessage(ctx context.Context, messageID string) error { + return s.executeTransaction(ctx, s.ackMessageFunc(ctx, messageID)) +} + +func (s *RedisStreamManager) ackMessageFunc(ctx context.Context, messageID string) PipelineOperation { + return func(pipe 
redis.Pipeliner) error { + pipe.XAck(ctx, blockStreamName, s.groupName, messageID) + return nil } - return nil } -func (s *RedisStateManager) Stop() { +func (s *RedisStreamManager) publishToStreamFunc(ctx context.Context, bsState *types.BlockBuildState) PipelineOperation { + return func(pipe redis.Pipeliner) error { + message := map[string]interface{}{ + "payload_id": bsState.PayloadID, + "execution_payload": bsState.ExecutionPayload, + "timestamp": time.Now().UnixNano(), + "sender_instance_id": s.instanceID, + } + + pipe.XAdd(ctx, &redis.XAddArgs{ + Stream: blockStreamName, + Values: message, + }) + return nil + } +} + +func (s *RedisStreamManager) Stop() { if err := s.redisClient.Close(); err != nil { - s.logger.Error("Error closing Redis client", "error", err) + s.logger.Error( + "Error closing Redis client in StreamManager", + "error", err, + ) + } +} + +func (c *RedisCoordinator) SaveBlockStateAndPublishToStream(ctx context.Context, bsState *types.BlockBuildState) error { + c.stateMgr.blockBuildState = bsState + + err := c.stateMgr.executeTransaction( + ctx, + c.stateMgr.saveBlockStateFunc(ctx, bsState), + c.streamMgr.publishToStreamFunc(ctx, bsState), + ) + if err != nil { + return fmt.Errorf("transaction failed: %w", err) + } + + return nil +} + +func (c *RedisCoordinator) ResetBlockState(ctx context.Context) error { + return c.stateMgr.ResetBlockState(ctx) +} + +func (c *RedisCoordinator) GetBlockBuildState(ctx context.Context) types.BlockBuildState { + return c.stateMgr.GetBlockBuildState(ctx) +} + +func (c *RedisCoordinator) ReadMessagesFromStream(ctx context.Context, msgType types.RedisMsgType) ([]redis.XStream, error) { + return c.streamMgr.ReadMessagesFromStream(ctx, msgType) +} + +func (c *RedisCoordinator) AckMessage(ctx context.Context, messageID string) error { + return c.streamMgr.AckMessage(ctx, messageID) +} + +func (c *RedisCoordinator) Stop() { + if err := c.redisClient.Close(); err != nil { + c.logger.Error( + "Error closing Redis client in 
StateManager", + "error", err, + ) } } From 344bb3d0b068bcf6aeaefa2fa51aaf5896504421 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Mon, 12 May 2025 15:16:53 +0200 Subject: [PATCH 02/24] feat: moved types to outer package --- cl/mocks/mock_state.go | 2 +- cl/redisapp/blockbuilder/blockbuilder.go | 6 +++--- cl/redisapp/blockbuilder/blockbuilder_test.go | 2 +- cl/redisapp/leaderfollower/leaderfollower.go | 4 ++-- cl/redisapp/leaderfollower/leaderfollower_test.go | 2 +- cl/redisapp/state/state.go | 12 ++++++------ cl/{redisapp => }/types/types.go | 0 7 files changed, 14 insertions(+), 14 deletions(-) rename cl/{redisapp => }/types/types.go (100%) diff --git a/cl/mocks/mock_state.go b/cl/mocks/mock_state.go index 85604c9b4..3b35a6a05 100644 --- a/cl/mocks/mock_state.go +++ b/cl/mocks/mock_state.go @@ -11,7 +11,7 @@ import ( gomock "github.com/golang/mock/gomock" state "github.com/primev/mev-commit/cl/redisapp/state" - types "github.com/primev/mev-commit/cl/redisapp/types" + types "github.com/primev/mev-commit/cl/types" redis "github.com/redis/go-redis/v9" ) diff --git a/cl/redisapp/blockbuilder/blockbuilder.go b/cl/redisapp/blockbuilder/blockbuilder.go index b12a74161..c0a003246 100644 --- a/cl/redisapp/blockbuilder/blockbuilder.go +++ b/cl/redisapp/blockbuilder/blockbuilder.go @@ -16,8 +16,8 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" etypes "github.com/ethereum/go-ethereum/core/types" - "github.com/primev/mev-commit/cl/redisapp/types" "github.com/primev/mev-commit/cl/redisapp/util" + "github.com/primev/mev-commit/cl/types" "github.com/vmihailenco/msgpack/v5" ) @@ -284,8 +284,8 @@ func (bb *BlockBuilder) ProcessLastPayload(ctx context.Context) error { return nil // Success } else { bb.logger.Warn( - "Follower: Invalid block height, exit", - "invalid_height", invalidHeight, + "Follower: Invalid block height, exit", + "invalid_height", invalidHeight, "expected_height", expectedHeight, ) return 
backoff.Permanent(err) diff --git a/cl/redisapp/blockbuilder/blockbuilder_test.go b/cl/redisapp/blockbuilder/blockbuilder_test.go index 28349256c..4c365046f 100644 --- a/cl/redisapp/blockbuilder/blockbuilder_test.go +++ b/cl/redisapp/blockbuilder/blockbuilder_test.go @@ -14,7 +14,7 @@ import ( "github.com/golang/mock/gomock" "github.com/primev/mev-commit/cl/mocks" "github.com/primev/mev-commit/cl/redisapp/state" - "github.com/primev/mev-commit/cl/redisapp/types" + "github.com/primev/mev-commit/cl/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" diff --git a/cl/redisapp/leaderfollower/leaderfollower.go b/cl/redisapp/leaderfollower/leaderfollower.go index fed4bfd2e..71997be3e 100644 --- a/cl/redisapp/leaderfollower/leaderfollower.go +++ b/cl/redisapp/leaderfollower/leaderfollower.go @@ -9,8 +9,8 @@ import ( "time" "github.com/heyvito/go-leader/leader" - "github.com/primev/mev-commit/cl/redisapp/types" "github.com/primev/mev-commit/cl/redisapp/util" + "github.com/primev/mev-commit/cl/types" "github.com/redis/go-redis/v9" ) @@ -270,7 +270,7 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) error { } // otherwise there is a problem with redis/payload, so we just log it and continue lfm.logger.Error( - "Leader: Error in leader work", + "Leader: Error in leader work", "error", err, ) } diff --git a/cl/redisapp/leaderfollower/leaderfollower_test.go b/cl/redisapp/leaderfollower/leaderfollower_test.go index 66917fbd8..b813f0ab0 100644 --- a/cl/redisapp/leaderfollower/leaderfollower_test.go +++ b/cl/redisapp/leaderfollower/leaderfollower_test.go @@ -10,7 +10,7 @@ import ( "github.com/golang/mock/gomock" "github.com/primev/mev-commit/cl/mocks" - "github.com/primev/mev-commit/cl/redisapp/types" + "github.com/primev/mev-commit/cl/types" "github.com/redis/go-redis/v9" ) diff --git a/cl/redisapp/state/state.go b/cl/redisapp/state/state.go index 2a9edd9ea..ee61c8ea7 100644 --- 
a/cl/redisapp/state/state.go +++ b/cl/redisapp/state/state.go @@ -8,7 +8,7 @@ import ( "strings" "time" - "github.com/primev/mev-commit/cl/redisapp/types" + "github.com/primev/mev-commit/cl/types" "github.com/redis/go-redis/v9" "github.com/vmihailenco/msgpack/v5" ) @@ -150,7 +150,7 @@ func (s *RedisStateManager) loadOrInitializeBlockState(ctx context.Context) erro } s.logger.Info( - "Loaded leader block build state", + "Loaded leader block build state", "CurrentStep", state.CurrentStep.String(), ) s.blockBuildState = &state @@ -190,7 +190,7 @@ func (s *RedisStateManager) GetBlockBuildState(ctx context.Context) types.BlockB s.logger.Error("Leader blockBuildState is not initialized") if err := s.loadOrInitializeBlockState(ctx); err != nil { s.logger.Warn( - "Failed to load/init state", + "Failed to load/init state", "error", err, ) return types.BlockBuildState{} @@ -203,7 +203,7 @@ func (s *RedisStateManager) GetBlockBuildState(ctx context.Context) types.BlockB } s.logger.Info( - "Leader blockBuildState retrieved", + "Leader blockBuildState retrieved", "CurrentStep", s.blockBuildState.CurrentStep.String(), ) // Return a copy of the state to prevent external modification @@ -289,7 +289,7 @@ func (s *RedisStreamManager) publishToStreamFunc(ctx context.Context, bsState *t func (s *RedisStreamManager) Stop() { if err := s.redisClient.Close(); err != nil { s.logger.Error( - "Error closing Redis client in StreamManager", + "Error closing Redis client in StreamManager", "error", err, ) } @@ -329,7 +329,7 @@ func (c *RedisCoordinator) AckMessage(ctx context.Context, messageID string) err func (c *RedisCoordinator) Stop() { if err := c.redisClient.Close(); err != nil { c.logger.Error( - "Error closing Redis client in StateManager", + "Error closing Redis client in StateManager", "error", err, ) } diff --git a/cl/redisapp/types/types.go b/cl/types/types.go similarity index 100% rename from cl/redisapp/types/types.go rename to cl/types/types.go From 
f746bfe3fd85424fe433454d3e8f48a12d0592c2 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 13 May 2025 17:54:13 +0200 Subject: [PATCH 03/24] feat: added single node --- .../blockbuilder/blockbuilder.go | 2 +- .../blockbuilder/blockbuilder_test.go | 0 cl/cmd/singlenode/main.go | 242 ++++++++++++++++++ cl/redisapp/leaderfollower/leaderfollower.go | 2 +- cl/redisapp/rapp.go | 2 +- cl/singlenode/singlenode.go | 222 ++++++++++++++++ cl/singlenode/state/localstate.go | 68 +++++ cl/{redisapp => }/util/retry.go | 0 8 files changed, 535 insertions(+), 3 deletions(-) rename cl/{redisapp => }/blockbuilder/blockbuilder.go (99%) rename cl/{redisapp => }/blockbuilder/blockbuilder_test.go (100%) create mode 100644 cl/singlenode/singlenode.go create mode 100644 cl/singlenode/state/localstate.go rename cl/{redisapp => }/util/retry.go (100%) diff --git a/cl/redisapp/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go similarity index 99% rename from cl/redisapp/blockbuilder/blockbuilder.go rename to cl/blockbuilder/blockbuilder.go index c0a003246..cef88e680 100644 --- a/cl/redisapp/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -16,7 +16,7 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" etypes "github.com/ethereum/go-ethereum/core/types" - "github.com/primev/mev-commit/cl/redisapp/util" + "github.com/primev/mev-commit/cl/util" "github.com/primev/mev-commit/cl/types" "github.com/vmihailenco/msgpack/v5" ) diff --git a/cl/redisapp/blockbuilder/blockbuilder_test.go b/cl/blockbuilder/blockbuilder_test.go similarity index 100% rename from cl/redisapp/blockbuilder/blockbuilder_test.go rename to cl/blockbuilder/blockbuilder_test.go diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index 06ab7d0f9..cc221b9b2 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -1 +1,243 @@ package main + +import ( + "context" + "encoding/hex" + "fmt" + "net/url" + "os" + 
"os/signal" + "slices" + "strings" + "syscall" + "time" + + "github.com/primev/mev-commit/cl/singlenode" + "github.com/primev/mev-commit/x/util" + "github.com/urfave/cli/v2" + "github.com/urfave/cli/v2/altsrc" +) + +const ( + categoryDebug = "Debug" +) + +var ( + stringInCheck = func(flag string, opts []string) func(c *cli.Context, p string) error { + return func(c *cli.Context, p string) error { + if !slices.Contains(opts, p) { + return fmt.Errorf("invalid %s option %q, expected one of %s", flag, p, strings.Join(opts, ", ")) + } + return nil + } + } +) + +// CLI Flags (subset of redisapp, without Redis-specific ones) +var ( + configFlag = &cli.StringFlag{ + Name: "config", + Usage: "Path to YAML config file", + EnvVars: []string{"SNODE_CONFIG"}, + } + + instanceIDFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "instance-id", + Usage: "Unique instance ID for this node (for logging/identification)", + EnvVars: []string{"SNODE_INSTANCE_ID"}, + Required: true, + Action: func(_ *cli.Context, s string) error { + if s == "" { + return fmt.Errorf("instance-id is required") + } + return nil + }, + }) + + ethClientURLFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "eth-client-url", + Usage: "Ethereum Execution client Engine API URL (e.g., http://localhost:8551)", + EnvVars: []string{"SNODE_ETH_CLIENT_URL"}, + Value: "http://localhost:8551", + Action: func(_ *cli.Context, s string) error { + if _, err := url.Parse(s); err != nil { + return fmt.Errorf("invalid eth-client-url: %v", err) + } + return nil + }, + }) + + jwtSecretFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "jwt-secret", + Usage: "Hex-encoded JWT secret for Ethereum Execution client Engine API", + EnvVars: []string{"SNODE_JWT_SECRET"}, + // Example default, replace with secure generation or require user input + Value: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c", + Action: func(_ *cli.Context, s string) error { + if len(s) != 64 { // 32 bytes = 64 hex characters + return 
fmt.Errorf("invalid jwt-secret: must be 64 hex characters (32 bytes)") + } + if _, err := hex.DecodeString(s); err != nil { + return fmt.Errorf("invalid jwt-secret: failed to decode hex: %v", err) + } + return nil + }, + }) + + logFmtFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "log-fmt", + Usage: "Log format ('text' or 'json')", + EnvVars: []string{"MEV_COMMIT_LOG_FMT"}, // Keep consistent env var if desired + Value: "text", + Action: stringInCheck("log-fmt", []string{"text", "json"}), + Category: categoryDebug, + }) + + logLevelFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "log-level", + Usage: "Log level ('debug', 'info', 'warn', 'error')", + EnvVars: []string{"MEV_COMMIT_LOG_LEVEL"}, // Keep consistent + Value: "info", + Action: stringInCheck("log-level", []string{"debug", "info", "warn", "error"}), + Category: categoryDebug, + }) + + logTagsFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "log-tags", + Usage: "Comma-separated log tags (e.g., env:prod,service:snode)", + EnvVars: []string{"MEV_COMMIT_LOG_TAGS"}, // Keep consistent + Action: func(ctx *cli.Context, s string) error { + if s == "" { + return nil + } + for i, p := range strings.Split(s, ",") { + if len(strings.Split(p, ":")) != 2 { + return fmt.Errorf("invalid log-tags at index %d, expecting <name>:<value>", i) + } + } + return nil + }, + Category: categoryDebug, + }) + + evmBuildDelayFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "evm-build-delay", + Usage: "Delay after initiating payload construction before calling getPayload (e.g., '200ms')", + EnvVars: []string{"SNODE_EVM_BUILD_DELAY"}, + Value: 200 * time.Millisecond, + }) + + evmBuildDelayEmptyBlockFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "evm-build-delay-empty-block", + Usage: "Minimum time since last block to build an empty block (0 to disable skipping, e.g., '2s')", + EnvVars: []string{"SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK"}, + Value: 2 * time.Second, + }) + + priorityFeeReceiptFlag =
altsrc.NewStringFlag(&cli.StringFlag{ + Name: "priority-fee-recipient", // Changed flag name for clarity + Usage: "Ethereum address for receiving priority fees (block proposer fee)", + EnvVars: []string{"SNODE_PRIORITY_FEE_RECIPIENT"}, + // Value: "0xYourFeeRecipientAddressHere", // Require this or ensure a safe default/handling + Required: true, // Making this required is safer + Action: func(c *cli.Context, s string) error { + if !strings.HasPrefix(s, "0x") || len(s) != 42 { + return fmt.Errorf("priority-fee-recipient must be a 0x-prefixed 42-character hex string") + } + // Basic validation, more robust hex address validation could be added + if _, err := hex.DecodeString(s[2:]); err != nil { + return fmt.Errorf("priority-fee-recipient is not a valid hex string: %v", err) + } + return nil + }, + }) +) + +func main() { + flags := []cli.Flag{ + configFlag, + instanceIDFlag, + ethClientURLFlag, + jwtSecretFlag, + logFmtFlag, + logLevelFlag, + logTagsFlag, + evmBuildDelayFlag, + evmBuildDelayEmptyBlockFlag, + priorityFeeReceiptFlag, + } + + app := &cli.App{ + Name: "snode", + Usage: "Single-node MEV-commit application", + Commands: []*cli.Command{ + { + Name: "start", + Usage: "Start the snode node", + Flags: flags, + Before: altsrc.InitInputSourceWithContext(flags, + func(c *cli.Context) (altsrc.InputSourceContext, error) { + configFile := c.String(configFlag.Name) + if configFile != "" { + return altsrc.NewYamlSourceFromFile(configFile) + } + return &altsrc.MapInputSource{}, nil // Empty source if no config file + }), + Action: func(c *cli.Context) error { + return startSingleNodeApplication(c) + }, + }, + }, + } + + if err := app.Run(os.Args); err != nil { + // Use app.Writer for logging consistency if logger not yet initialized + fmt.Fprintf(app.Writer, "Error running snode: %v\n", err) + os.Exit(1) + } +} + +func startSingleNodeApplication(c *cli.Context) error { + logger, err := util.NewLogger( + c.String(logLevelFlag.Name), + c.String(logFmtFlag.Name), + 
c.String(logTagsFlag.Name), + c.App.Writer, // Use CLI app's writer for logs + ) + if err != nil { + return fmt.Errorf("failed to create logger: %w", err) + } + logger = logger.With("app", "snode") + + cfg := singlenode.Config{ + InstanceID: c.String(instanceIDFlag.Name), + EthClientURL: c.String(ethClientURLFlag.Name), + JWTSecret: c.String(jwtSecretFlag.Name), + EVMBuildDelay: c.Duration(evmBuildDelayFlag.Name), + EVMBuildDelayEmptyBlocks: c.Duration(evmBuildDelayEmptyBlockFlag.Name), + PriorityFeeReceipt: c.String(priorityFeeReceiptFlag.Name), + } + + logger.Info("Starting snode with configuration", "config", cfg) // Be careful logging sensitive parts of config + + // Create a root context that can be cancelled for graceful shutdown + rootCtx, rootCancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer rootCancel() + + snode, err := singlenode.NewSingleNodeApp(rootCtx, cfg, logger) + if err != nil { + logger.Error("Failed to initialize SingleNodeApp", "error", err) + return err + } + + snode.Start() // Start the application's main loop + + // Wait for the application context to be done (e.g., OS signal) + <-rootCtx.Done() + + logger.Info("Shutdown signal received, stopping snode...") + snode.Stop() // Initiate graceful shutdown of the application + + logger.Info("SRApp shutdown completed.") + return nil +} diff --git a/cl/redisapp/leaderfollower/leaderfollower.go b/cl/redisapp/leaderfollower/leaderfollower.go index 71997be3e..6b748c42f 100644 --- a/cl/redisapp/leaderfollower/leaderfollower.go +++ b/cl/redisapp/leaderfollower/leaderfollower.go @@ -9,7 +9,7 @@ import ( "time" "github.com/heyvito/go-leader/leader" - "github.com/primev/mev-commit/cl/redisapp/util" + "github.com/primev/mev-commit/cl/util" "github.com/primev/mev-commit/cl/types" "github.com/redis/go-redis/v9" ) diff --git a/cl/redisapp/rapp.go b/cl/redisapp/rapp.go index b997990a0..e86fa7ea8 100644 --- a/cl/redisapp/rapp.go +++ b/cl/redisapp/rapp.go @@ -7,7 +7,7 @@ 
import ( "time" "github.com/primev/mev-commit/cl/ethclient" - "github.com/primev/mev-commit/cl/redisapp/blockbuilder" + "github.com/primev/mev-commit/cl/blockbuilder" "github.com/primev/mev-commit/cl/redisapp/leaderfollower" "github.com/primev/mev-commit/cl/redisapp/state" "github.com/redis/go-redis/v9" diff --git a/cl/singlenode/singlenode.go b/cl/singlenode/singlenode.go new file mode 100644 index 000000000..1d990e82b --- /dev/null +++ b/cl/singlenode/singlenode.go @@ -0,0 +1,222 @@ +package singlenode + +import ( + "context" + "encoding/hex" + "errors" + "log/slog" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/primev/mev-commit/cl/blockbuilder" + "github.com/primev/mev-commit/cl/ethclient" + "github.com/primev/mev-commit/cl/singlenode/state" + "github.com/primev/mev-commit/cl/types" + "github.com/primev/mev-commit/cl/util" +) + +// Config holds the configuration for the SingleNodeApp. +type Config struct { + InstanceID string + EthClientURL string + JWTSecret string + EVMBuildDelay time.Duration + EVMBuildDelayEmptyBlocks time.Duration + PriorityFeeReceipt string +} + +// SingleNodeApp orchestrates block production for a single node. +type SingleNodeApp struct { + logger *slog.Logger + cfg Config + blockBuilder *blockbuilder.BlockBuilder + stateManager *localstate.LocalStateManager + engineClient blockbuilder.EngineClient // Keep a reference if needed for direct calls, though BB handles most + appCtx context.Context + cancel context.CancelFunc + wg sync.WaitGroup +} + +// NewSingleNodeApp creates and initializes a new SingleNodeApp. 
+func NewSingleNodeApp( + appCtx context.Context, // Parent context + cfg Config, + logger *slog.Logger, +) (*SingleNodeApp, error) { + ctx, cancel := context.WithCancel(appCtx) + + jwtBytes, err := hex.DecodeString(cfg.JWTSecret) + if err != nil { + cancel() + logger.Error("Failed to decode JWT secret", "error", err) + return nil, err + } + + engineCL, err := ethclient.NewAuthClient(ctx, cfg.EthClientURL, jwtBytes) + if err != nil { + cancel() + logger.Error("Failed to create Ethereum engine client", "error", err) + return nil, err + } + + stateMgr := localstate.NewLocalStateManager(logger.With("component", "LocalStateManager")) + bb := blockbuilder.NewBlockBuilder( + stateMgr, + engineCL, + logger.With("component", "BlockBuilder"), + cfg.EVMBuildDelay, + cfg.EVMBuildDelayEmptyBlocks, + cfg.PriorityFeeReceipt, + ) + + return &SingleNodeApp{ + logger: logger, + cfg: cfg, + blockBuilder: bb, + stateManager: stateMgr, + engineClient: engineCL, // Stored if needed + appCtx: ctx, + cancel: cancel, + }, nil +} + +// Start begins the main block production loop. +func (app *SingleNodeApp) Start() { + app.logger.Info("Starting SingleNodeApp...") + app.wg.Add(1) + go func() { + defer app.wg.Done() + defer app.logger.Info("SingleNodeApp run loop finished.") + app.runLoop() + }() +} + +func (app *SingleNodeApp) runLoop() { + app.logger.Info("SingleNodeApp run loop started", "instanceID", app.cfg.InstanceID) + + // On startup, process any pending block from a previous (crashed) session. + // With LocalStateManager (in-memory), this is mostly for conceptual completeness + // unless persistence is added to LocalStateManager. + // if err := app.blockBuilder.ProcessLastPayload(app.appCtx); err != nil { + // // If ProcessLastPayload fails (e.g., after retries on FinalizeBlock), + // // it might indicate a persistent issue with Geth. 
+ // if errors.Is(err, util.ErrFailedAfterNAttempts) || errors.Is(err, context.Canceled) { + // app.logger.Error("Critical error processing last payload at startup. Shutting down.", "error", err) + // app.cancel() // Trigger shutdown for the rest of the app + // return + // } + // app.logger.Warn("Non-critical error processing last payload at startup. State might be reset.", "error", err) + // // Attempt to reset state to ensure clean start if ProcessLastPayload had an issue but wasn't critical. + // if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { + // app.logger.Error("Failed to reset state after ProcessLastPayload error. Shutting down.", "error", resetErr) + // app.cancel() + // return + // } + // } + + // Main production loop + for { + select { + case <-app.appCtx.Done(): + app.logger.Info("SingleNodeApp run loop stopping due to context cancellation.") + return + default: + // Determine current step from state + currentState := app.stateManager.GetBlockBuildState(app.appCtx) + var err error + + switch currentState.CurrentStep { + case types.StepBuildBlock: + app.logger.Info("RunLoop: StepBuildBlock") + err = app.blockBuilder.GetPayload(app.appCtx) + if err != nil { + app.logger.Error("RunLoop: GetPayload failed", "error", err) + // If GetPayload fails, state remains StepBuildBlock. + // Retry is handled within GetPayload. If it returns ErrFailedAfterNAttempts, + // it's a critical error. + if errors.Is(err, util.ErrFailedAfterNAttempts) { + app.logger.Error("RunLoop: GetPayload failed critically after retries. Shutting down.", "error", err) + app.cancel() + return + } + // For other errors, or if GetPayload skipped an empty block (err == nil but state not changed), + // the loop will retry. Add a small delay to prevent tight spin on persistent non-critical errors. + time.Sleep(500 * time.Millisecond) + } + // If GetPayload was successful and built a block, stateManager would have updated + // CurrentStep to StepFinalizeBlock. 
If it skipped an empty block, CurrentStep remains StepBuildBlock. + + case types.StepFinalizeBlock: + app.logger.Info("RunLoop: StepFinalizeBlock", "payload_id", currentState.PayloadID) + err = app.blockBuilder.FinalizeBlock(app.appCtx, currentState.PayloadID, currentState.ExecutionPayload, "") + if err != nil { + app.logger.Error("RunLoop: FinalizeBlock failed", "error", err) + // If FinalizeBlock fails, state remains StepFinalizeBlock. + // Retry is handled within FinalizeBlock. If it returns ErrFailedAfterNAttempts, critical. + if errors.Is(err, util.ErrFailedAfterNAttempts) { + app.logger.Error("RunLoop: FinalizeBlock failed critically after retries. Shutting down.", "error", err) + app.cancel() + return + } + // If error is due to payload validation (permanent), reset state to avoid retrying same bad payload. + var bErr *backoff.PermanentError + if errors.As(err, &bErr) && strings.Contains(bErr.Err.Error(), "execution payload validation failed") { + app.logger.Error("RunLoop: FinalizeBlock failed due to permanent payload validation error. Resetting state.", "original_error", bErr.Err) + if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { + app.logger.Error("RunLoop: Failed to reset state after permanent FinalizeBlock error. Shutting down.", "error", resetErr) + app.cancel() + } + return // return from switch case, not entire loop, to re-evaluate state + } + // For other errors, loop will retry. Add delay. + time.Sleep(500 * time.Millisecond) + } else { + // FinalizeBlock successful, reset state for the next block. + app.logger.Info("RunLoop: FinalizeBlock successful. Resetting state for next block.") + if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { + app.logger.Error("RunLoop: Failed to reset state after successful FinalizeBlock. 
Shutting down.", "error", resetErr) + app.cancel() + return + } + } + + default: + app.logger.Warn("RunLoop: Unknown current step in state", "step", currentState.CurrentStep.String()) + if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { + app.logger.Error("RunLoop: Failed to reset state from unknown step. Shutting down.", "error", resetErr) + app.cancel() + return + } + time.Sleep(1 * time.Second) // Pause if in unknown state before retrying + } + // A short general delay to prevent extremely tight loops if conditions lead to no state change + // For example, if GetPayload consistently skips empty blocks very fast. + // This can be tied to blockBuilder.GetBuildDelay() or a fixed minimum. + time.Sleep(100 * time.Millisecond) + } + } +} + +// Stop signals the application to shut down and waits for goroutines to finish. +func (app *SingleNodeApp) Stop() { + app.logger.Info("Stopping SingleNodeApp...") + app.cancel() // Signal all operations using app.appCtx to stop + + // Wait for the main run loop to finish + // Set a timeout for waiting to prevent indefinite blocking. + waitCh := make(chan struct{}) + go func() { + app.wg.Wait() + close(waitCh) + }() + + select { + case <-waitCh: + app.logger.Info("SingleNodeApp run loop shut down gracefully.") + case <-time.After(5 * time.Second): // Timeout for shutdown + app.logger.Warn("SingleNodeApp shutdown timed out waiting for run loop.") + } + app.logger.Info("SingleNodeApp stopped.") +} diff --git a/cl/singlenode/state/localstate.go b/cl/singlenode/state/localstate.go new file mode 100644 index 000000000..df7c19227 --- /dev/null +++ b/cl/singlenode/state/localstate.go @@ -0,0 +1,68 @@ +package localstate + +import ( + "context" + "log/slog" + "sync" + + "github.com/primev/mev-commit/cl/types" +) + +// LocalStateManager implements the blockbuilder.StateManager interface for single-node operation. +// It manages state in-memory. 
+type LocalStateManager struct { + mu sync.RWMutex + blockBuildState *types.BlockBuildState + logger *slog.Logger +} + +// NewLocalStateManager creates a new LocalStateManager. +func NewLocalStateManager(logger *slog.Logger) *LocalStateManager { + return &LocalStateManager{ + // Initialize with a default state, typically to start building a block. + blockBuildState: &types.BlockBuildState{ + CurrentStep: types.StepBuildBlock, + }, + logger: logger, + } +} + +// SaveBlockStateAndPublishToStream saves the block state locally. +// The "PublishToStream" aspect is a NOP for this local manager. +func (lsm *LocalStateManager) SaveBlockStateAndPublishToStream(_ context.Context, state *types.BlockBuildState) error { + lsm.mu.Lock() + defer lsm.mu.Unlock() + + lsm.blockBuildState = state // Store the provided state + lsm.logger.Info("LocalStateManager: Saved block state", + "step", state.CurrentStep.String(), + "payload_id", state.PayloadID) + return nil +} + +// GetBlockBuildState retrieves the current block build state. +func (lsm *LocalStateManager) GetBlockBuildState(_ context.Context) types.BlockBuildState { + lsm.mu.RLock() + defer lsm.mu.RUnlock() + + if lsm.blockBuildState == nil { + // This should ideally not happen if constructor initializes it. + lsm.logger.Error("LocalStateManager: blockBuildState is nil, returning default. This indicates an issue.") + return types.BlockBuildState{CurrentStep: types.StepBuildBlock} + } + // Return a copy to prevent external modification of the internal state. + stateCopy := *lsm.blockBuildState + return stateCopy +} + +// ResetBlockState resets the block build state to the initial step (StepBuildBlock). 
+func (lsm *LocalStateManager) ResetBlockState(_ context.Context) error { + lsm.mu.Lock() + defer lsm.mu.Unlock() + + lsm.blockBuildState = &types.BlockBuildState{ + CurrentStep: types.StepBuildBlock, + } + lsm.logger.Info("LocalStateManager: Reset block state to StepBuildBlock") + return nil +} diff --git a/cl/redisapp/util/retry.go b/cl/util/retry.go similarity index 100% rename from cl/redisapp/util/retry.go rename to cl/util/retry.go From e78e64fd5c198aca2cedb738a1b949294683ef38 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Thu, 15 May 2025 19:29:35 +0200 Subject: [PATCH 04/24] feat: added single node --- cl/blockbuilder/blockbuilder.go | 30 +-- cl/blockbuilder/blockbuilder_test.go | 11 +- cl/cmd/singlenode/main.go | 35 ++-- cl/ethclient/engineclient.go | 22 ++- cl/redisapp/leaderfollower/leaderfollower.go | 7 +- cl/singlenode/singlenode.go | 194 ++++++++----------- cl/singlenode/state/localstate.go | 7 +- 7 files changed, 153 insertions(+), 153 deletions(-) diff --git a/cl/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go index cef88e680..418308173 100644 --- a/cl/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -15,18 +15,20 @@ import ( "github.com/cenkalti/backoff/v4" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" etypes "github.com/ethereum/go-ethereum/core/types" - "github.com/primev/mev-commit/cl/util" "github.com/primev/mev-commit/cl/types" + "github.com/primev/mev-commit/cl/util" "github.com/vmihailenco/msgpack/v5" ) const maxAttempts = 3 -type EngineClient interface { - NewPayloadV3(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, - beaconRoot *common.Hash) (engine.PayloadStatusV1, error) +var ErrEmptyBlock = errors.New("payloadID is empty") +type EngineClient interface { + NewPayloadV4(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, + beaconRoot 
*common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) ForkchoiceUpdatedV3(ctx context.Context, update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) @@ -210,16 +212,16 @@ func (bb *BlockBuilder) GetPayload(ctx context.Context) error { return fmt.Errorf("failed to get payload: %w", err) } - hasTransactions := len(payloadResp.ExecutionPayload.Transactions) > 0 + // hasTransactions := len(payloadResp.ExecutionPayload.Transactions) > 0 now := time.Now() - timeSinceLastBlock := now.Sub(bb.lastBlockTime) - if !hasTransactions && timeSinceLastBlock < bb.buildEmptyBlocksDelay { - bb.logger.Info( - "Leader: Skipping empty block", - "timeSinceLastBlock", timeSinceLastBlock, - ) - return nil - } + // timeSinceLastBlock := now.Sub(bb.lastBlockTime) + // if !hasTransactions && timeSinceLastBlock < bb.buildEmptyBlocksDelay { + // bb.logger.Info( + // "Leader: Skipping empty block", + // "timeSinceLastBlock", timeSinceLastBlock, + // ) + // return ErrEmptyBlock + // } payloadData, err := msgpack.Marshal(payloadResp.ExecutionPayload) if err != nil { @@ -449,7 +451,7 @@ func (bb *BlockBuilder) selectRetryFunction(ctx context.Context, msgID string) f func (bb *BlockBuilder) pushNewPayload(ctx context.Context, executionPayload engine.ExecutableData, hash common.Hash, retryFunc func(f func() error) error) error { emptyVersionHashes := []common.Hash{} return retryFunc(func() error { - status, err := bb.engineCl.NewPayloadV3(ctx, executionPayload, emptyVersionHashes, &hash) + status, err := bb.engineCl.NewPayloadV4(ctx, executionPayload, emptyVersionHashes, &hash, []hexutil.Bytes{}) if err != nil || isUnknown(status) { bb.logger.Error("Failed to push new payload", "error", err) return err // Will retry diff --git a/cl/blockbuilder/blockbuilder_test.go b/cl/blockbuilder/blockbuilder_test.go index 4c365046f..eca0df5a2 100644 --- a/cl/blockbuilder/blockbuilder_test.go +++ 
b/cl/blockbuilder/blockbuilder_test.go @@ -21,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" etypes "github.com/ethereum/go-ethereum/core/types" redismock "github.com/go-redis/redismock/v9" @@ -48,8 +49,8 @@ func (m *MockEngineClient) GetPayloadV3(ctx context.Context, payloadID engine.Pa return args.Get(0).(*engine.ExecutionPayloadEnvelope), args.Error(1) } -func (m *MockEngineClient) NewPayloadV3(ctx context.Context, executionPayload engine.ExecutableData, versionHashes []common.Hash, randao *common.Hash) (engine.PayloadStatusV1, error) { - args := m.Called(ctx, executionPayload, versionHashes, randao) +func (m *MockEngineClient) NewPayloadV4(ctx context.Context, executionPayload engine.ExecutableData, versionHashes []common.Hash, randao *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) { + args := m.Called(ctx, executionPayload, versionHashes, randao, executionRequests) return args.Get(0).(engine.PayloadStatusV1), args.Error(1) } @@ -250,7 +251,7 @@ func TestBlockBuilder_FinalizeBlock(t *testing.T) { Status: engine.VALID, } - mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) + mockEngineClient.On("NewPayloadV4", mock.Anything, executionPayload, []common.Hash{}, mock.Anything, mock.Anything).Return(payloadStatus, nil) hash := executionPayload.BlockHash fcs := engine.ForkchoiceStateV1{ @@ -519,7 +520,7 @@ func TestBlockBuilder_FinalizeBlock_NewPayloadInvalidStatus(t *testing.T) { payloadStatus := engine.PayloadStatusV1{ Status: "INVALID", } - mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) + mockEngineClient.On("NewPayloadV4", mock.Anything, executionPayload, []common.Hash{}, mock.Anything, mock.Anything).Return(payloadStatus, nil) blockBuilder.executionHead = 
executionHead err = blockBuilder.FinalizeBlock(ctx, payloadIDStr, executionPayloadEncoded, "") @@ -578,7 +579,7 @@ func TestBlockBuilder_FinalizeBlock_ForkchoiceUpdatedInvalidStatus(t *testing.T) payloadStatus := engine.PayloadStatusV1{ Status: engine.VALID, } - mockEngineClient.On("NewPayloadV3", mock.Anything, executionPayload, []common.Hash{}, mock.Anything).Return(payloadStatus, nil) + mockEngineClient.On("NewPayloadV4", mock.Anything, executionPayload, []common.Hash{}, mock.Anything, mock.Anything).Return(payloadStatus, nil) fcs := engine.ForkchoiceStateV1{ HeadBlockHash: executionPayload.BlockHash, diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index cc221b9b2..84c22503a 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -33,7 +33,6 @@ var ( } ) -// CLI Flags (subset of redisapp, without Redis-specific ones) var ( configFlag = &cli.StringFlag{ Name: "config", @@ -71,10 +70,9 @@ var ( Name: "jwt-secret", Usage: "Hex-encoded JWT secret for Ethereum Execution client Engine API", EnvVars: []string{"SNODE_JWT_SECRET"}, - // Example default, replace with secure generation or require user input - Value: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c", + Value: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c", Action: func(_ *cli.Context, s string) error { - if len(s) != 64 { // 32 bytes = 64 hex characters + if len(s) != 64 { return fmt.Errorf("invalid jwt-secret: must be 64 hex characters (32 bytes)") } if _, err := hex.DecodeString(s); err != nil { @@ -87,7 +85,7 @@ var ( logFmtFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "log-fmt", Usage: "Log format ('text' or 'json')", - EnvVars: []string{"MEV_COMMIT_LOG_FMT"}, // Keep consistent env var if desired + EnvVars: []string{"MEV_COMMIT_LOG_FMT"}, Value: "text", Action: stringInCheck("log-fmt", []string{"text", "json"}), Category: categoryDebug, @@ -96,7 +94,7 @@ var ( logLevelFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: 
"log-level", Usage: "Log level ('debug', 'info', 'warn', 'error')", - EnvVars: []string{"MEV_COMMIT_LOG_LEVEL"}, // Keep consistent + EnvVars: []string{"MEV_COMMIT_LOG_LEVEL"}, Value: "info", Action: stringInCheck("log-level", []string{"debug", "info", "warn", "error"}), Category: categoryDebug, @@ -105,7 +103,7 @@ var ( logTagsFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "log-tags", Usage: "Comma-separated log tags (e.g., env:prod,service:snode)", - EnvVars: []string{"MEV_COMMIT_LOG_TAGS"}, // Keep consistent + EnvVars: []string{"MEV_COMMIT_LOG_TAGS"}, Action: func(ctx *cli.Context, s string) error { if s == "" { return nil @@ -124,7 +122,7 @@ var ( Name: "evm-build-delay", Usage: "Delay after initiating payload construction before calling getPayload (e.g., '200ms')", EnvVars: []string{"SNODE_EVM_BUILD_DELAY"}, - Value: 200 * time.Millisecond, + Value: 100 * time.Millisecond, }) evmBuildDelayEmptyBlockFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ @@ -135,16 +133,15 @@ var ( }) priorityFeeReceiptFlag = altsrc.NewStringFlag(&cli.StringFlag{ - Name: "priority-fee-recipient", // Changed flag name for clarity - Usage: "Ethereum address for receiving priority fees (block proposer fee)", - EnvVars: []string{"SNODE_PRIORITY_FEE_RECIPIENT"}, - // Value: "0xYourFeeRecipientAddressHere", // Require this or ensure a safe default/handling - Required: true, // Making this required is safer + Name: "priority-fee-recipient", + Usage: "Ethereum address for receiving priority fees (block proposer fee)", + EnvVars: []string{"SNODE_PRIORITY_FEE_RECIPIENT"}, + Required: true, Action: func(c *cli.Context, s string) error { if !strings.HasPrefix(s, "0x") || len(s) != 42 { return fmt.Errorf("priority-fee-recipient must be a 0x-prefixed 42-character hex string") } - // Basic validation, more robust hex address validation could be added + // Basic validation if _, err := hex.DecodeString(s[2:]); err != nil { return fmt.Errorf("priority-fee-recipient is not a valid hex string: %v", 
err) } @@ -181,7 +178,7 @@ func main() { if configFile != "" { return altsrc.NewYamlSourceFromFile(configFile) } - return &altsrc.MapInputSource{}, nil // Empty source if no config file + return &altsrc.MapInputSource{}, nil }), Action: func(c *cli.Context) error { return startSingleNodeApplication(c) @@ -191,7 +188,6 @@ func main() { } if err := app.Run(os.Args); err != nil { - // Use app.Writer for logging consistency if logger not yet initialized fmt.Fprintf(app.Writer, "Error running snode: %v\n", err) os.Exit(1) } @@ -202,7 +198,7 @@ func startSingleNodeApplication(c *cli.Context) error { c.String(logLevelFlag.Name), c.String(logFmtFlag.Name), c.String(logTagsFlag.Name), - c.App.Writer, // Use CLI app's writer for logs + c.App.Writer, ) if err != nil { return fmt.Errorf("failed to create logger: %w", err) @@ -230,13 +226,12 @@ func startSingleNodeApplication(c *cli.Context) error { return err } - snode.Start() // Start the application's main loop + snode.Start() - // Wait for the application context to be done (e.g., OS signal) <-rootCtx.Done() logger.Info("Shutdown signal received, stopping snode...") - snode.Stop() // Initiate graceful shutdown of the application + snode.Stop() logger.Info("SRApp shutdown completed.") return nil diff --git a/cl/ethclient/engineclient.go b/cl/ethclient/engineclient.go index 96ba4053b..e7af835bb 100644 --- a/cl/ethclient/engineclient.go +++ b/cl/ethclient/engineclient.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" ) @@ -16,6 +17,7 @@ const ( newPayloadV2 = "engine_newPayloadV2" newPayloadV3 = "engine_newPayloadV3" + newPayloadV4 = "engine_newPayloadV4" forkchoiceUpdatedV2 = "engine_forkchoiceUpdatedV2" forkchoiceUpdatedV3 = "engine_forkchoiceUpdatedV3" @@ -34,7 +36,9 @@ type EngineClient interface { // NewPayloadV3 creates an Eth1 block, inserts it in the chain, and 
returns the status of the chain. NewPayloadV3(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) - + NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, + executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) + // ForkchoiceUpdatedV2 has several responsibilities: // - It sets the chain the head. // - And/or it sets the chain's finalized block hash. @@ -106,6 +110,22 @@ func (c engineClient) NewPayloadV3(ctx context.Context, params engine.Executable return resp, nil } +func (c engineClient) NewPayloadV4(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, + beaconRoot *common.Hash, executionRequests []hexutil.Bytes, +) (engine.PayloadStatusV1, error) { + const endpoint = "new_payload_v4" + defer latency(c.chain, endpoint)() + + var resp engine.PayloadStatusV1 + err := c.cl.Client().CallContext(ctx, &resp, newPayloadV4, params, versionedHashes, beaconRoot, executionRequests) + if err != nil { + incError(c.chain, endpoint) + return engine.PayloadStatusV1{}, fmt.Errorf("rpc new payload v4: %w", err) + } + + return resp, nil +} + func (c engineClient) ForkchoiceUpdatedV2(ctx context.Context, update engine.ForkchoiceStateV1, payloadAttributes *engine.PayloadAttributes, ) (engine.ForkChoiceResponse, error) { diff --git a/cl/redisapp/leaderfollower/leaderfollower.go b/cl/redisapp/leaderfollower/leaderfollower.go index 6b748c42f..f7b38d30f 100644 --- a/cl/redisapp/leaderfollower/leaderfollower.go +++ b/cl/redisapp/leaderfollower/leaderfollower.go @@ -9,8 +9,9 @@ import ( "time" "github.com/heyvito/go-leader/leader" - "github.com/primev/mev-commit/cl/util" + "github.com/primev/mev-commit/cl/blockbuilder" "github.com/primev/mev-commit/cl/types" + "github.com/primev/mev-commit/cl/util" "github.com/redis/go-redis/v9" ) @@ -211,6 +212,10 @@ func (lfm *LeaderFollowerManager) leaderWork(ctx context.Context) 
error { case types.StepBuildBlock: lfm.logger.Info("Leader: StepBuildBlock") if err := lfm.blockBuilder.GetPayload(ctx); err != nil { + if errors.Is(err, blockbuilder.ErrEmptyBlock) { + lfm.logger.Info("Leader: Empty block, skipping") + return nil + } lfm.logger.Error( "Leader: GetPayload failed", "error", err, diff --git a/cl/singlenode/singlenode.go b/cl/singlenode/singlenode.go index 1d990e82b..d1cace9f1 100644 --- a/cl/singlenode/singlenode.go +++ b/cl/singlenode/singlenode.go @@ -4,17 +4,19 @@ import ( "context" "encoding/hex" "errors" + "fmt" "log/slog" - "strings" "sync" "time" - "github.com/cenkalti/backoff/v4" "github.com/primev/mev-commit/cl/blockbuilder" "github.com/primev/mev-commit/cl/ethclient" - "github.com/primev/mev-commit/cl/singlenode/state" - "github.com/primev/mev-commit/cl/types" - "github.com/primev/mev-commit/cl/util" + localstate "github.com/primev/mev-commit/cl/singlenode/state" +) + +const ( + // Stop Function + shutdownTimeout = 5 * time.Second ) // Config holds the configuration for the SingleNodeApp. @@ -33,7 +35,6 @@ type SingleNodeApp struct { cfg Config blockBuilder *blockbuilder.BlockBuilder stateManager *localstate.LocalStateManager - engineClient blockbuilder.EngineClient // Keep a reference if needed for direct calls, though BB handles most appCtx context.Context cancel context.CancelFunc wg sync.WaitGroup @@ -41,23 +42,29 @@ type SingleNodeApp struct { // NewSingleNodeApp creates and initializes a new SingleNodeApp. 
func NewSingleNodeApp( - appCtx context.Context, // Parent context + parentCtx context.Context, cfg Config, logger *slog.Logger, ) (*SingleNodeApp, error) { - ctx, cancel := context.WithCancel(appCtx) + ctx, cancel := context.WithCancel(parentCtx) jwtBytes, err := hex.DecodeString(cfg.JWTSecret) if err != nil { - cancel() - logger.Error("Failed to decode JWT secret", "error", err) + cancel() // Cancel the derived context + logger.Error( + "failed to decode JWT secret", + "error", err, + ) return nil, err } engineCL, err := ethclient.NewAuthClient(ctx, cfg.EthClientURL, jwtBytes) if err != nil { - cancel() - logger.Error("Failed to create Ethereum engine client", "error", err) + cancel() // Cancel the derived context + logger.Error( + "failed to create Ethereum engine client", + "error", err, + ) return nil, err } @@ -76,7 +83,6 @@ func NewSingleNodeApp( cfg: cfg, blockBuilder: bb, stateManager: stateMgr, - engineClient: engineCL, // Stored if needed appCtx: ctx, cancel: cancel, }, nil @@ -93,119 +99,89 @@ func (app *SingleNodeApp) Start() { }() } +// shutdownWithError handles errors during the run loop and initiates a shutdown. +func (app *SingleNodeApp) shutdownWithError(err error, message string, args ...any) { + // slog handles key-value pairs directly + logArgs := append(args, "error", err) + app.logger.Error(message, logArgs...) + app.cancel() +} + +// resetBlockProduction clears state and prepares for a new block production cycle. +// It returns true if a shutdown is initiated due to a reset failure. +func (app *SingleNodeApp) resetBlockProduction(logMessage string, logArgs ...interface{}) (shutdownInitiated bool) { + app.logger.Info(logMessage, logArgs...) 
+ if err := app.stateManager.ResetBlockState(app.appCtx); err != nil { + app.shutdownWithError(err, "Failed to reset block state during run loop operations") + return true + } + return false +} + func (app *SingleNodeApp) runLoop() { app.logger.Info("SingleNodeApp run loop started", "instanceID", app.cfg.InstanceID) - // On startup, process any pending block from a previous (crashed) session. - // With LocalStateManager (in-memory), this is mostly for conceptual completeness - // unless persistence is added to LocalStateManager. - // if err := app.blockBuilder.ProcessLastPayload(app.appCtx); err != nil { - // // If ProcessLastPayload fails (e.g., after retries on FinalizeBlock), - // // it might indicate a persistent issue with Geth. - // if errors.Is(err, util.ErrFailedAfterNAttempts) || errors.Is(err, context.Canceled) { - // app.logger.Error("Critical error processing last payload at startup. Shutting down.", "error", err) - // app.cancel() // Trigger shutdown for the rest of the app - // return - // } - // app.logger.Warn("Non-critical error processing last payload at startup. State might be reset.", "error", err) - // // Attempt to reset state to ensure clean start if ProcessLastPayload had an issue but wasn't critical. - // if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { - // app.logger.Error("Failed to reset state after ProcessLastPayload error. 
Shutting down.", "error", resetErr) - // app.cancel() - // return - // } - // } - - // Main production loop + // Make sure we're starting with a clean state + if app.resetBlockProduction("Initializing block production state") { + return // Shutdown initiated by resetBlockProduction + } + for { select { case <-app.appCtx.Done(): app.logger.Info("SingleNodeApp run loop stopping due to context cancellation.") return default: - // Determine current step from state - currentState := app.stateManager.GetBlockBuildState(app.appCtx) - var err error - - switch currentState.CurrentStep { - case types.StepBuildBlock: - app.logger.Info("RunLoop: StepBuildBlock") - err = app.blockBuilder.GetPayload(app.appCtx) - if err != nil { - app.logger.Error("RunLoop: GetPayload failed", "error", err) - // If GetPayload fails, state remains StepBuildBlock. - // Retry is handled within GetPayload. If it returns ErrFailedAfterNAttempts, - // it's a critical error. - if errors.Is(err, util.ErrFailedAfterNAttempts) { - app.logger.Error("RunLoop: GetPayload failed critically after retries. Shutting down.", "error", err) - app.cancel() - return - } - // For other errors, or if GetPayload skipped an empty block (err == nil but state not changed), - // the loop will retry. Add a small delay to prevent tight spin on persistent non-critical errors. - time.Sleep(500 * time.Millisecond) - } - // If GetPayload was successful and built a block, stateManager would have updated - // CurrentStep to StepFinalizeBlock. If it skipped an empty block, CurrentStep remains StepBuildBlock. - - case types.StepFinalizeBlock: - app.logger.Info("RunLoop: StepFinalizeBlock", "payload_id", currentState.PayloadID) - err = app.blockBuilder.FinalizeBlock(app.appCtx, currentState.PayloadID, currentState.ExecutionPayload, "") - if err != nil { - app.logger.Error("RunLoop: FinalizeBlock failed", "error", err) - // If FinalizeBlock fails, state remains StepFinalizeBlock. - // Retry is handled within FinalizeBlock. 
If it returns ErrFailedAfterNAttempts, critical. - if errors.Is(err, util.ErrFailedAfterNAttempts) { - app.logger.Error("RunLoop: FinalizeBlock failed critically after retries. Shutting down.", "error", err) - app.cancel() - return - } - // If error is due to payload validation (permanent), reset state to avoid retrying same bad payload. - var bErr *backoff.PermanentError - if errors.As(err, &bErr) && strings.Contains(bErr.Err.Error(), "execution payload validation failed") { - app.logger.Error("RunLoop: FinalizeBlock failed due to permanent payload validation error. Resetting state.", "original_error", bErr.Err) - if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { - app.logger.Error("RunLoop: Failed to reset state after permanent FinalizeBlock error. Shutting down.", "error", resetErr) - app.cancel() - } - return // return from switch case, not entire loop, to re-evaluate state - } - // For other errors, loop will retry. Add delay. - time.Sleep(500 * time.Millisecond) - } else { - // FinalizeBlock successful, reset state for the next block. - app.logger.Info("RunLoop: FinalizeBlock successful. Resetting state for next block.") - if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { - app.logger.Error("RunLoop: Failed to reset state after successful FinalizeBlock. Shutting down.", "error", resetErr) - app.cancel() - return - } - } - - default: - app.logger.Warn("RunLoop: Unknown current step in state", "step", currentState.CurrentStep.String()) - if resetErr := app.stateManager.ResetBlockState(app.appCtx); resetErr != nil { - app.logger.Error("RunLoop: Failed to reset state from unknown step. 
Shutting down.", "error", resetErr) - app.cancel() - return + // Directly run the block production cycle without steps + if err := app.produceBlock(); err != nil { + if errors.Is(err, blockbuilder.ErrEmptyBlock) { + // Handle empty block error + app.logger.Info("empty block produced, waiting for new payload") + continue } - time.Sleep(1 * time.Second) // Pause if in unknown state before retrying + // Handle errors but continue the loop + app.logger.Error( + "block production cycle failed", + "error", err, + ) + } + // Successful block production, reset for the next block + if app.resetBlockProduction("Block production successful. Resetting state for next block.") { + // 0 chance to happen, if in-memory store is used + return // Shutdown initiated by resetBlockProduction } - // A short general delay to prevent extremely tight loops if conditions lead to no state change - // For example, if GetPayload consistently skips empty blocks very fast. - // This can be tied to blockBuilder.GetBuildDelay() or a fixed minimum. - time.Sleep(100 * time.Millisecond) + } } } +// produceBlock handles the entire block production cycle in a direct, procedural manner +func (app *SingleNodeApp) produceBlock() error { + // Step 1: Build the block + if err := app.blockBuilder.GetPayload(app.appCtx); err != nil { + return fmt.Errorf("failed to get payload: %w", err) + } + + // Retrieve the current state after payload creation + currentState := app.stateManager.GetBlockBuildState(app.appCtx) + if currentState.PayloadID == "" { + return errors.New("payload ID is empty after GetPayload call") + } + + // Step 2: Finalize the block + app.logger.Info("Finalizing block", "payload_id", currentState.PayloadID) + if err := app.blockBuilder.FinalizeBlock(app.appCtx, currentState.PayloadID, currentState.ExecutionPayload, ""); err != nil { + return fmt.Errorf("failed to finalize block: %w", err) + } + + return nil +} + // Stop signals the application to shut down and waits for goroutines to finish. 
func (app *SingleNodeApp) Stop() { - app.logger.Info("Stopping SingleNodeApp...") - app.cancel() // Signal all operations using app.appCtx to stop + app.logger.Info("stopping SingleNodeApp...") + app.cancel() - // Wait for the main run loop to finish - // Set a timeout for waiting to prevent indefinite blocking. waitCh := make(chan struct{}) go func() { app.wg.Wait() @@ -215,7 +191,7 @@ func (app *SingleNodeApp) Stop() { select { case <-waitCh: app.logger.Info("SingleNodeApp run loop shut down gracefully.") - case <-time.After(5 * time.Second): // Timeout for shutdown + case <-time.After(shutdownTimeout): app.logger.Warn("SingleNodeApp shutdown timed out waiting for run loop.") } app.logger.Info("SingleNodeApp stopped.") diff --git a/cl/singlenode/state/localstate.go b/cl/singlenode/state/localstate.go index df7c19227..92b714dcd 100644 --- a/cl/singlenode/state/localstate.go +++ b/cl/singlenode/state/localstate.go @@ -34,9 +34,11 @@ func (lsm *LocalStateManager) SaveBlockStateAndPublishToStream(_ context.Context defer lsm.mu.Unlock() lsm.blockBuildState = state // Store the provided state - lsm.logger.Info("LocalStateManager: Saved block state", + lsm.logger.Info( + "LocalStateManager: Saved block state", "step", state.CurrentStep.String(), - "payload_id", state.PayloadID) + "payload_id", state.PayloadID, + ) return nil } @@ -63,6 +65,5 @@ func (lsm *LocalStateManager) ResetBlockState(_ context.Context) error { lsm.blockBuildState = &types.BlockBuildState{ CurrentStep: types.StepBuildBlock, } - lsm.logger.Info("LocalStateManager: Reset block state to StepBuildBlock") return nil } From f1915a369e230186ff6452a55e8e5b4392283701 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Thu, 15 May 2025 19:31:34 +0200 Subject: [PATCH 05/24] feat: upload genesis.json --- cl/geth-setup/genesis.json | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/cl/geth-setup/genesis.json b/cl/geth-setup/genesis.json index 927ac64cb..b1b046759 100644 --- 
a/cl/geth-setup/genesis.json +++ b/cl/geth-setup/genesis.json @@ -18,6 +18,19 @@ "mergeNetsplitBlock": 0, "shanghaiTime": 0, "cancunTime": 0, + "pragueTime": 0, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + }, "terminalTotalDifficulty": 0, "terminalTotalDifficultyPassed": true }, @@ -47,7 +60,7 @@ "number": "0x0", "gasUsed": "0x0", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "baseFeePerGas": null, - "excessBlobGas": null, - "blobGasUsed": null + "baseFeePerGas": "0x7", + "excessBlobGas": "0x0", + "blobGasUsed": "0x0" } From 314779057432dc58ad368ada64c62c9998aab6fe Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Thu, 15 May 2025 19:48:14 +0200 Subject: [PATCH 06/24] feat: add readme --- cl/README.md | 91 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 87 insertions(+), 4 deletions(-) diff --git a/cl/README.md b/cl/README.md index cbf88ce14..67154a055 100644 --- a/cl/README.md +++ b/cl/README.md @@ -2,7 +2,7 @@ ## Introduction -This project sets up a local Ethereum environment with two Geth nodes, a Redis instance, and a consensus client. The setup is useful for testing and development purposes, allowing you to simulate a blockchain network locally. +This project sets up a local Ethereum environment with two Geth nodes, a Redis instance, and both a consensus client and a single node application (snode). The setup is useful for testing and development purposes, allowing you to simulate a blockchain network locally with different consensus options. ## Prerequisites @@ -129,7 +129,7 @@ We will use Docker Compose to run Redis. 
### Docker Compose Configuration -Redis is configured in `redis-cluster` folder withing `docker-compose.yml` +Redis is configured in `redis-cluster` folder within `docker-compose.yml` ### Start Redis @@ -228,10 +228,93 @@ Run the client with the configuration file: ./consensus-client start --config config.yaml ``` +## Running the Single Node Application (snode) + +The single node application provides a simplified MEV-commit setup that doesn't require Redis. + +### Build the Single Node Application + +```bash +go mod tidy +go build -o snode main.go +``` + +### Configuration + +The snode application can be configured via command-line flags, environment variables, or a YAML configuration file. + +#### Command-Line Flags + +- `--instance-id`: **(Required)** Unique instance ID for this node. +- `--eth-client-url`: Ethereum Execution client Engine API URL (default: `http://localhost:8551`). +- `--jwt-secret`: Hex-encoded JWT secret for Ethereum Execution client Engine API (default: `13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c`). +- `--priority-fee-recipient`: **(Required)** Ethereum address for receiving priority fees (block proposer fee). +- `--evm-build-delay`: Delay after initiating payload construction before calling getPayload (default: `100ms`). +- `--evm-build-delay-empty-block`: Minimum time since last block to build an empty block (default: `2s`, 0 to disable skipping). +- `--config`: Path to a YAML configuration file. +- `--log-fmt`: Log format ('text' or 'json') (default: `text`). +- `--log-level`: Log level ('debug', 'info', 'warn', 'error') (default: `info`). +- `--log-tags`: Comma-separated log tags (e.g., `env:prod,service:snode`). 
+ +#### Environment Variables + +- `SNODE_INSTANCE_ID` +- `SNODE_ETH_CLIENT_URL` +- `SNODE_JWT_SECRET` +- `SNODE_PRIORITY_FEE_RECIPIENT` +- `SNODE_EVM_BUILD_DELAY` +- `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` +- `SNODE_CONFIG` +- `MEV_COMMIT_LOG_FMT` +- `MEV_COMMIT_LOG_LEVEL` +- `MEV_COMMIT_LOG_TAGS` + +### Run the Single Node Application + +Run the application using command-line flags: + +```bash +./snode start \ + --instance-id "snode1" \ + --eth-client-url "http://localhost:8551" \ + --jwt-secret "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" \ + --priority-fee-recipient "0xYourEthereumAddress" \ + --evm-build-delay "100ms" \ + --evm-build-delay-empty-block "2s" \ + --log-level "info" +``` + +**Note**: + +- Replace `"0xYourEthereumAddress"` with a valid Ethereum address for receiving priority fees. +- The JWT secret should be a 64-character hex string (32 bytes). + +### Using a Configuration File for snode + +Create a `snode-config.yaml` file: + +```yaml +instance-id: "snode1" +eth-client-url: "http://localhost:8551" +jwt-secret: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" +priority-fee-recipient: "0xYourEthereumAddress" +evm-build-delay: "100ms" +evm-build-delay-empty-block: "2s" +log-fmt: "text" +log-level: "info" +log-tags: "env:dev,service:snode" +``` + +Run the application with the configuration file: + +```bash +./snode start --config snode-config.yaml +``` + ## Additional Notes -- **Multiple Instances**: You can run multiple instances of the consensus client by changing the `--instance-id` and `--eth-client-url` parameters. +- **Graceful Shutdown**: Both applications support graceful shutdown via SIGTERM or Ctrl+C. ## Conclusion -You now have a local Ethereum environment with Geth nodes, Redis, and a consensus client. +You now have a local Ethereum environment with Geth nodes, optional Redis, and a choice between full consensus or single node operation. 
From 142ac797bf02c6acc06cc50a29cce43ea6a2dfc3 Mon Sep 17 00:00:00 2001 From: harshsingh1002 <1002.harshsingh@gmail.com> Date: Fri, 16 May 2025 11:24:55 +0530 Subject: [PATCH 07/24] feat(cl): add Dockerfile for snode --- cl/Dockerfile.snode | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 cl/Dockerfile.snode diff --git a/cl/Dockerfile.snode b/cl/Dockerfile.snode new file mode 100644 index 000000000..f36f36ef4 --- /dev/null +++ b/cl/Dockerfile.snode @@ -0,0 +1,22 @@ +# --- Build Stage --- +FROM golang:1.23-alpine AS builder + +RUN apk update && apk add --no-cache git ca-certificates + +WORKDIR /app + +COPY . . + +RUN go build -o snode ./cmd/singlenode + +# --- Runtime Stage --- +FROM alpine:3.18 + +RUN apk add --no-cache ca-certificates curl jq + +COPY --from=builder /app/snode /usr/local/bin/snode + +WORKDIR /usr/local/bin + +CMD ["snode"] + From a902fc0a5e3249cdac6b13f691f131b2def4308a Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 2025 22:21:30 +0200 Subject: [PATCH 08/24] feat: added health endpoint --- cl/blockbuilder/blockbuilder.go | 37 +++- cl/cmd/singlenode/main.go | 18 ++ cl/singlenode/singlenode.go | 144 +++++++++--- cl/singlenode/singlenode_test.go | 369 +++++++++++++++++++++++++++++++ 4 files changed, 532 insertions(+), 36 deletions(-) create mode 100644 cl/singlenode/singlenode_test.go diff --git a/cl/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go index 418308173..df0da9701 100644 --- a/cl/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -22,7 +22,7 @@ import ( "github.com/vmihailenco/msgpack/v5" ) -const maxAttempts = 3 +const maxAttempts = 10 var ErrEmptyBlock = errors.New("payloadID is empty") @@ -115,12 +115,25 @@ func (bb *BlockBuilder) startBuild(ctx context.Context, head *types.ExecutionHea } func (bb *BlockBuilder) GetPayload(ctx context.Context) error { - var payloadID *engine.PayloadID - + var ( + payloadID *engine.PayloadID + head *types.ExecutionHead + err 
error + ) currentCallTime := time.Now() // Load execution head to get previous block timestamp - head, err := bb.loadExecutionHead(ctx) + err = util.RetryWithBackoff(ctx, maxAttempts, bb.logger, func() error { + head, err = bb.loadExecutionHead(ctx) + if err != nil { + bb.logger.Warn( + "Failed to load execution head, retrying...", + "error", err, + ) + return err // Will retry + } + return nil // Success + }) if err != nil { return fmt.Errorf("latest execution block: %w", err) } @@ -387,13 +400,25 @@ func (bb *BlockBuilder) FinalizeBlock(ctx context.Context, payloadIDStr, executi if err := msgpack.Unmarshal(executionPayloadBytes, &executionPayload); err != nil { return fmt.Errorf("failed to deserialize ExecutionPayload: %w", err) } - head, err := bb.loadExecutionHead(ctx) + + var head *types.ExecutionHead + err = util.RetryWithBackoff(ctx, maxAttempts, bb.logger, func() error { + head, err = bb.loadExecutionHead(ctx) + if err != nil { + bb.logger.Warn( + "Failed to load execution head, retrying...", + "error", err, + ) + return err // Will retry + } + return nil // Success + }) if err != nil { return fmt.Errorf("failed to load execution head: %w", err) } if err := bb.validateExecutionPayload(executionPayload, head); err != nil { - return err + return fmt.Errorf("failed to validate execution payload: %w", err) } hash := common.BytesToHash(head.BlockHash) diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index 84c22503a..b894b1bab 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -148,6 +148,22 @@ var ( return nil }, }) + + healthAddrPortFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "health-addr", + Usage: "Address for health check endpoint (e.g., ':8080')", + EnvVars: []string{"SNODE_HEALTH_ADDR"}, + Value: ":8080", + Action: func(_ *cli.Context, s string) error { + if !strings.HasPrefix(s, ":") { + return fmt.Errorf("health-addr must start with ':'") + } + if _, err := url.Parse(s); err != nil { + return 
fmt.Errorf("invalid health-addr: %v", err) + } + return nil + }, + }) ) func main() { @@ -162,6 +178,7 @@ func main() { evmBuildDelayFlag, evmBuildDelayEmptyBlockFlag, priorityFeeReceiptFlag, + healthAddrPortFlag, } app := &cli.App{ @@ -212,6 +229,7 @@ func startSingleNodeApplication(c *cli.Context) error { EVMBuildDelay: c.Duration(evmBuildDelayFlag.Name), EVMBuildDelayEmptyBlocks: c.Duration(evmBuildDelayEmptyBlockFlag.Name), PriorityFeeReceipt: c.String(priorityFeeReceiptFlag.Name), + HealthAddr: c.String(healthAddrPortFlag.Name), } logger.Info("Starting snode with configuration", "config", cfg) // Be careful logging sensitive parts of config diff --git a/cl/singlenode/singlenode.go b/cl/singlenode/singlenode.go index d1cace9f1..92fbab6f7 100644 --- a/cl/singlenode/singlenode.go +++ b/cl/singlenode/singlenode.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" "log/slog" + "net/http" + "strings" "sync" "time" @@ -27,17 +29,27 @@ type Config struct { EVMBuildDelay time.Duration EVMBuildDelayEmptyBlocks time.Duration PriorityFeeReceipt string + HealthAddr string +} + +type BlockBuilder interface { + GetPayload(ctx context.Context) error + FinalizeBlock(ctx context.Context, payloadID string, executionPayload string, extraData string) error } // SingleNodeApp orchestrates block production for a single node. type SingleNodeApp struct { logger *slog.Logger cfg Config - blockBuilder *blockbuilder.BlockBuilder - stateManager *localstate.LocalStateManager - appCtx context.Context - cancel context.CancelFunc - wg sync.WaitGroup + blockBuilder BlockBuilder + // stateManager is a local state manager for block production + // it's not anticipated to use DB as all the state already in geth client + stateManager *localstate.LocalStateManager + appCtx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + connectionStatus sync.Mutex + connectionRefused bool } // NewSingleNodeApp creates and initializes a new SingleNodeApp. 
@@ -50,7 +62,7 @@ func NewSingleNodeApp( jwtBytes, err := hex.DecodeString(cfg.JWTSecret) if err != nil { - cancel() // Cancel the derived context + cancel() logger.Error( "failed to decode JWT secret", "error", err, @@ -60,7 +72,7 @@ func NewSingleNodeApp( engineCL, err := ethclient.NewAuthClient(ctx, cfg.EthClientURL, jwtBytes) if err != nil { - cancel() // Cancel the derived context + cancel() logger.Error( "failed to create Ethereum engine client", "error", err, @@ -79,18 +91,92 @@ func NewSingleNodeApp( ) return &SingleNodeApp{ - logger: logger, - cfg: cfg, - blockBuilder: bb, - stateManager: stateMgr, - appCtx: ctx, - cancel: cancel, + logger: logger, + cfg: cfg, + blockBuilder: bb, + stateManager: stateMgr, + appCtx: ctx, + cancel: cancel, + connectionRefused: false, }, nil } -// Start begins the main block production loop. +// isConnectionRefused checks if the error is a connection refused error +func isConnectionRefused(err error) bool { + return strings.Contains(err.Error(), "connection refused") +} + +// setConnectionStatus updates the connection status based on the error +func (app *SingleNodeApp) setConnectionStatus(err error) { + app.connectionStatus.Lock() + defer app.connectionStatus.Unlock() + + if err == nil { + // Reset the connection refused flag if the operation was successful + app.connectionRefused = false + return + } + + // Check if the error indicates a connection refused + if isConnectionRefused(err) { + app.connectionRefused = true + app.logger.Warn( + "Connection refused detected, Ethereum might be unavailable", + "error", err, + ) + } +} + +// healthHandler responds on /health with 200 OK if the app context is active and no connection refusal, or 503 otherwise. 
+func (app *SingleNodeApp) healthHandler(w http.ResponseWriter, r *http.Request) { + if err := app.appCtx.Err(); err != nil { + http.Error(w, "unavailable", http.StatusServiceUnavailable) + return + } + + // Check connection status + app.connectionStatus.Lock() + connectionRefused := app.connectionRefused + app.connectionStatus.Unlock() + + if connectionRefused { + app.logger.Warn("Health check failed: ethereum is not available (connection refused)") + http.Error(w, "ethereum is not available", http.StatusServiceUnavailable) + return + } + + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) +} + +// Start begins the main block production loop and launches the health endpoint. func (app *SingleNodeApp) Start() { app.logger.Info("Starting SingleNodeApp...") + + // Launch health server + app.wg.Add(1) + go func() { + defer app.wg.Done() + mux := http.NewServeMux() + mux.HandleFunc("/health", app.healthHandler) + addr := app.cfg.HealthAddr + server := &http.Server{Addr: addr, Handler: mux} + app.logger.Info("Health endpoint listening", "address", addr) + + // Shutdown server when app context is done + go func() { + <-app.appCtx.Done() + ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancel() + server.Shutdown(ctx) + }() + + if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + app.logger.Error("Health server error", "error", err) + } + }() + + // Start block production loop app.wg.Add(1) go func() { defer app.wg.Done() @@ -101,15 +187,13 @@ func (app *SingleNodeApp) Start() { // shutdownWithError handles errors during the run loop and initiates a shutdown. func (app *SingleNodeApp) shutdownWithError(err error, message string, args ...any) { - // slog handles key-value pairs directly logArgs := append(args, "error", err) app.logger.Error(message, logArgs...) app.cancel() } // resetBlockProduction clears state and prepares for a new block production cycle. 
-// It returns true if a shutdown is initiated due to a reset failure. -func (app *SingleNodeApp) resetBlockProduction(logMessage string, logArgs ...interface{}) (shutdownInitiated bool) { +func (app *SingleNodeApp) resetBlockProduction(logMessage string, logArgs ...interface{}) bool { app.logger.Info(logMessage, logArgs...) if err := app.stateManager.ResetBlockState(app.appCtx); err != nil { app.shutdownWithError(err, "Failed to reset block state during run loop operations") @@ -120,10 +204,8 @@ func (app *SingleNodeApp) resetBlockProduction(logMessage string, logArgs ...int func (app *SingleNodeApp) runLoop() { app.logger.Info("SingleNodeApp run loop started", "instanceID", app.cfg.InstanceID) - - // Make sure we're starting with a clean state if app.resetBlockProduction("Initializing block production state") { - return // Shutdown initiated by resetBlockProduction + return } for { @@ -132,30 +214,32 @@ func (app *SingleNodeApp) runLoop() { app.logger.Info("SingleNodeApp run loop stopping due to context cancellation.") return default: - // Directly run the block production cycle without steps - if err := app.produceBlock(); err != nil { + err := app.produceBlock() + // Update connection status based on the error + app.setConnectionStatus(err) + + if err != nil { if errors.Is(err, blockbuilder.ErrEmptyBlock) { - // Handle empty block error app.logger.Info("empty block produced, waiting for new payload") continue + } else if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + app.logger.Info("context canceled or deadline exceeded, stopping block production") + return } - // Handle errors but continue the loop app.logger.Error( "block production cycle failed", "error", err, ) } - // Successful block production, reset for the next block if app.resetBlockProduction("Block production successful. 
Resetting state for next block.") { - // 0 chance to happen, if in-memory store is used - return // Shutdown initiated by resetBlockProduction + // if state is local, it couldn't happen + return } - } } } -// produceBlock handles the entire block production cycle in a direct, procedural manner +// produceBlock handles the entire block production cycle. func (app *SingleNodeApp) produceBlock() error { // Step 1: Build the block if err := app.blockBuilder.GetPayload(app.appCtx); err != nil { @@ -169,7 +253,7 @@ func (app *SingleNodeApp) produceBlock() error { } // Step 2: Finalize the block - app.logger.Info("Finalizing block", "payload_id", currentState.PayloadID) + app.logger.Info("finalizing block", "payload_id", currentState.PayloadID) if err := app.blockBuilder.FinalizeBlock(app.appCtx, currentState.PayloadID, currentState.ExecutionPayload, ""); err != nil { return fmt.Errorf("failed to finalize block: %w", err) } diff --git a/cl/singlenode/singlenode_test.go b/cl/singlenode/singlenode_test.go new file mode 100644 index 000000000..5db0de14c --- /dev/null +++ b/cl/singlenode/singlenode_test.go @@ -0,0 +1,369 @@ +package singlenode + +import ( + "context" + "fmt" + "log/slog" + "net/http" + "net/http/httptest" + "os" + "strings" + "testing" + "time" + + "github.com/primev/mev-commit/cl/blockbuilder" + localstate "github.com/primev/mev-commit/cl/singlenode/state" + "github.com/primev/mev-commit/cl/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// MockBlockBuilder implements the BlockBuilder interface for testing +type MockBlockBuilder struct { + mock.Mock +} + +func (m *MockBlockBuilder) GetPayload(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func (m *MockBlockBuilder) FinalizeBlock(ctx context.Context, payloadID string, executionPayload string, extraData string) error { + args := m.Called(ctx, payloadID, executionPayload, extraData) + return 
args.Error(0) +} + +// MockConnectionRefused provides a safe implementation for testing +type MockConnectionRefused struct{} + +func (m *MockConnectionRefused) IsConnectionRefused(err error) bool { + if err == nil { + return false + } + return strings.Contains(err.Error(), "connection refused") +} + +// setupTestLogger creates a logger for testing +func setupTestLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{ + Level: slog.LevelDebug, + })) +} + +// TestNewSingleNodeApp tests the creation of a new SingleNodeApp +func TestNewSingleNodeApp(t *testing.T) { + ctx := context.Background() + logger := setupTestLogger() + + validCfg := Config{ + InstanceID: "test-instance", + EthClientURL: "http://localhost:8545", + JWTSecret: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", + EVMBuildDelay: time.Second, + EVMBuildDelayEmptyBlocks: time.Second * 2, + PriorityFeeReceipt: "0x1234567890abcdef1234567890abcdef12345678", + HealthAddr: ":8080", + } + + app, err := NewSingleNodeApp(ctx, validCfg, logger) + + if err == nil && app != nil { + app.Stop() + } + + invalidJWTCfg := validCfg + invalidJWTCfg.JWTSecret = "invalid-jwt" + + _, err = NewSingleNodeApp(ctx, invalidJWTCfg, logger) + require.Error(t, err, "Expected error with invalid JWT secret") +} + +// TestHealthHandler tests the health endpoint +func TestHealthHandler(t *testing.T) { + logger := setupTestLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + app := &SingleNodeApp{ + logger: logger, + appCtx: ctx, + cancel: cancel, + connectionRefused: false, + } + + req := httptest.NewRequest(http.MethodGet, "/health", nil) + w := httptest.NewRecorder() + app.healthHandler(w, req) + + resp := w.Result() + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode, "Expected 200 OK for healthy app") + + app.connectionRefused = true + w = httptest.NewRecorder() + app.healthHandler(w, req) + + resp = w.Result() + defer 
resp.Body.Close() + assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode, "Expected 503 when connection refused") + + app.connectionRefused = false + app.cancel() + w = httptest.NewRecorder() + app.healthHandler(w, req) + + resp = w.Result() + defer resp.Body.Close() + assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode, "Expected 503 when context canceled") +} + +// TestSetConnectionStatus tests the connection status management +func TestSetConnectionStatus(t *testing.T) { + logger := setupTestLogger() + ctx := context.Background() + + app := &SingleNodeApp{ + logger: logger, + appCtx: ctx, + connectionRefused: false, + } + + app.setConnectionStatus(nil) + assert.False(t, app.connectionRefused, "Connection refused should be false after nil error") + + err := fmt.Errorf("connection refused") + app.setConnectionStatus(err) + assert.True(t, app.connectionRefused, "Connection refused should be true after connection refused error") + + app.connectionRefused = false + app.setConnectionStatus(fmt.Errorf("some other error")) + assert.False(t, app.connectionRefused, "Connection refused should remain false after other error") +} + +// TestProduceBlock tests the block production cycle +func TestProduceBlock(t *testing.T) { + logger := setupTestLogger() + ctx := context.Background() + + mockBuilder := new(MockBlockBuilder) + stateMgr := localstate.NewLocalStateManager(logger) + + app := &SingleNodeApp{ + logger: logger, + appCtx: ctx, + blockBuilder: mockBuilder, + stateManager: stateMgr, + } + + err := stateMgr.SaveBlockStateAndPublishToStream(ctx, &types.BlockBuildState{ + CurrentStep: types.StepBuildBlock, + PayloadID: "test-payload-id", + ExecutionPayload: "test-execution-payload", + }) + require.NoError(t, err) + + mockBuilder.On("GetPayload", mock.Anything).Return(nil) + mockBuilder.On("FinalizeBlock", mock.Anything, "test-payload-id", "test-execution-payload", "").Return(nil) + + err = app.produceBlock() + require.NoError(t, err, "Expected no error 
from produceBlock") + + mockBuilder.AssertExpectations(t) + + mockBuilder = new(MockBlockBuilder) + app.blockBuilder = mockBuilder + + mockBuilder.On("GetPayload", mock.Anything).Return(assert.AnError) + + err = app.produceBlock() + require.Error(t, err, "Expected error from produceBlock when GetPayload fails") + assert.Contains(t, err.Error(), "failed to get payload", "Expected specific error message") + + mockBuilder.AssertExpectations(t) + + mockBuilder = new(MockBlockBuilder) + app.blockBuilder = mockBuilder + + err = stateMgr.SaveBlockStateAndPublishToStream(ctx, &types.BlockBuildState{ + CurrentStep: types.StepBuildBlock, + PayloadID: "", // Empty payload ID + }) + require.NoError(t, err) + + mockBuilder.On("GetPayload", mock.Anything).Return(nil) + + err = app.produceBlock() + require.Error(t, err, "Expected error with empty payload ID") + assert.Contains(t, err.Error(), "payload ID is empty", "Expected specific error message") + + mockBuilder = new(MockBlockBuilder) + app.blockBuilder = mockBuilder + + err = stateMgr.SaveBlockStateAndPublishToStream(ctx, &types.BlockBuildState{ + CurrentStep: types.StepBuildBlock, + PayloadID: "test-payload-id", + ExecutionPayload: "test-execution-payload", + }) + require.NoError(t, err) + + mockBuilder.On("GetPayload", mock.Anything).Return(nil) + mockBuilder.On("FinalizeBlock", mock.Anything, "test-payload-id", "test-execution-payload", "").Return(assert.AnError) + + err = app.produceBlock() + require.Error(t, err, "Expected error from produceBlock when FinalizeBlock fails") + assert.Contains(t, err.Error(), "failed to finalize block", "Expected specific error message") + + mockBuilder.AssertExpectations(t) + + mockBuilder = new(MockBlockBuilder) + app.blockBuilder = mockBuilder + + mockBuilder.On("GetPayload", mock.Anything).Return(blockbuilder.ErrEmptyBlock) + + err = app.produceBlock() + assert.Contains(t, err.Error(), blockbuilder.ErrEmptyBlock.Error(), + "Expected error to contain ErrEmptyBlock message") + + 
mockBuilder.AssertExpectations(t) +} + +// TestRunLoop tests parts of the run loop that can be isolated +func TestRunLoop(t *testing.T) { + logger := setupTestLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockBuilder := new(MockBlockBuilder) + stateMgr := localstate.NewLocalStateManager(logger) + + app := &SingleNodeApp{ + logger: logger, + appCtx: ctx, + cancel: cancel, + blockBuilder: mockBuilder, + stateManager: stateMgr, + } + + result := app.resetBlockProduction("Test reset") + assert.False(t, result, "Expected resetBlockProduction to return false on success") + + state := stateMgr.GetBlockBuildState(ctx) + assert.Equal(t, types.StepBuildBlock, state.CurrentStep, "Expected state to be reset") + + testCancel := context.CancelFunc(func() { + testCtx, testCancelFunc := context.WithCancel(context.Background()) + testCancelFunc() + app.appCtx = testCtx + }) + + originalCancel := app.cancel + + app.cancel = testCancel + + app.shutdownWithError(assert.AnError, "Test shutdown error") + + select { + case <-app.appCtx.Done(): + // Context was canceled as expected + default: + t.Error("Context was not canceled by shutdownWithError") + } + + app.cancel = originalCancel +} + +// TestStartStop tests the Start and Stop methods +func TestStartStop(t *testing.T) { + logger := setupTestLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + stateMgr := localstate.NewLocalStateManager(logger) + + mockBuilder := new(MockBlockBuilder) + + // Setup mock to immediately cancel the context when GetPayload is called + // This ensures the run loop exits right away + mockBuilder.On("GetPayload", mock.Anything).Run(func(args mock.Arguments) { + cancel() // Cancel context to exit the run loop immediately + }).Return(context.Canceled).Once() + + // Create app with minimal configuration for testing + app := &SingleNodeApp{ + logger: logger, + cfg: Config{HealthAddr: ":0"}, + appCtx: ctx, + cancel: cancel, + 
blockBuilder: mockBuilder, + stateManager: stateMgr, + } + + app.Start() + + time.Sleep(100 * time.Millisecond) + + app.Stop() + + mockBuilder.AssertExpectations(t) +} + +// TestRunLoopEmptyBlockHandling tests how runLoop handles ErrEmptyBlock +func TestRunLoopEmptyBlockHandling(t *testing.T) { + logger := setupTestLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockBuilder := new(MockBlockBuilder) + stateMgr := localstate.NewLocalStateManager(logger) + + app := &SingleNodeApp{ + logger: logger, + appCtx: ctx, + cancel: cancel, + blockBuilder: mockBuilder, + stateManager: stateMgr, + } + + mockBuilder.On("GetPayload", mock.Anything).Return(blockbuilder.ErrEmptyBlock).Once() + + mockBuilder.On("GetPayload", mock.Anything).Run(func(args mock.Arguments) { + cancel() + }).Return(context.Canceled).Once() + + done := make(chan struct{}) + go func() { + app.runLoop() + close(done) + }() + + // Wait for the run loop to exit + select { + case <-done: + // Run loop exited as expected + case <-time.After(500 * time.Millisecond): + t.Fatal("Timeout waiting for run loop to exit") + } + + mockBuilder.AssertExpectations(t) +} + +// TestIsConnectionRefused tests the connection refused detection logic +func TestIsConnectionRefused(t *testing.T) { + mock := &MockConnectionRefused{} + + err := fmt.Errorf("connection refused") + assert.True(t, mock.IsConnectionRefused(err), + "Should detect 'connection refused' error") + + err = fmt.Errorf("Something with connection refused inside") + assert.True(t, mock.IsConnectionRefused(err), + "Should detect error with 'connection refused' substring") + + err = fmt.Errorf("some other error") + assert.False(t, mock.IsConnectionRefused(err), + "Should not detect generic error as connection refused") + + assert.False(t, mock.IsConnectionRefused(nil), + "Should not detect nil as connection refused") +} From 8787a5db0de2969db83d664323c0510a950ff2a3 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 
2025 22:38:16 +0200 Subject: [PATCH 09/24] feat: added health endpoint to the readme --- cl/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cl/README.md b/cl/README.md index 67154a055..9ea99f029 100644 --- a/cl/README.md +++ b/cl/README.md @@ -251,6 +251,7 @@ The snode application can be configured via command-line flags, environment vari - `--priority-fee-recipient`: **(Required)** Ethereum address for receiving priority fees (block proposer fee). - `--evm-build-delay`: Delay after initiating payload construction before calling getPayload (default: `100ms`). - `--evm-build-delay-empty-block`: Minimum time since last block to build an empty block (default: `2s`, 0 to disable skipping). +- `--health-addr`: Address for health check endpoint (default: `:8080`). - `--config`: Path to a YAML configuration file. - `--log-fmt`: Log format ('text' or 'json') (default: `text`). - `--log-level`: Log level ('debug', 'info', 'warn', 'error') (default: `info`). @@ -264,6 +265,7 @@ The snode application can be configured via command-line flags, environment vari - `SNODE_PRIORITY_FEE_RECIPIENT` - `SNODE_EVM_BUILD_DELAY` - `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` +- `SNODE_HEALTH_ADDR` - `SNODE_CONFIG` - `MEV_COMMIT_LOG_FMT` - `MEV_COMMIT_LOG_LEVEL` @@ -314,6 +316,7 @@ Run the application with the configuration file: ## Additional Notes - **Graceful Shutdown**: Both applications support graceful shutdown via SIGTERM or Ctrl+C. +- **Health Endpoint**: The snode application provides a health check endpoint at `/health` that returns a 200 OK response when the application is running normally, or a 503 Service Unavailable if there are connection issues with the Ethereum client. 
## Conclusion From d229fbbd38f1f6efaf3fcbe2a0f93a739565186e Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 2025 22:44:03 +0200 Subject: [PATCH 10/24] fix: fmt --- cl/ethclient/engineclient.go | 4 ++-- cl/ethclient/ethclient.go | 1 + cl/redisapp/rapp.go | 14 +++++++------- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/cl/ethclient/engineclient.go b/cl/ethclient/engineclient.go index e7af835bb..92334a95a 100644 --- a/cl/ethclient/engineclient.go +++ b/cl/ethclient/engineclient.go @@ -36,9 +36,9 @@ type EngineClient interface { // NewPayloadV3 creates an Eth1 block, inserts it in the chain, and returns the status of the chain. NewPayloadV3(ctx context.Context, params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) - NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, + NewPayloadV4(params engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) - + // ForkchoiceUpdatedV2 has several responsibilities: // - It sets the chain the head. // - And/or it sets the chain's finalized block hash. 
diff --git a/cl/ethclient/ethclient.go b/cl/ethclient/ethclient.go index 9c912fab3..ed1df81d1 100644 --- a/cl/ethclient/ethclient.go +++ b/cl/ethclient/ethclient.go @@ -3,6 +3,7 @@ package ethclient import ( "context" "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" diff --git a/cl/redisapp/rapp.go b/cl/redisapp/rapp.go index e86fa7ea8..dabf3fc25 100644 --- a/cl/redisapp/rapp.go +++ b/cl/redisapp/rapp.go @@ -6,8 +6,8 @@ import ( "log/slog" "time" - "github.com/primev/mev-commit/cl/ethclient" "github.com/primev/mev-commit/cl/blockbuilder" + "github.com/primev/mev-commit/cl/ethclient" "github.com/primev/mev-commit/cl/redisapp/leaderfollower" "github.com/primev/mev-commit/cl/redisapp/state" "github.com/redis/go-redis/v9" @@ -37,7 +37,7 @@ func NewMevCommitChain( if err != nil { cancel() logger.Error( - "Error decoding JWT secret", + "Error decoding JWT secret", "error", err, ) return nil, err @@ -47,7 +47,7 @@ func NewMevCommitChain( if err != nil { cancel() logger.Error( - "Error creating engine client", + "Error creating engine client", "error", err, ) return nil, err @@ -61,7 +61,7 @@ func NewMevCommitChain( if err != nil { cancel() logger.Error( - "Error setting min-replicas-to-write", + "Error setting min-replicas-to-write", "error", err, ) return nil, err @@ -71,7 +71,7 @@ func NewMevCommitChain( if err != nil { cancel() logger.Error( - "Error creating state manager", + "Error creating state manager", "error", err, ) return nil, err @@ -88,7 +88,7 @@ func NewMevCommitChain( if err != nil { cancel() logger.Error( - "Error creating lfm", + "Error creating lfm", "error", err, ) return nil, err @@ -116,7 +116,7 @@ func (app *MevCommitChain) Stop() { err := app.lfm.WaitForGoroutinesToStop() if err != nil { app.logger.Error( - "Error waiting for goroutines to stop", + "Error waiting for goroutines to stop", "error", err, ) } From c6c097ebd60183724334158f50b21ee7d5440203 Mon Sep 17 
00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 2025 22:44:46 +0200 Subject: [PATCH 11/24] fix: fmt --- cl/blockbuilder/blockbuilder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cl/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go index df0da9701..216e1f884 100644 --- a/cl/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -117,8 +117,8 @@ func (bb *BlockBuilder) startBuild(ctx context.Context, head *types.ExecutionHea func (bb *BlockBuilder) GetPayload(ctx context.Context) error { var ( payloadID *engine.PayloadID - head *types.ExecutionHead - err error + head *types.ExecutionHead + err error ) currentCallTime := time.Now() From f07e931187ec2f671473aa116fb83230264dcfb5 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 2025 23:02:15 +0200 Subject: [PATCH 12/24] fix: fmt --- cl/mocks/mock_state.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cl/mocks/mock_state.go b/cl/mocks/mock_state.go index 3b35a6a05..27ef00388 100644 --- a/cl/mocks/mock_state.go +++ b/cl/mocks/mock_state.go @@ -7795,4 +7795,4 @@ func (m *MockCoordinator) Stop() { func (mr *MockCoordinatorMockRecorder) Stop() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockCoordinator)(nil).Stop)) -} \ No newline at end of file +} From 94a53f6b7f8925501f6c43409ffb057bed3a41a8 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sun, 18 May 2025 23:14:22 +0200 Subject: [PATCH 13/24] fix: lint --- cl/cmd/singlenode/main.go | 2 +- cl/singlenode/singlenode.go | 4 ++-- cl/singlenode/singlenode_test.go | 3 +++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index b894b1bab..3ccd84215 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -205,7 +205,7 @@ func main() { } if err := app.Run(os.Args); err != nil { - fmt.Fprintf(app.Writer, "Error running snode: %v\n", err) 
+ _, _ = fmt.Fprintf(app.Writer, "Error running snode: %v\n", err) os.Exit(1) } } diff --git a/cl/singlenode/singlenode.go b/cl/singlenode/singlenode.go index 92fbab6f7..9bd7c11c9 100644 --- a/cl/singlenode/singlenode.go +++ b/cl/singlenode/singlenode.go @@ -146,7 +146,7 @@ func (app *SingleNodeApp) healthHandler(w http.ResponseWriter, r *http.Request) } w.WriteHeader(http.StatusOK) - w.Write([]byte("OK")) + _, _ = w.Write([]byte("OK")) } // Start begins the main block production loop and launches the health endpoint. @@ -168,7 +168,7 @@ func (app *SingleNodeApp) Start() { <-app.appCtx.Done() ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) defer cancel() - server.Shutdown(ctx) + _ = server.Shutdown(ctx) }() if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { diff --git a/cl/singlenode/singlenode_test.go b/cl/singlenode/singlenode_test.go index 5db0de14c..838a0d9b7 100644 --- a/cl/singlenode/singlenode_test.go +++ b/cl/singlenode/singlenode_test.go @@ -97,6 +97,7 @@ func TestHealthHandler(t *testing.T) { app.healthHandler(w, req) resp := w.Result() + //nolint:errcheck defer resp.Body.Close() assert.Equal(t, http.StatusOK, resp.StatusCode, "Expected 200 OK for healthy app") @@ -105,6 +106,7 @@ func TestHealthHandler(t *testing.T) { app.healthHandler(w, req) resp = w.Result() + //nolint:errcheck defer resp.Body.Close() assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode, "Expected 503 when connection refused") @@ -114,6 +116,7 @@ func TestHealthHandler(t *testing.T) { app.healthHandler(w, req) resp = w.Result() + //nolint:errcheck defer resp.Body.Close() assert.Equal(t, http.StatusServiceUnavailable, resp.StatusCode, "Expected 503 when context canceled") } From 6aefc3303ed61c88ff54b9b8f8b6c7619511e9a8 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 20 May 2025 23:00:38 +0800 Subject: [PATCH 14/24] fix: fixed readme --- cl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/cl/README.md b/cl/README.md index 9ea99f029..b17871058 100644 --- a/cl/README.md +++ b/cl/README.md @@ -320,4 +320,4 @@ Run the application with the configuration file: ## Conclusion -You now have a local Ethereum environment with Geth nodes, optional Redis, and a choice between full consensus or single node operation. +You now have a local Ethereum environment with Geth nodes offering two consensus options: a Redis-based leader-follower consensus setup or a simplified single node consensus. From b811d681c20f4d8cb377da5660d857556f8738f9 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Wed, 21 May 2025 23:50:56 +0800 Subject: [PATCH 15/24] fix: deleted redundant execution head saving func --- cl/blockbuilder/blockbuilder.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/cl/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go index 216e1f884..cf2a55d6e 100644 --- a/cl/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -438,9 +438,12 @@ func (bb *BlockBuilder) FinalizeBlock(ctx context.Context, payloadIDStr, executi return fmt.Errorf("failed to finalize fork choice update: %w", err) } - if err := bb.saveExecutionHead(executionPayload); err != nil { - return fmt.Errorf("failed to save execution head: %w", err) + bb.executionHead = &types.ExecutionHead{ + BlockHeight: executionPayload.Number, + BlockHash: executionPayload.BlockHash[:], + BlockTime: executionPayload.Timestamp, } + return nil } @@ -528,13 +531,3 @@ func (bb *BlockBuilder) loadExecutionHead(ctx context.Context) (*types.Execution return bb.executionHead, nil } - -func (bb *BlockBuilder) saveExecutionHead(executionPayload engine.ExecutableData) error { - bb.executionHead = &types.ExecutionHead{ - BlockHeight: executionPayload.Number, - BlockHash: executionPayload.BlockHash[:], - BlockTime: executionPayload.Timestamp, - } - - return nil -} From 73b086423c83d40d5b1814974edf466352e2397a Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: 
Mon, 26 May 2025 22:45:06 +0800 Subject: [PATCH 16/24] feat: implemented member nodes --- cl/README.md | 183 ++++++-- cl/blockbuilder/blockbuilder.go | 4 + cl/cmd/singlenode/main.go | 181 +++++++- cl/membernode/membernode.go | 617 +++++++++++++++++++++++++ cl/postgres/docker-compose.yml | 57 +++ cl/postgres/init-db.sql | 21 + cl/singlenode/api/api_client.go | 204 ++++++++ cl/singlenode/api/api_client_test.go | 576 +++++++++++++++++++++++ cl/singlenode/api/api_server.go | 315 +++++++++++++ cl/singlenode/api/api_server_test.go | 519 +++++++++++++++++++++ cl/singlenode/payloadstore/postgres.go | 266 +++++++++++ cl/singlenode/singlenode.go | 96 +++- cl/singlenode/singlenode_test.go | 11 + cl/types/types.go | 20 + 14 files changed, 3028 insertions(+), 42 deletions(-) create mode 100644 cl/membernode/membernode.go create mode 100644 cl/postgres/docker-compose.yml create mode 100644 cl/postgres/init-db.sql create mode 100644 cl/singlenode/api/api_client.go create mode 100644 cl/singlenode/api/api_client_test.go create mode 100644 cl/singlenode/api/api_server.go create mode 100644 cl/singlenode/api/api_server_test.go create mode 100644 cl/singlenode/payloadstore/postgres.go diff --git a/cl/README.md b/cl/README.md index b17871058..53bfe098f 100644 --- a/cl/README.md +++ b/cl/README.md @@ -230,7 +230,36 @@ Run the client with the configuration file: ## Running the Single Node Application (snode) -The single node application provides a simplified MEV-commit setup that doesn't require Redis. +The single node application provides a simplified MEV-commit setup that doesn't require Redis, but using Postgres to save payloads, so member nodes could request that payload later on. + +## Architecture Overview + +The application supports two operational modes: + +1. **Leader Node**: Produces blocks and serves payloads to member nodes via API +2. 
**Member Node**: Follows a leader node by polling for and processing payloads sequentially + +## Running Postgres + +We will use Docker Compose to run Postgres + +### Docker Compose Configuration + +Postgres is configured in `postgres` folder within `docker-compose.yml` + +### Start Postgres + +Stop any existing containers and remove volumes: + +```bash +docker compose down -v +``` + +Start Postgres in detached mode: + +```bash +docker compose up -d +``` ### Build the Single Node Application @@ -243,80 +272,178 @@ go build -o snode main.go The snode application can be configured via command-line flags, environment variables, or a YAML configuration file. -#### Command-Line Flags +### Common Configuration Flags -- `--instance-id`: **(Required)** Unique instance ID for this node. -- `--eth-client-url`: Ethereum Execution client Engine API URL (default: `http://localhost:8551`). -- `--jwt-secret`: Hex-encoded JWT secret for Ethereum Execution client Engine API (default: `13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c`). -- `--priority-fee-recipient`: **(Required)** Ethereum address for receiving priority fees (block proposer fee). -- `--evm-build-delay`: Delay after initiating payload construction before calling getPayload (default: `100ms`). -- `--evm-build-delay-empty-block`: Minimum time since last block to build an empty block (default: `2s`, 0 to disable skipping). -- `--health-addr`: Address for health check endpoint (default: `:8080`). -- `--config`: Path to a YAML configuration file. -- `--log-fmt`: Log format ('text' or 'json') (default: `text`). -- `--log-level`: Log level ('debug', 'info', 'warn', 'error') (default: `info`). -- `--log-tags`: Comma-separated log tags (e.g., `env:prod,service:snode`).
+- `--instance-id`: **(Required)** Unique instance ID for this node +- `--eth-client-url`: Ethereum Execution client Engine API URL (default: `http://localhost:8551`) +- `--jwt-secret`: Hex-encoded JWT secret for Ethereum Execution client Engine API (default: `13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c`) +- `--health-addr`: Address for health check endpoint (default: `:8080`) +- `--config`: Path to a YAML configuration file +- `--log-fmt`: Log format ('text' or 'json') (default: `text`) +- `--log-level`: Log level ('debug', 'info', 'warn', 'error') (default: `info`) +- `--log-tags`: Comma-separated log tags (e.g., `env:prod,service:snode`) -#### Environment Variables +### Leader Node Specific Flags +- `--priority-fee-recipient`: **(Required)** Ethereum address for receiving priority fees (block proposer fee) +- `--evm-build-delay`: Delay after initiating payload construction before calling getPayload (default: `100ms`) +- `--evm-build-delay-empty-block`: Minimum time since last block to build an empty block (default: `2s`, 0 to disable skipping) +- `--postgres-dsn`: PostgreSQL DSN for storing payloads (optional, e.g., `postgres://user:pass@host:port/dbname?sslmode=disable`) +- `--api-addr`: Address for member node API endpoint (default: `:9090`, empty to disable) + +### Member Node Specific Flags + +- `--leader-api-url`: **(Required)** Leader node API URL for member nodes (e.g., `http://leader:9090`) +- `--poll-interval`: Interval for polling leader node for new payloads (default: `1s`) + +### Environment Variables + +**Common:** - `SNODE_INSTANCE_ID` - `SNODE_ETH_CLIENT_URL` - `SNODE_JWT_SECRET` -- `SNODE_PRIORITY_FEE_RECIPIENT` -- `SNODE_EVM_BUILD_DELAY` -- `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` - `SNODE_HEALTH_ADDR` - `SNODE_CONFIG` - `MEV_COMMIT_LOG_FMT` - `MEV_COMMIT_LOG_LEVEL` - `MEV_COMMIT_LOG_TAGS` -### Run the Single Node Application +**Leader Node:** +- `SNODE_PRIORITY_FEE_RECIPIENT` +- `SNODE_EVM_BUILD_DELAY` +- 
`SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` +- `SNODE_POSTGRES_DSN` +- `SNODE_API_ADDR` -Run the application using command-line flags: +**Member Node:** +- `MEMBER_LEADER_API_URL` +- `MEMBER_POLL_INTERVAL` + +## Running the Application + +### Leader Node + +Run as a leader node (produces blocks and serves API for member nodes): ```bash -./snode start \ - --instance-id "snode1" \ +./snode leader \ + --instance-id "leader1" \ --eth-client-url "http://localhost:8551" \ --jwt-secret "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" \ --priority-fee-recipient "0xYourEthereumAddress" \ --evm-build-delay "100ms" \ --evm-build-delay-empty-block "2s" \ + --api-addr ":9090" \ + --log-level "info" +``` + +### Member Node + +Run as a member node (follows leader by polling for payloads): + +```bash +./snode member \ + --instance-id "member1" \ + --eth-client-url "http://localhost:8552" \ + --jwt-secret "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" \ + --leader-api-url "http://localhost:9090" \ + --poll-interval "1s" \ --log-level "info" ``` +### Backward Compatibility + +The legacy `start` command is still supported and runs as a leader node: + +```bash +./snode start \ + --instance-id "snode1" \ + --priority-fee-recipient "0xYourEthereumAddress" \ + # ... other flags +``` + **Note**: - Replace `"0xYourEthereumAddress"` with a valid Ethereum address for receiving priority fees. - The JWT secret should be a 64-character hex string (32 bytes). 
-### Using a Configuration File for snode +## Configuration Files + +### Leader Node Configuration -Create a `snode-config.yaml` file: +Create a `leader-config.yaml` file: ```yaml -instance-id: "snode1" +instance-id: "leader1" eth-client-url: "http://localhost:8551" jwt-secret: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" priority-fee-recipient: "0xYourEthereumAddress" evm-build-delay: "100ms" evm-build-delay-empty-block: "2s" +api-addr: ":9090" +postgres-dsn: "postgres://user:pass@localhost:5432/mevcommit?sslmode=disable" +health-addr: ":8080" +log-fmt: "text" +log-level: "info" +log-tags: "env:dev,service:leader" +``` + +Run with configuration file: + +```bash +./snode leader --config leader-config.yaml +``` + +### Member Node Configuration + +Create a `member-config.yaml` file: + +```yaml +instance-id: "member1" +eth-client-url: "http://localhost:8552" +jwt-secret: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" +leader-api-url: "http://localhost:9090" +poll-interval: "1s" +health-addr: ":8081" log-fmt: "text" log-level: "info" -log-tags: "env:dev,service:snode" +log-tags: "env:dev,service:member" ``` -Run the application with the configuration file: +Run with configuration file: ```bash -./snode start --config snode-config.yaml +./snode member --config member-config.yaml ``` +### Health Endpoints + +Both node types provide health check endpoints: + +- **Leader**: Returns 200 OK when operational, 503 when Ethereum client unavailable +- **Member**: Returns 200 OK when operational and leader available, 503 otherwise + +Access health endpoints at: `http://localhost:8080/health` (or configured port) + +## Multi-Node Setup Example + +For a complete leader-follower setup: + +1. **Start Leader Node**: + ```bash + ./snode leader --instance-id "leader" --priority-fee-recipient "0xYourAddress" --api-addr ":9090" + ``` + +2. 
**Start Member Node(s)**: + ```bash + ./snode member --instance-id "member1" --leader-api-url "http://localhost:9090" --eth-client-url "http://localhost:8552" --health-addr ":8081" + ``` + +Each member node should connect to its own Geth instance and configure unique health endpoints to avoid port conflicts. + ## Additional Notes - **Graceful Shutdown**: Both applications support graceful shutdown via SIGTERM or Ctrl+C. -- **Health Endpoint**: The snode application provides a health check endpoint at `/health` that returns a 200 OK response when the application is running normally, or a 503 Service Unavailable if there are connection issues with the Ethereum client. ## Conclusion diff --git a/cl/blockbuilder/blockbuilder.go b/cl/blockbuilder/blockbuilder.go index cf2a55d6e..370d154c9 100644 --- a/cl/blockbuilder/blockbuilder.go +++ b/cl/blockbuilder/blockbuilder.go @@ -531,3 +531,7 @@ func (bb *BlockBuilder) loadExecutionHead(ctx context.Context) (*types.Execution return bb.executionHead, nil } + +func (bb *BlockBuilder) GetExecutionHead() *types.ExecutionHead { + return bb.executionHead +} diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index 3ccd84215..c19f48839 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -8,10 +8,12 @@ import ( "os" "os/signal" "slices" + "strconv" "strings" "syscall" "time" + "github.com/primev/mev-commit/cl/membernode" "github.com/primev/mev-commit/cl/singlenode" "github.com/primev/mev-commit/x/util" "github.com/urfave/cli/v2" @@ -19,7 +21,9 @@ import ( ) const ( - categoryDebug = "Debug" + categoryDebug = "Debug" + categoryDatabase = "Database" + categoryMember = "Member Node" ) var ( @@ -156,18 +160,70 @@ var ( Value: ":8080", Action: func(_ *cli.Context, s string) error { if !strings.HasPrefix(s, ":") { - return fmt.Errorf("health-addr must start with ':'") + return fmt.Errorf("health-addr must start with ':' (e.g., ':8080')") + } + // Validate port number + portStr := s[1:] // Remove the ':' 
+ if port, err := strconv.Atoi(portStr); err != nil || port < 1 || port > 65535 { + return fmt.Errorf("health-addr must be a valid port number (e.g., ':8080')") + } + return nil + }, + }) + + postgresDSNFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "postgres-dsn", + Usage: "PostgreSQL DSN for storing payloads. If empty, saving to DB is disabled. " + + "(e.g., 'postgres://user:pass@host:port/dbname?sslmode=disable')", + EnvVars: []string{"SNODE_POSTGRES_DSN"}, + Value: "", // Default to empty, making it optional + Category: categoryDatabase, + }) + + apiAddrFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "api-addr", + Usage: "Address for member node API endpoint (e.g., ':9090'). If empty, API is disabled.", + EnvVars: []string{"SNODE_API_ADDR"}, + Value: ":9090", + Action: func(_ *cli.Context, s string) error { + if s == "" { + return nil // Optional flag + } + if !strings.HasPrefix(s, ":") { + return fmt.Errorf("api-addr must start with ':'") + } + return nil + }, + }) + + // Member node specific flags + leaderAPIURLFlag = altsrc.NewStringFlag(&cli.StringFlag{ + Name: "leader-api-url", + Usage: "Leader node API URL for member nodes (e.g., 'http://leader:9090')", + EnvVars: []string{"MEMBER_LEADER_API_URL"}, + Category: categoryMember, + Action: func(_ *cli.Context, s string) error { + if s == "" { + return nil // Will be validated in member command } if _, err := url.Parse(s); err != nil { - return fmt.Errorf("invalid health-addr: %v", err) + return fmt.Errorf("invalid leader-api-url: %v", err) } return nil }, }) + + pollIntervalFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "poll-interval", + Usage: "Interval for polling leader node for new payloads (e.g., '1s')", + EnvVars: []string{"MEMBER_POLL_INTERVAL"}, + Value: 1 * time.Second, + Category: categoryMember, + }) ) func main() { - flags := []cli.Flag{ + leaderFlags := []cli.Flag{ configFlag, instanceIDFlag, ethClientURLFlag, @@ -179,17 +235,65 @@ func main() { evmBuildDelayEmptyBlockFlag, 
priorityFeeReceiptFlag, healthAddrPortFlag, + postgresDSNFlag, + apiAddrFlag, + } + + memberFlags := []cli.Flag{ + configFlag, + instanceIDFlag, + ethClientURLFlag, + jwtSecretFlag, + logFmtFlag, + logLevelFlag, + logTagsFlag, + healthAddrPortFlag, + leaderAPIURLFlag, + pollIntervalFlag, } app := &cli.App{ Name: "snode", Usage: "Single-node MEV-commit application", Commands: []*cli.Command{ + { + Name: "leader", + Usage: "Start as leader node (produces blocks)", + Flags: leaderFlags, + Before: altsrc.InitInputSourceWithContext(leaderFlags, + func(c *cli.Context) (altsrc.InputSourceContext, error) { + configFile := c.String(configFlag.Name) + if configFile != "" { + return altsrc.NewYamlSourceFromFile(configFile) + } + return &altsrc.MapInputSource{}, nil + }), + Action: func(c *cli.Context) error { + return startLeaderNode(c) + }, + }, + { + Name: "member", + Usage: "Start as member node (follows leader)", + Flags: memberFlags, + Before: altsrc.InitInputSourceWithContext(memberFlags, + func(c *cli.Context) (altsrc.InputSourceContext, error) { + configFile := c.String(configFlag.Name) + if configFile != "" { + return altsrc.NewYamlSourceFromFile(configFile) + } + return &altsrc.MapInputSource{}, nil + }), + Action: func(c *cli.Context) error { + return startMemberNode(c) + }, + }, + // Keep the old "start" command for backward compatibility { Name: "start", - Usage: "Start the snode node", - Flags: flags, - Before: altsrc.InitInputSourceWithContext(flags, + Usage: "Start as leader node (deprecated, use 'leader' instead)", + Flags: leaderFlags, + Before: altsrc.InitInputSourceWithContext(leaderFlags, func(c *cli.Context) (altsrc.InputSourceContext, error) { configFile := c.String(configFlag.Name) if configFile != "" { @@ -198,7 +302,7 @@ func main() { return &altsrc.MapInputSource{}, nil }), Action: func(c *cli.Context) error { - return startSingleNodeApplication(c) + return startLeaderNode(c) }, }, }, @@ -210,7 +314,7 @@ func main() { } } -func 
startSingleNodeApplication(c *cli.Context) error { +func startLeaderNode(c *cli.Context) error { logger, err := util.NewLogger( c.String(logLevelFlag.Name), c.String(logFmtFlag.Name), @@ -220,7 +324,7 @@ func startSingleNodeApplication(c *cli.Context) error { if err != nil { return fmt.Errorf("failed to create logger: %w", err) } - logger = logger.With("app", "snode") + logger = logger.With("app", "snode", "role", "leader") cfg := singlenode.Config{ InstanceID: c.String(instanceIDFlag.Name), @@ -230,9 +334,11 @@ func startSingleNodeApplication(c *cli.Context) error { EVMBuildDelayEmptyBlocks: c.Duration(evmBuildDelayEmptyBlockFlag.Name), PriorityFeeReceipt: c.String(priorityFeeReceiptFlag.Name), HealthAddr: c.String(healthAddrPortFlag.Name), + PostgresDSN: c.String(postgresDSNFlag.Name), + APIAddr: c.String(apiAddrFlag.Name), } - logger.Info("Starting snode with configuration", "config", cfg) // Be careful logging sensitive parts of config + logger.Info("Starting leader node with configuration", "config", cfg) // Create a root context that can be cancelled for graceful shutdown rootCtx, rootCancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) @@ -248,9 +354,58 @@ func startSingleNodeApplication(c *cli.Context) error { <-rootCtx.Done() - logger.Info("Shutdown signal received, stopping snode...") + logger.Info("Shutdown signal received, stopping leader node...") snode.Stop() - logger.Info("SRApp shutdown completed.") + logger.Info("Leader node shutdown completed.") + return nil +} + +func startMemberNode(c *cli.Context) error { + leaderURL := c.String(leaderAPIURLFlag.Name) + if leaderURL == "" { + return fmt.Errorf("leader-api-url is required for member nodes") + } + + logger, err := util.NewLogger( + c.String(logLevelFlag.Name), + c.String(logFmtFlag.Name), + c.String(logTagsFlag.Name), + c.App.Writer, + ) + if err != nil { + return fmt.Errorf("failed to create logger: %w", err) + } + logger = logger.With("app", "snode", "role", 
"member") + + cfg := membernode.Config{ + InstanceID: c.String(instanceIDFlag.Name), + LeaderAPIURL: leaderURL, + EthClientURL: c.String(ethClientURLFlag.Name), + JWTSecret: c.String(jwtSecretFlag.Name), + HealthAddr: c.String(healthAddrPortFlag.Name), + PollInterval: c.Duration(pollIntervalFlag.Name), + } + + logger.Info("Starting member node", "config", cfg) + + // Create a root context that can be cancelled for graceful shutdown + rootCtx, rootCancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer rootCancel() + + memberNode, err := membernode.NewMemberNodeApp(rootCtx, cfg, logger) + if err != nil { + logger.Error("Failed to initialize MemberNodeApp", "error", err) + return err + } + + memberNode.Start() + + <-rootCtx.Done() + + logger.Info("Shutdown signal received, stopping member node...") + memberNode.Stop() + + logger.Info("Member node shutdown completed.") return nil } diff --git a/cl/membernode/membernode.go b/cl/membernode/membernode.go new file mode 100644 index 000000000..cb280af7c --- /dev/null +++ b/cl/membernode/membernode.go @@ -0,0 +1,617 @@ +package membernode + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "log/slog" + "net/http" + "sync" + "time" + + "github.com/primev/mev-commit/cl/blockbuilder" + "github.com/primev/mev-commit/cl/ethclient" + "github.com/primev/mev-commit/cl/singlenode/api" +) + +const ( + shutdownTimeout = 5 * time.Second + maxConsecutiveErrors = 5 + batchSize = 10 + maxCatchupPayloads = 100 + // Timeout for API calls to leader and Geth + apiCallTimeout = 30 * time.Second + // Interval for retrying initialization steps + initRetryInterval = 2 * time.Second + // Threshold to exit catch-up mode + catchUpExitThreshold = batchSize / 2 +) + +// Config holds the configuration for the MemberNodeApp +type Config struct { + InstanceID string + LeaderAPIURL string + EthClientURL string + JWTSecret string + HealthAddr string + PollInterval time.Duration +} + +// MemberNodeApp represents 
a member node that follows the leader sequentially +type MemberNodeApp struct { + logger *slog.Logger + cfg Config + blockBuilder *blockbuilder.BlockBuilder + payloadClient *api.PayloadClient + engineClient blockbuilder.EngineClient + appCtx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + connectionStatus sync.Mutex + leaderAvailable bool + initializedCh chan struct{} + + // Sequential processing state + processingMutex sync.RWMutex + lastProcessedHeight uint64 + isCatchingUp bool + isInitialized bool +} + +// NewMemberNodeApp creates and initializes a new MemberNodeApp +func NewMemberNodeApp( + parentCtx context.Context, + cfg Config, + logger *slog.Logger, +) (*MemberNodeApp, error) { + ctx, cancel := context.WithCancel(parentCtx) + + // Decode JWT secret + jwtBytes, err := hex.DecodeString(cfg.JWTSecret) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to decode JWT secret: %w", err) + } + + // Create Ethereum engine client + engineClient, err := ethclient.NewAuthClient(ctx, cfg.EthClientURL, jwtBytes) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create Ethereum engine client: %w", err) + } + + // Create block builder for member node + bb := blockbuilder.NewMemberBlockBuilder(engineClient, logger.With("component", "BlockBuilder")) + + // Create payload client + payloadClient := api.NewPayloadClient(cfg.LeaderAPIURL, logger) + + return &MemberNodeApp{ + logger: logger, + cfg: cfg, + blockBuilder: bb, + payloadClient: payloadClient, + engineClient: engineClient, + appCtx: ctx, + cancel: cancel, + initializedCh: make(chan struct{}), + leaderAvailable: false, + lastProcessedHeight: 0, + isCatchingUp: false, + isInitialized: false, + }, nil +} + +// getLocalGethHeight gets the current block height from local geth +func (app *MemberNodeApp) getLocalGethHeight(ctx context.Context) (uint64, error) { + header, err := app.engineClient.HeaderByNumber(ctx, nil) // nil = latest + if err != nil { + return 0, 
fmt.Errorf("failed to get latest header from local geth: %w", err) + } + + height := header.Number.Uint64() + app.logger.Debug("Retrieved local geth height", "height", height) + return height, nil +} + +// initializeStartingHeight determines the starting height from local geth +func (app *MemberNodeApp) initializeStartingHeight() { + defer close(app.initializedCh) // Signal completion regardless of outcome (or handle errors preventing it) + + app.logger.Info("Detecting starting height from local geth...") + + for { + select { + case <-app.appCtx.Done(): + app.logger.Info("Initialization cancelled.") + return + default: + ctx, cancelTimeout := context.WithTimeout(app.appCtx, apiCallTimeout) + + // Check leader availability first + if err := app.payloadClient.CheckHealth(ctx); err != nil { + cancelTimeout() + app.logger.Warn("Leader not available during initialization, retrying...", "error", err) + select { + case <-app.appCtx.Done(): + return + case <-time.After(initRetryInterval): + continue + } + } + + // Get local geth's current height + localHeight, err := app.getLocalGethHeight(ctx) + cancelTimeout() // Release timeout context + + if err != nil { + app.logger.Warn("Failed to get local geth height, retrying...", "error", err) + select { + case <-app.appCtx.Done(): + return + case <-time.After(initRetryInterval): + continue + } + } + + // Set lastProcessedHeight to current local height + // The processing loop will request localHeight + 1 + app.processingMutex.Lock() + app.lastProcessedHeight = localHeight + app.isInitialized = true + app.processingMutex.Unlock() + + app.logger.Info( + "Successfully detected starting height from local geth", + "local_height", localHeight, + "will_start_from", localHeight+1, + ) + return // Initialization successful + } + } +} + +// Start begins the member node operation +func (app *MemberNodeApp) Start() { + app.logger.Info("Starting MemberNodeApp...") + + // Launch health server + app.wg.Add(1) + go func() { + defer 
app.wg.Done() + mux := http.NewServeMux() + mux.HandleFunc("/health", app.healthHandler) + addr := app.cfg.HealthAddr + server := &http.Server{Addr: addr, Handler: mux} + app.logger.Info("Health endpoint listening", "address", addr) + + go func() { + <-app.appCtx.Done() + ctx, cancelShutdown := context.WithTimeout(context.Background(), shutdownTimeout) + defer cancelShutdown() + if err := server.Shutdown(ctx); err != nil { + // ErrServerClosed is expected on graceful shutdown, + // context.DeadlineExceeded if shutdownTimeout is reached. + if !errors.Is(err, http.ErrServerClosed) && !errors.Is(err, context.DeadlineExceeded) { + app.logger.Warn("Health server shutdown error", "error", err) + } + } + }() + + if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + app.logger.Error("Health server error", "error", err) + app.cancel() // Trigger app shutdown if health server fails critically + } + }() + + // Initialize starting height from local geth + app.wg.Add(1) + go func() { + defer app.wg.Done() + app.initializeStartingHeight() + }() + + // Start sequential payload processing loop + app.wg.Add(1) + go func() { + defer app.wg.Done() + defer app.logger.Info("MemberNodeApp run loop finished.") + app.runSequentialLoop() + }() +} + +// healthHandler responds on /health +func (app *MemberNodeApp) healthHandler(w http.ResponseWriter, r *http.Request) { + if err := app.appCtx.Err(); err != nil { + http.Error(w, "unavailable (shutting down)", http.StatusServiceUnavailable) + return + } + + app.connectionStatus.Lock() + leaderAvailable := app.leaderAvailable + app.connectionStatus.Unlock() + + if !leaderAvailable { + app.logger.Warn("Health check failed: leader node is not available") + http.Error(w, "leader node is not available", http.StatusServiceUnavailable) + return + } + + // Optionally, check if initialized + app.processingMutex.RLock() + initialized := app.isInitialized + app.processingMutex.RUnlock() + if !initialized { + 
http.Error(w, "initializing", http.StatusServiceUnavailable) + return + } + + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte("OK")) + if err != nil { + app.logger.Error("Failed to write health response", "error", err) + // No return here, header already sent + } +} + +// runSequentialLoop continuously processes payloads in sequential order +func (app *MemberNodeApp) runSequentialLoop() { + // Wait for initialization + select { + case <-app.appCtx.Done(): + app.logger.Info("Run loop stopping before initialization due to context cancellation.") + return + case <-app.initializedCh: + app.logger.Info("Initialization complete, starting main processing.") + } + + // Check if initialization actually completed successfully (isInitialized flag) + // This is a safeguard in case initializedCh was closed due to appCtx.Done() during init. + app.processingMutex.RLock() + if !app.isInitialized { + app.processingMutex.RUnlock() + app.logger.Error("Initialization failed or was cancelled, run loop cannot start.") + return + } + startingHeight := app.lastProcessedHeight + 1 + app.processingMutex.RUnlock() + + app.logger.Info( + "MemberNodeApp sequential run loop started", + "instanceID", app.cfg.InstanceID, + "starting_height", startingHeight, + ) + + consecutiveErrors := 0 + ticker := time.NewTicker(app.cfg.PollInterval) + defer ticker.Stop() + + for { + select { + case <-app.appCtx.Done(): + app.logger.Info("MemberNodeApp run loop stopping due to context cancellation.") + return + case <-ticker.C: + err := app.processSequentialPayloads() + + if err != nil { + consecutiveErrors++ + app.setLeaderAvailability(false) + + if consecutiveErrors >= maxConsecutiveErrors { + app.logger.Error( + "Too many consecutive errors, member node may be unstable. 
Check leader connection and local Geth.", + "error", err, + "consecutive_errors", consecutiveErrors, + ) + } else { + app.logger.Warn( + "Failed to process sequential payloads", + "error", err, + "consecutive_errors", consecutiveErrors, + ) + } + } else { + if consecutiveErrors > 0 { + app.logger.Info( + "Recovered from errors", + "previous_consecutive_errors", consecutiveErrors, + ) + } + consecutiveErrors = 0 + app.setLeaderAvailability(true) // Assuming success implies leader is available + } + } + } +} + +// processSequentialPayloads fetches and processes payloads in sequential order +func (app *MemberNodeApp) processSequentialPayloads() error { + ctx, cancel := context.WithTimeout(app.appCtx, apiCallTimeout) // Overall timeout for this processing cycle + defer cancel() + + // Check leader health first + if err := app.payloadClient.CheckHealth(ctx); err != nil { + return fmt.Errorf("leader health check failed: %w", err) + } + app.setLeaderAvailability(true) // Leader is reachable + + app.processingMutex.RLock() + lastProcessedHeight := app.lastProcessedHeight + isCatchingUp := app.isCatchingUp + app.processingMutex.RUnlock() + + // Determine how many payloads to request + var requestLimit int + if isCatchingUp { + requestLimit = maxCatchupPayloads // e.g., 100 + app.logger.Debug("In catch-up mode, requesting more payloads", "limit", requestLimit) + } else { + requestLimit = batchSize // e.g., 10 + app.logger.Debug("In normal mode, requesting standard batch of payloads", "limit", requestLimit) + } + + // Get payloads since our last processed height + nextHeightToRequest := lastProcessedHeight + 1 + payloadsResponse, err := app.payloadClient.GetPayloadsSince(ctx, nextHeightToRequest, requestLimit) + if err != nil { + return fmt.Errorf("failed to get payloads since height %d: %w", nextHeightToRequest, err) + } + + if len(payloadsResponse.Payloads) == 0 { + app.logger.Debug("No new payloads available", "waiting_for_height", nextHeightToRequest) + return nil + } + + 
// Update catch-up mode status + currentlyCatchingUp := isCatchingUp + if !currentlyCatchingUp && len(payloadsResponse.Payloads) >= batchSize { + app.processingMutex.Lock() + app.isCatchingUp = true + app.processingMutex.Unlock() + app.logger.Info( + "Entering catch-up mode", + "current_height", lastProcessedHeight, + "available_payloads", len(payloadsResponse.Payloads), + ) + } else if currentlyCatchingUp && len(payloadsResponse.Payloads) < catchUpExitThreshold { + app.processingMutex.Lock() + app.isCatchingUp = false + app.processingMutex.Unlock() + app.logger.Info( + "Exiting catch-up mode", + "current_height", lastProcessedHeight, + "available_payloads", len(payloadsResponse.Payloads), + ) + } + + // Process payloads sequentially + processedCount := 0 + for _, payload := range payloadsResponse.Payloads { + select { + case <-app.appCtx.Done(): // Check for shutdown signal before processing each payload + return nil + default: + } + + // Get the most up-to-date lastProcessedHeight for sequence check + app.processingMutex.RLock() + currentSystemHeight := app.lastProcessedHeight + app.processingMutex.RUnlock() + + expectedHeightForThisPayload := currentSystemHeight + 1 + + // Case 1: Gap detected (payload is for a future height) + if payload.BlockHeight > expectedHeightForThisPayload { + app.logger.Warn( + "Gap detected in payload sequence, attempting to fill", + "expected_height", expectedHeightForThisPayload, + "received_payload_height", payload.BlockHeight, + "gap_size", payload.BlockHeight-expectedHeightForThisPayload, + ) + // Try to fill the gap from expectedHeightForThisPayload up to payload.BlockHeight - 1 + if err := app.fillPayloadGap(ctx, expectedHeightForThisPayload, payload.BlockHeight-1); err != nil { + return fmt.Errorf("failed to fill payload gap from %d to %d: %w", + expectedHeightForThisPayload, payload.BlockHeight-1, err) + } + // After gap fill, lastProcessedHeight should be (payload.BlockHeight - 1) + // Re-fetch currentSystemHeight to ensure 
the next check is correct + app.processingMutex.RLock() + currentSystemHeight = app.lastProcessedHeight + app.processingMutex.RUnlock() + expectedHeightForThisPayload = currentSystemHeight + 1 + } + + // Case 2: Payload is for an already processed or an older, unexpected height + if payload.BlockHeight < expectedHeightForThisPayload { + app.logger.Debug( + "Skipping already processed or out-of-order (older) payload", + "payload_height", payload.BlockHeight, + "expected_at_least", expectedHeightForThisPayload, + "current_system_height", currentSystemHeight, + ) + continue + } + + // Case 3: Payload is for the expected next height (critical check) + if payload.BlockHeight != expectedHeightForThisPayload { + // This should ideally not be reached if gap filling and previous checks are correct + return fmt.Errorf("critical sequence error: payload height %d does not match expected next height %d after potential gap fill. Current system height: %d", + payload.BlockHeight, expectedHeightForThisPayload, currentSystemHeight) + } + + // Process the payload + if err := app.processPayload(ctx, &payload); err != nil { + return fmt.Errorf("failed to process payload at height %d: %w", payload.BlockHeight, err) + } + + // Update processed height (this is critical) + app.processingMutex.Lock() + app.lastProcessedHeight = payload.BlockHeight + app.processingMutex.Unlock() + + processedCount++ + + // In catch-up mode, limit processing per cycle to avoid holding locks for too long + // or starving other operations, and to allow context cancellation checks. 
+ app.processingMutex.RLock() + stillCatchingUp := app.isCatchingUp + currentHeightAfterProcess := app.lastProcessedHeight + app.processingMutex.RUnlock() + + if stillCatchingUp && processedCount >= maxCatchupPayloads { + app.logger.Info( + "Processed maximum catch-up payloads in this cycle, will continue in next cycle", + "processed_count", processedCount, + "current_height", currentHeightAfterProcess, + ) + break // Exit the loop for this batch, will fetch new batch in next ticker + } + } + + if processedCount > 0 { + app.processingMutex.RLock() + finalHeight := app.lastProcessedHeight + catchUpMode := app.isCatchingUp + app.processingMutex.RUnlock() + + app.logger.Info( + "Successfully processed sequential payloads batch", + "processed_count", processedCount, + "final_height", finalHeight, + "catch_up_mode", catchUpMode, + ) + } + return nil +} + +// fillPayloadGap attempts to fetch and process missing payloads in a range +func (app *MemberNodeApp) fillPayloadGap(ctx context.Context, startHeight, endHeight uint64) error { + if startHeight > endHeight { + app.logger.Info( + "No gap to fill or invalid range", + "start", startHeight, "end", endHeight, + ) + return nil + } + app.logger.Info( + "Filling payload gap", + "start_height", startHeight, + "end_height", endHeight, + "gap_size", endHeight-startHeight+1, + ) + + for height := startHeight; height <= endHeight; height++ { + select { + case <-ctx.Done(): + return fmt.Errorf("context cancelled during gap fill at height %d: %w", height, ctx.Err()) + case <-app.appCtx.Done(): + return fmt.Errorf("application shutting down during gap fill at height %d: %w", height, app.appCtx.Err()) + default: + } + + // Get specific payload by height + payload, err := app.payloadClient.GetPayloadByHeight(ctx, height) + if err != nil { + return fmt.Errorf("failed to get payload for gap at height %d: %w", height, err) + } + + // Process the payload + if err := app.processPayload(ctx, payload); err != nil { + return fmt.Errorf("failed 
to process gap payload at height %d: %w", height, err) + } + + app.processingMutex.Lock() + if app.lastProcessedHeight != height-1 { + app.processingMutex.Unlock() + // This indicates a severe internal inconsistency or a concurrent modification problem. + // It means another part of the code or a previous iteration did not leave lastProcessedHeight as expected. + return fmt.Errorf("critical sequence error during gap fill: expected lastProcessedHeight %d before processing %d, but got %d", + height-1, height, app.lastProcessedHeight) + } + app.lastProcessedHeight = height + app.processingMutex.Unlock() + + app.logger.Debug("Filled gap payload", "height", height) + } + + app.logger.Info( + "Successfully filled payload gap", + "start_height", startHeight, + "end_height", endHeight, + "final_processed_height_after_gap_fill", endHeight, + ) + return nil +} + +// processPayload applies a single payload to the local geth client +func (app *MemberNodeApp) processPayload(ctx context.Context, payload *api.PayloadResponse) error { + app.logger.Info( + "Processing payload", + "payload_id", payload.PayloadID, + "block_height", payload.BlockHeight, + ) + + // Apply payload to local geth client + err := app.blockBuilder.FinalizeBlock(ctx, payload.PayloadID, payload.ExecutionPayload, "") + if err != nil { + app.logger.Error( + "Failed to finalize block", + "payload_id", payload.PayloadID, + "block_height", payload.BlockHeight, + "error", err, + ) + return fmt.Errorf("blockBuilder.FinalizeBlock failed for height %d: %w", payload.BlockHeight, err) + } + + app.logger.Info( + "Successfully applied payload", + "payload_id", payload.PayloadID, + "block_height", payload.BlockHeight, + ) + return nil +} + +// setLeaderAvailability updates the leader availability status +func (app *MemberNodeApp) setLeaderAvailability(available bool) { + app.connectionStatus.Lock() + defer app.connectionStatus.Unlock() + + if app.leaderAvailable != available { + app.leaderAvailable = available + 
app.logger.Info("Leader availability changed", "available", available) + } +} + +// GetLastProcessedHeight returns the last successfully processed block height +func (app *MemberNodeApp) GetLastProcessedHeight() uint64 { + app.processingMutex.RLock() + defer app.processingMutex.RUnlock() + return app.lastProcessedHeight +} + +// Stop gracefully stops the member node +func (app *MemberNodeApp) Stop() { + app.logger.Info("Stopping MemberNodeApp...") + app.cancel() // Signal all goroutines to stop + + waitCh := make(chan struct{}) + go func() { + app.wg.Wait() // Wait for all primary goroutines to finish + close(waitCh) + }() + + select { + case <-waitCh: + app.logger.Info("MemberNodeApp goroutines shut down gracefully.") + case <-time.After(shutdownTimeout + 1*time.Second): + app.logger.Warn("MemberNodeApp shutdown timed out waiting for goroutines.") + } + + app.processingMutex.RLock() + finalHeight := app.lastProcessedHeight + app.processingMutex.RUnlock() + + app.logger.Info("MemberNodeApp stopped.", "final_processed_height", finalHeight) +} diff --git a/cl/postgres/docker-compose.yml b/cl/postgres/docker-compose.yml new file mode 100644 index 000000000..fc2ee0bbf --- /dev/null +++ b/cl/postgres/docker-compose.yml @@ -0,0 +1,57 @@ +version: '3.8' + +services: + postgres: + image: postgres:17 + container_name: mev-commit-postgres + environment: + POSTGRES_DB: mevcommit + POSTGRES_USER: mevcommit + POSTGRES_PASSWORD: password123 + POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C" + ports: + - "5432:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + - ./init-db.sql:/docker-entrypoint-initdb.d/init-db.sql:ro + networks: + - mev-commit-network + healthcheck: + test: ["CMD-SHELL", "pg_isready -U mevcommit -d mevcommit"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + restart: unless-stopped + command: > + postgres + -c shared_preload_libraries=pg_stat_statements + -c pg_stat_statements.track=all + -c max_connections=200 + -c 
shared_buffers=256MB + -c effective_cache_size=1GB + -c maintenance_work_mem=64MB + -c checkpoint_completion_target=0.9 + -c wal_buffers=16MB + -c default_statistics_target=100 + -c random_page_cost=1.1 + -c effective_io_concurrency=200 + -c work_mem=4MB + -c min_wal_size=1GB + -c max_wal_size=4GB + -c log_statement=all + -c log_duration=on + -c log_line_prefix='%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ' + +volumes: + postgres_data: + driver: local + pgadmin_data: + driver: local + +networks: + mev-commit-network: + driver: bridge + ipam: + config: + - subnet: 172.20.0.0/16 \ No newline at end of file diff --git a/cl/postgres/init-db.sql b/cl/postgres/init-db.sql new file mode 100644 index 000000000..8420f1349 --- /dev/null +++ b/cl/postgres/init-db.sql @@ -0,0 +1,21 @@ +-- init-db.sql +-- This script will be automatically executed when PostgreSQL starts for the first time + +-- Create the execution_payloads table with proper indexing +CREATE TABLE IF NOT EXISTS execution_payloads ( + id SERIAL PRIMARY KEY, + payload_id VARCHAR(66) UNIQUE NOT NULL, -- e.g., 0x... 
(32 bytes hex + 0x prefix) + raw_execution_payload TEXT NOT NULL, + block_height BIGINT NOT NULL UNIQUE, + inserted_at TIMESTAMPTZ DEFAULT NOW() +); + +-- Create indexes for efficient querying +CREATE INDEX IF NOT EXISTS idx_execution_payloads_block_height ON execution_payloads(block_height); +CREATE INDEX IF NOT EXISTS idx_execution_payloads_inserted_at ON execution_payloads(inserted_at); +CREATE INDEX IF NOT EXISTS idx_execution_payloads_payload_id ON execution_payloads(payload_id); + +-- Create a partial index for recent payloads (optimization for common queries) +CREATE INDEX IF NOT EXISTS idx_execution_payloads_recent +ON execution_payloads(block_height DESC) +WHERE inserted_at > NOW() - INTERVAL '24 hours'; diff --git a/cl/singlenode/api/api_client.go b/cl/singlenode/api/api_client.go new file mode 100644 index 000000000..7fbaac52d --- /dev/null +++ b/cl/singlenode/api/api_client.go @@ -0,0 +1,204 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "time" +) + +// PayloadResponse represents the API response for payload requests +type PayloadResponse struct { + PayloadID string `json:"payload_id"` + ExecutionPayload string `json:"execution_payload"` + BlockHeight uint64 `json:"block_height"` + Timestamp int64 `json:"timestamp"` +} + +// PayloadListResponse represents the response for multiple payloads +type PayloadListResponse struct { + Payloads []PayloadResponse `json:"payloads"` + HasMore bool `json:"has_more"` + NextHeight uint64 `json:"next_height,omitempty"` + TotalCount int `json:"total_count"` +} + +// ErrorResponse represents an error response +type ErrorResponse struct { + Error string `json:"error"` + Code int `json:"code"` + Message string `json:"message"` +} + +// PayloadClient handles communication with the leader node's API +type PayloadClient struct { + baseURL string + httpClient *http.Client + logger *slog.Logger +} + +// NewPayloadClient creates a new payload API client +func 
NewPayloadClient(baseURL string, logger *slog.Logger) *PayloadClient { + return &PayloadClient{ + baseURL: baseURL, + httpClient: &http.Client{ + Timeout: 30 * time.Second, + }, + logger: logger.With("component", "PayloadClient"), + } +} + +// GetLatestPayload fetches the latest payload from the leader node +func (pc *PayloadClient) GetLatestPayload(ctx context.Context) (*PayloadResponse, error) { + url := fmt.Sprintf("%s/api/v1/payload/latest", pc.baseURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := pc.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + //nolint:errcheck + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + var errorResp ErrorResponse + if err := json.Unmarshal(body, &errorResp); err != nil { + return nil, fmt.Errorf("API error (status %d): %s", resp.StatusCode, string(body)) + } + return nil, fmt.Errorf("API error: %s", errorResp.Message) + } + + var payloadResp PayloadResponse + if err := json.Unmarshal(body, &payloadResp); err != nil { + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + pc.logger.Debug( + "Retrieved payload from leader", + "payload_id", payloadResp.PayloadID, + "block_height", payloadResp.BlockHeight, + ) + return &payloadResp, nil +} + +// GetPayloadsSince fetches payloads with block height >= sinceHeight from the leader node +func (pc *PayloadClient) GetPayloadsSince(ctx context.Context, sinceHeight uint64, limit int) (*PayloadListResponse, error) { + url := fmt.Sprintf("%s/api/v1/payload/since/%d?limit=%d", pc.baseURL, sinceHeight, limit) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: 
%w", err) + } + + resp, err := pc.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + //nolint:errcheck + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + var errorResp ErrorResponse + if err := json.Unmarshal(body, &errorResp); err != nil { + return nil, fmt.Errorf("API error (status %d): %s", resp.StatusCode, string(body)) + } + return nil, fmt.Errorf("API error: %s", errorResp.Message) + } + + var payloadListResp PayloadListResponse + if err := json.Unmarshal(body, &payloadListResp); err != nil { + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + pc.logger.Debug( + "Retrieved payloads since height from leader", + "since_height", sinceHeight, + "count", len(payloadListResp.Payloads), + "has_more", payloadListResp.HasMore, + "next_height", payloadListResp.NextHeight, + ) + + return &payloadListResp, nil +} + +// GetPayloadByHeight fetches a specific payload by block height from the leader node +func (pc *PayloadClient) GetPayloadByHeight(ctx context.Context, height uint64) (*PayloadResponse, error) { + url := fmt.Sprintf("%s/api/v1/payload/height/%d", pc.baseURL, height) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + resp, err := pc.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to execute request: %w", err) + } + //nolint:errcheck + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + if resp.StatusCode != http.StatusOK { + var errorResp ErrorResponse + if err := json.Unmarshal(body, &errorResp); err != nil { + return nil, fmt.Errorf("API error (status %d): %s", resp.StatusCode, string(body)) + } + return 
nil, fmt.Errorf("API error: %s", errorResp.Message) + } + + var payloadResp PayloadResponse + if err := json.Unmarshal(body, &payloadResp); err != nil { + return nil, fmt.Errorf("failed to unmarshal response: %w", err) + } + + pc.logger.Debug( + "Retrieved payload by height from leader", + "height", height, + "payload_id", payloadResp.PayloadID, + ) + return &payloadResp, nil +} + +// CheckHealth checks if the leader node API is healthy +func (pc *PayloadClient) CheckHealth(ctx context.Context) error { + url := fmt.Sprintf("%s/api/v1/health", pc.baseURL) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("failed to create health check request: %w", err) + } + + resp, err := pc.httpClient.Do(req) + if err != nil { + return fmt.Errorf("failed to execute health check: %w", err) + } + //nolint:errcheck + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("leader node unhealthy (status %d)", resp.StatusCode) + } + + return nil +} diff --git a/cl/singlenode/api/api_client_test.go b/cl/singlenode/api/api_client_test.go new file mode 100644 index 000000000..0d4446fd3 --- /dev/null +++ b/cl/singlenode/api/api_client_test.go @@ -0,0 +1,576 @@ +package api + +import ( + "context" + "encoding/json" + "log/slog" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" +) + +func TestNewPayloadClient(t *testing.T) { + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + baseURL := "http://localhost:8080" + + client := NewPayloadClient(baseURL, logger) + + if client.baseURL != baseURL { + t.Errorf("Expected baseURL %s, got %s", baseURL, client.baseURL) + } + + if client.httpClient == nil { + t.Error("Expected httpClient to be initialized") + } + + if client.httpClient.Timeout != 30*time.Second { + t.Errorf("Expected timeout to be 30s, got %v", client.httpClient.Timeout) + } + + if client.logger == nil { + t.Error("Expected logger to be initialized") + } +} + +func 
TestPayloadClient_GetLatestPayload_Success(t *testing.T) { + // Create test payload response + expectedPayload := PayloadResponse{ + PayloadID: "payload_123", + ExecutionPayload: "0x1234567890abcdef", + BlockHeight: 100, + Timestamp: time.Now().Unix(), + } + + // Create test server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/v1/payload/latest" { + t.Errorf("Expected path /api/v1/payload/latest, got %s", r.URL.Path) + } + if r.Method != http.MethodGet { + t.Errorf("Expected GET method, got %s", r.Method) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(expectedPayload) + })) + defer server.Close() + + // Create client + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + // Test the method + ctx := context.Background() + result, err := client.GetLatestPayload(ctx) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + if result.PayloadID != expectedPayload.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", expectedPayload.PayloadID, result.PayloadID) + } + + if result.ExecutionPayload != expectedPayload.ExecutionPayload { + t.Errorf("Expected ExecutionPayload %s, got %s", expectedPayload.ExecutionPayload, result.ExecutionPayload) + } + + if result.BlockHeight != expectedPayload.BlockHeight { + t.Errorf("Expected BlockHeight %d, got %d", expectedPayload.BlockHeight, result.BlockHeight) + } +} + +func TestPayloadClient_GetLatestPayload_ErrorResponse(t *testing.T) { + // Create test server that returns error + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + errorResp := ErrorResponse{ + Error: "internal_error", + Code: 500, + Message: "Internal server error occurred", + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + 
json.NewEncoder(w).Encode(errorResp) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetLatestPayload(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } + + expectedError := "API error: Internal server error occurred" + if err.Error() != expectedError { + t.Errorf("Expected error %s, got %s", expectedError, err.Error()) + } +} + +func TestPayloadClient_GetLatestPayload_InvalidJSON(t *testing.T) { + // Create test server that returns invalid JSON + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + w.Write([]byte("invalid json")) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetLatestPayload(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } +} + +func TestPayloadClient_GetLatestPayload_ContextCanceled(t *testing.T) { + // Create test server with delay + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(100 * time.Millisecond) + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + // Create context that will be canceled + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + result, err := client.GetLatestPayload(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } +} + +func 
TestPayloadClient_GetPayloadsSince_Success(t *testing.T) { + // Create test payload list response + expectedResponse := PayloadListResponse{ + Payloads: []PayloadResponse{ + { + PayloadID: "payload_100", + ExecutionPayload: "0x100", + BlockHeight: 100, + Timestamp: time.Now().Unix(), + }, + { + PayloadID: "payload_101", + ExecutionPayload: "0x101", + BlockHeight: 101, + Timestamp: time.Now().Unix(), + }, + }, + HasMore: true, + NextHeight: 102, + TotalCount: 50, + } + + // Create test server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath := "/api/v1/payload/since/100" + if r.URL.Path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, r.URL.Path) + } + + expectedQuery := "limit=10" + if r.URL.RawQuery != expectedQuery { + t.Errorf("Expected query %s, got %s", expectedQuery, r.URL.RawQuery) + } + + if r.Method != http.MethodGet { + t.Errorf("Expected GET method, got %s", r.Method) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(expectedResponse) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetPayloadsSince(ctx, 100, 10) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + if len(result.Payloads) != 2 { + t.Errorf("Expected 2 payloads, got %d", len(result.Payloads)) + } + + if result.HasMore != true { + t.Error("Expected HasMore to be true") + } + + if result.NextHeight != 102 { + t.Errorf("Expected NextHeight 102, got %d", result.NextHeight) + } + + if result.TotalCount != 50 { + t.Errorf("Expected TotalCount 50, got %d", result.TotalCount) + } +} + +func TestPayloadClient_GetPayloadsSince_ErrorResponse(t *testing.T) { + // Create test server that returns error + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + errorResp := ErrorResponse{ + Error: "not_found", + Code: 404, + Message: "No payloads found for the given height", + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNotFound) + json.NewEncoder(w).Encode(errorResp) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetPayloadsSince(ctx, 999, 10) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } + + expectedError := "API error: No payloads found for the given height" + if err.Error() != expectedError { + t.Errorf("Expected error %s, got %s", expectedError, err.Error()) + } +} + +func TestPayloadClient_GetPayloadByHeight_Success(t *testing.T) { + // Create test payload response + expectedPayload := PayloadResponse{ + PayloadID: "payload_150", + ExecutionPayload: "0x150", + BlockHeight: 150, + Timestamp: time.Now().Unix(), + } + + // Create test server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + expectedPath := "/api/v1/payload/height/150" + if r.URL.Path != expectedPath { + t.Errorf("Expected path %s, got %s", expectedPath, r.URL.Path) + } + + if r.Method != http.MethodGet { + t.Errorf("Expected GET method, got %s", r.Method) + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(expectedPayload) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetPayloadByHeight(ctx, 150) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + if result.PayloadID != expectedPayload.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", expectedPayload.PayloadID, result.PayloadID) + } + 
+ if result.BlockHeight != expectedPayload.BlockHeight { + t.Errorf("Expected BlockHeight %d, got %d", expectedPayload.BlockHeight, result.BlockHeight) + } +} + +func TestPayloadClient_GetPayloadByHeight_NotFound(t *testing.T) { + // Create test server that returns 404 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + errorResp := ErrorResponse{ + Error: "not_found", + Code: 404, + Message: "Payload not found for height 999", + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNotFound) + json.NewEncoder(w).Encode(errorResp) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetPayloadByHeight(ctx, 999) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } + + expectedError := "API error: Payload not found for height 999" + if err.Error() != expectedError { + t.Errorf("Expected error %s, got %s", expectedError, err.Error()) + } +} + +func TestPayloadClient_CheckHealth_Success(t *testing.T) { + // Create test server + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/v1/health" { + t.Errorf("Expected path /api/v1/health, got %s", r.URL.Path) + } + + if r.Method != http.MethodGet { + t.Errorf("Expected GET method, got %s", r.Method) + } + + w.WriteHeader(http.StatusOK) + w.Write([]byte("OK")) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + err := client.CheckHealth(ctx) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } +} + +func TestPayloadClient_CheckHealth_Unhealthy(t *testing.T) { + // Create test server that returns unhealthy status + server := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("Internal Server Error")) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + err := client.CheckHealth(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + expectedError := "leader node unhealthy (status 500)" + if err.Error() != expectedError { + t.Errorf("Expected error %s, got %s", expectedError, err.Error()) + } +} + +func TestPayloadClient_CheckHealth_NetworkError(t *testing.T) { + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient("http://nonexistent.example.com", logger) + + ctx := context.Background() + err := client.CheckHealth(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + // Should contain "failed to execute health check" + if !contains(err.Error(), "failed to execute health check") { + t.Errorf("Expected error to contain 'failed to execute health check', got %s", err.Error()) + } +} + +func TestPayloadClient_ErrorResponse_InvalidJSON(t *testing.T) { + // Create test server that returns non-JSON error response + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte("Bad Request - Not JSON")) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetLatestPayload(ctx) + + if err == nil { + t.Fatal("Expected error, got nil") + } + + if result != nil { + t.Error("Expected nil result on error") + } + + expectedError := "API error (status 400): Bad Request - Not JSON" + if err.Error() != expectedError { + t.Errorf("Expected error %s, got %s", expectedError, err.Error()) + } +} + +// Benchmark 
tests +func BenchmarkPayloadClient_GetLatestPayload(b *testing.B) { + payload := PayloadResponse{ + PayloadID: "payload_bench", + ExecutionPayload: "0xbenchmark", + BlockHeight: 1000, + Timestamp: time.Now().Unix(), + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(payload) + })) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + ctx := context.Background() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := client.GetLatestPayload(ctx) + if err != nil { + b.Fatalf("Unexpected error: %v", err) + } + } +} + +// Helper function to check if a string contains a substring +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || + len(s) > len(substr) && (s[:len(substr)] == substr || + s[len(s)-len(substr):] == substr || + containsHelper(s, substr))) +} + +func containsHelper(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} + +// Table-driven test for multiple scenarios +func TestPayloadClient_GetLatestPayload_TableDriven(t *testing.T) { + tests := []struct { + name string + serverResponse func(w http.ResponseWriter, r *http.Request) + expectedError bool + errorContains string + }{ + { + name: "successful response", + serverResponse: func(w http.ResponseWriter, r *http.Request) { + payload := PayloadResponse{ + PayloadID: "test_payload", + BlockHeight: 42, + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(payload) + }, + expectedError: false, + }, + { + name: "server error", + serverResponse: func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte("Internal Server Error")) + 
}, + expectedError: true, + errorContains: "API error (status 500)", + }, + { + name: "delayed response", + serverResponse: func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) // Short delay to test but not cause timeout + payload := PayloadResponse{ + PayloadID: "delayed_payload", + BlockHeight: 123, + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + json.NewEncoder(w).Encode(payload) + }, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(tt.serverResponse)) + defer server.Close() + + logger := slog.New(slog.NewTextHandler(os.Stdout, nil)) + client := NewPayloadClient(server.URL, logger) + + ctx := context.Background() + result, err := client.GetLatestPayload(ctx) + + if tt.expectedError { + if err == nil { + t.Fatal("Expected error, got nil") + } + if tt.errorContains != "" && !contains(err.Error(), tt.errorContains) { + t.Errorf("Expected error to contain %s, got %s", tt.errorContains, err.Error()) + } + if result != nil { + t.Error("Expected nil result on error") + } + } else { + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if result == nil { + t.Error("Expected non-nil result on success") + } + } + }) + } +} diff --git a/cl/singlenode/api/api_server.go b/cl/singlenode/api/api_server.go new file mode 100644 index 000000000..672a42b56 --- /dev/null +++ b/cl/singlenode/api/api_server.go @@ -0,0 +1,315 @@ +package api + +import ( + "context" + "database/sql" + "encoding/json" + "log/slog" + "net/http" + "strconv" + "strings" + "time" + + "github.com/primev/mev-commit/cl/types" +) + +// StateManager interface for accessing block build state +type StateManager interface { + GetBlockBuildState(ctx context.Context) types.BlockBuildState +} + +// PayloadServer provides HTTP API for member nodes to fetch payloads +type PayloadServer struct { + logger *slog.Logger + stateManager 
StateManager + payloadRepo types.PayloadRepository + server *http.Server +} + +// NewPayloadServer creates a new payload API server +func NewPayloadServer( + addr string, + stateManager StateManager, + payloadRepo types.PayloadRepository, + logger *slog.Logger, +) *PayloadServer { + mux := http.NewServeMux() + + ps := &PayloadServer{ + logger: logger.With("component", "PayloadServer"), + stateManager: stateManager, + payloadRepo: payloadRepo, + server: &http.Server{ + Addr: addr, + Handler: mux, + ReadTimeout: 30 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + }, + } + + // Register endpoints + mux.HandleFunc("/api/v1/payload/latest", ps.handleGetLatestPayload) + mux.HandleFunc("/api/v1/payload/since/", ps.handleGetPayloadsSince) + mux.HandleFunc("/api/v1/payload/height/", ps.handleGetPayloadByHeight) + mux.HandleFunc("/api/v1/health", ps.handleHealth) + + return ps +} + +// Start starts the HTTP server +func (ps *PayloadServer) Start(ctx context.Context) error { + ps.logger.Info("Starting payload API server", "addr", ps.server.Addr) + + // Start server in goroutine + go func() { + if err := ps.server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + ps.logger.Error("Payload API server error", "error", err) + } + }() + + // Wait for context cancellation to shutdown + <-ctx.Done() + return ps.Stop() +} + +// Stop gracefully stops the HTTP server +func (ps *PayloadServer) Stop() error { + ps.logger.Info("Stopping payload API server") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + return ps.server.Shutdown(ctx) +} + +// convertToResponse converts types.PayloadInfo to PayloadResponse +func convertToResponse(payload *types.PayloadInfo) PayloadResponse { + return PayloadResponse{ + PayloadID: payload.PayloadID, + ExecutionPayload: payload.ExecutionPayload, + BlockHeight: payload.BlockHeight, + Timestamp: payload.InsertedAt.Unix(), + } +} + +// handleGetLatestPayload 
returns the latest payload from the current block build state +func (ps *PayloadServer) handleGetLatestPayload(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + ps.writeError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + // Try to get from repository first if available + if ps.payloadRepo != nil { + if payload, err := ps.payloadRepo.GetLatestPayload(ctx); err == nil && payload != nil { + response := convertToResponse(payload) + ps.writeJSON(w, response, http.StatusOK) + ps.logger.Debug( + "Served latest payload from repository", + "payload_id", payload.PayloadID, + "height", payload.BlockHeight, + ) + return + } + } + + // Fallback to state manager + state := ps.stateManager.GetBlockBuildState(ctx) + + if state.PayloadID == "" || state.ExecutionPayload == "" { + ps.writeError(w, "No payload available", http.StatusNotFound) + return + } + + response := PayloadResponse{ + PayloadID: state.PayloadID, + ExecutionPayload: state.ExecutionPayload, + BlockHeight: 0, // We don't have height from state manager + Timestamp: time.Now().Unix(), + } + + ps.writeJSON(w, response, http.StatusOK) + ps.logger.Debug("Served latest payload from state", "payload_id", state.PayloadID) +} + +// handleGetPayloadsSince returns payloads since a given block height +func (ps *PayloadServer) handleGetPayloadsSince(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + ps.writeError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + if ps.payloadRepo == nil { + ps.writeError(w, "Payload repository not available", http.StatusServiceUnavailable) + return + } + + // Extract height from URL path: /api/v1/payload/since/{height} + pathParts := strings.Split(r.URL.Path, "/") + if len(pathParts) < 6 { + ps.writeError(w, "Block height required", http.StatusBadRequest) + return + } + + heightStr := pathParts[5] + height, err := 
strconv.ParseUint(heightStr, 10, 64) + if err != nil { + ps.writeError(w, "Invalid block height", http.StatusBadRequest) + return + } + + // Parse query parameters + limitStr := r.URL.Query().Get("limit") + limit := 50 // Default limit + if limitStr != "" { + if parsedLimit, err := strconv.Atoi(limitStr); err == nil && parsedLimit > 0 && parsedLimit <= 1000 { + limit = parsedLimit + } + } + + ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second) + defer cancel() + + payloads, err := ps.payloadRepo.GetPayloadsSince(ctx, height, limit+1) // +1 to check if there are more + if err != nil { + ps.logger.Error( + "Failed to get payloads since height", + "height", height, + "error", err, + ) + ps.writeError(w, "Failed to retrieve payloads", http.StatusInternalServerError) + return + } + + // Check if there are more payloads + hasMore := len(payloads) > limit + if hasMore { + payloads = payloads[:limit] // Remove the extra payload + } + + // Convert to response format + responsePayloads := make([]PayloadResponse, len(payloads)) + var nextHeight uint64 + for i, payload := range payloads { + responsePayloads[i] = convertToResponse(&payload) + if i == len(payloads)-1 { + nextHeight = payload.BlockHeight + 1 + } + } + + response := PayloadListResponse{ + Payloads: responsePayloads, + HasMore: hasMore, + NextHeight: nextHeight, + TotalCount: len(responsePayloads), + } + + ps.writeJSON(w, response, http.StatusOK) + ps.logger.Debug( + "Served payloads since height", + "since_height", height, + "count", len(responsePayloads), + "has_more", hasMore, + "next_height", nextHeight, + ) +} + +// handleGetPayloadByHeight returns a specific payload by block height +func (ps *PayloadServer) handleGetPayloadByHeight(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + ps.writeError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + if ps.payloadRepo == nil { + ps.writeError(w, "Payload repository not available", 
http.StatusServiceUnavailable) + return + } + + // Extract height from URL path: /api/v1/payload/height/{height} + pathParts := strings.Split(r.URL.Path, "/") + if len(pathParts) < 6 { + ps.writeError(w, "Block height required", http.StatusBadRequest) + return + } + + heightStr := pathParts[5] + height, err := strconv.ParseUint(heightStr, 10, 64) + if err != nil { + ps.writeError(w, "Invalid block height", http.StatusBadRequest) + return + } + + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + payload, err := ps.payloadRepo.GetPayloadByHeight(ctx, height) + if err != nil { + if err == sql.ErrNoRows { + ps.writeError(w, "Payload not found", http.StatusNotFound) + } else { + ps.logger.Error( + "Failed to get payload by height", + "height", height, + "error", err, + ) + ps.writeError(w, "Failed to retrieve payload", http.StatusInternalServerError) + } + return + } + + response := convertToResponse(payload) + ps.writeJSON(w, response, http.StatusOK) + ps.logger.Debug( + "Served payload by height", + "height", height, + "payload_id", payload.PayloadID, + ) +} + +// handleHealth returns server health status +func (ps *PayloadServer) handleHealth(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + ps.writeError(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + w.WriteHeader(http.StatusOK) + _, err := w.Write([]byte("OK")) + if err != nil { + ps.logger.Error( + "Failed to write health response", + "error", err, + ) + http.Error(w, "Internal Server Error", http.StatusInternalServerError) + return + } +} + +// writeJSON writes a JSON response +func (ps *PayloadServer) writeJSON(w http.ResponseWriter, data interface{}, statusCode int) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + + if err := json.NewEncoder(w).Encode(data); err != nil { + ps.logger.Error( + "Failed to encode JSON response", + "error", err, + ) + } +} + +// writeError writes an error 
response +func (ps *PayloadServer) writeError(w http.ResponseWriter, message string, statusCode int) { + response := ErrorResponse{ + Error: message, + Code: statusCode, + Message: message, + } + + ps.writeJSON(w, response, statusCode) +} diff --git a/cl/singlenode/api/api_server_test.go b/cl/singlenode/api/api_server_test.go new file mode 100644 index 000000000..8e6107b91 --- /dev/null +++ b/cl/singlenode/api/api_server_test.go @@ -0,0 +1,519 @@ +package api + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "log/slog" + + "github.com/primev/mev-commit/cl/types" +) + +// Mock implementations for testing + +type mockStateManager struct { + state types.BlockBuildState +} + +func (m *mockStateManager) GetBlockBuildState(ctx context.Context) types.BlockBuildState { + return m.state +} + +type mockPayloadRepository struct { + latestPayload *types.PayloadInfo + latestError error + payloadsByHeight map[uint64]*types.PayloadInfo + payloadsSince []types.PayloadInfo + payloadsSinceErr error + getByHeightErr error +} + +func (m *mockPayloadRepository) Close() error { + return nil +} + +func (m *mockPayloadRepository) GetLatestPayload(ctx context.Context) (*types.PayloadInfo, error) { + return m.latestPayload, m.latestError +} + +func (m *mockPayloadRepository) GetPayloadsSince(ctx context.Context, height uint64, limit int) ([]types.PayloadInfo, error) { + if m.payloadsSinceErr != nil { + return nil, m.payloadsSinceErr + } + return m.payloadsSince, nil +} + +func (m *mockPayloadRepository) GetPayloadByHeight(ctx context.Context, height uint64) (*types.PayloadInfo, error) { + if m.getByHeightErr != nil { + return nil, m.getByHeightErr + } + if payload, exists := m.payloadsByHeight[height]; exists { + return payload, nil + } + return nil, sql.ErrNoRows +} +func (m *mockPayloadRepository) SavePayload(ctx context.Context, info *types.PayloadInfo) error { + if m.latestPayload != nil && 
m.latestPayload.PayloadID == info.PayloadID { + return fmt.Errorf("payload already exists") + } + + // NOTE(review): latestPayload is mutated before height validation, and payloadsByHeight may be nil here (write would panic) — callers must pre-populate the map; confirm intended + m.latestPayload = info + + if info.BlockHeight == 0 { + return fmt.Errorf("invalid block height") + } + + m.payloadsByHeight[info.BlockHeight] = info + return nil +} + +// Helper function to create a test logger +func createTestLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(io.Discard, &slog.HandlerOptions{ + Level: slog.LevelError, // Only log errors during tests + })) +} + +// Helper function to create test payload info +func createTestPayloadInfo(id string, height uint64) *types.PayloadInfo { + return &types.PayloadInfo{ + PayloadID: id, + ExecutionPayload: fmt.Sprintf("payload-data-%s", id), + BlockHeight: height, + InsertedAt: time.Now(), + } +} + +func TestNewPayloadServer(t *testing.T) { + logger := createTestLogger() + stateManager := &mockStateManager{} + payloadRepo := &mockPayloadRepository{} + + server := NewPayloadServer("localhost:8080", stateManager, payloadRepo, logger) + + if server == nil { + t.Fatal("Expected non-nil PayloadServer") + } + + if server.server.Addr != "localhost:8080" { + t.Errorf("Expected addr localhost:8080, got %s", server.server.Addr) + } + + if server.server.ReadTimeout != 30*time.Second { + t.Errorf("Expected ReadTimeout 30s, got %v", server.server.ReadTimeout) + } + + if server.server.WriteTimeout != 30*time.Second { + t.Errorf("Expected WriteTimeout 30s, got %v", server.server.WriteTimeout) + } + + if server.server.IdleTimeout != 60*time.Second { + t.Errorf("Expected IdleTimeout 60s, got %v", server.server.IdleTimeout) + } +} + +func TestHandleGetLatestPayload_FromRepository(t *testing.T) { + testPayload := createTestPayloadInfo("test-payload-1", 100) + + stateManager := &mockStateManager{} + payloadRepo := &mockPayloadRepository{ + latestPayload: testPayload, + } + + server := NewPayloadServer("localhost:8080", stateManager, payloadRepo, createTestLogger()) + + req := 
httptest.NewRequest(http.MethodGet, "/api/v1/payload/latest", nil) + w := httptest.NewRecorder() + + server.handleGetLatestPayload(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var response PayloadResponse + if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if response.PayloadID != testPayload.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", testPayload.PayloadID, response.PayloadID) + } + + if response.ExecutionPayload != testPayload.ExecutionPayload { + t.Errorf("Expected ExecutionPayload %s, got %s", testPayload.ExecutionPayload, response.ExecutionPayload) + } + + if response.BlockHeight != testPayload.BlockHeight { + t.Errorf("Expected BlockHeight %d, got %d", testPayload.BlockHeight, response.BlockHeight) + } +} + +func TestHandleGetLatestPayload_FromStateManager(t *testing.T) { + state := types.BlockBuildState{ + PayloadID: "state-payload-1", + ExecutionPayload: "state-execution-data", + } + + stateManager := &mockStateManager{state: state} + payloadRepo := &mockPayloadRepository{ + latestPayload: nil, + latestError: fmt.Errorf("repository error"), + } + + server := NewPayloadServer("localhost:8080", stateManager, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/latest", nil) + w := httptest.NewRecorder() + + server.handleGetLatestPayload(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var response PayloadResponse + if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if response.PayloadID != state.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", state.PayloadID, response.PayloadID) + } + + if response.ExecutionPayload != state.ExecutionPayload { + t.Errorf("Expected ExecutionPayload %s, got %s", state.ExecutionPayload, 
response.ExecutionPayload) + } + + if response.BlockHeight != 0 { + t.Errorf("Expected BlockHeight 0, got %d", response.BlockHeight) + } +} + +func TestHandleGetLatestPayload_NoPayloadAvailable(t *testing.T) { + stateManager := &mockStateManager{ + state: types.BlockBuildState{}, + } + payloadRepo := &mockPayloadRepository{ + latestPayload: nil, + latestError: fmt.Errorf("no payload"), + } + + server := NewPayloadServer("localhost:8080", stateManager, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/latest", nil) + w := httptest.NewRecorder() + + server.handleGetLatestPayload(w, req) + + if w.Code != http.StatusNotFound { + t.Errorf("Expected status %d, got %d", http.StatusNotFound, w.Code) + } + + var response ErrorResponse + if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal error response: %v", err) + } + + if response.Error != "No payload available" { + t.Errorf("Expected error 'No payload available', got %s", response.Error) + } +} + +func TestHandleGetLatestPayload_MethodNotAllowed(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, &mockPayloadRepository{}, createTestLogger()) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/payload/latest", nil) + w := httptest.NewRecorder() + + server.handleGetLatestPayload(w, req) + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, w.Code) + } +} + +func TestHandleGetPayloadsSince_Success(t *testing.T) { + payloads := []types.PayloadInfo{ + *createTestPayloadInfo("payload-1", 101), + *createTestPayloadInfo("payload-2", 102), + *createTestPayloadInfo("payload-3", 103), + } + + payloadRepo := &mockPayloadRepository{ + payloadsSince: payloads, + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/since/100", nil) 
+ w := httptest.NewRecorder() + + server.handleGetPayloadsSince(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var response PayloadListResponse + if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if len(response.Payloads) != 3 { + t.Errorf("Expected 3 payloads, got %d", len(response.Payloads)) + } + + if response.HasMore != false { + t.Errorf("Expected HasMore false, got %v", response.HasMore) + } + + if response.NextHeight != 104 { + t.Errorf("Expected NextHeight 104, got %d", response.NextHeight) + } + + if response.TotalCount != 3 { + t.Errorf("Expected TotalCount 3, got %d", response.TotalCount) + } +} + +func TestHandleGetPayloadsSince_WithLimit(t *testing.T) { + // Repository returns 3 payloads while the request sets limit=2, so the server trims to 2 and reports hasMore + payloads := make([]types.PayloadInfo, 3) // Return 3 to test hasMore logic + for i := 0; i < 3; i++ { + payloads[i] = *createTestPayloadInfo(fmt.Sprintf("payload-%d", i+1), uint64(101+i)) + } + + payloadRepo := &mockPayloadRepository{ + payloadsSince: payloads, + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/since/100?limit=2", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadsSince(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var response PayloadListResponse + if err := json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if len(response.Payloads) != 2 { + t.Errorf("Expected 2 payloads, got %d", len(response.Payloads)) + } + + if response.HasMore != true { + t.Errorf("Expected HasMore true, got %v", response.HasMore) + } +} + +func TestHandleGetPayloadsSince_NoRepository(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, nil, 
createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/since/100", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadsSince(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("Expected status %d, got %d", http.StatusServiceUnavailable, w.Code) + } +} + +func TestHandleGetPayloadsSince_InvalidHeight(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, &mockPayloadRepository{}, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/since/invalid", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadsSince(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestHandleGetPayloadsSince_RepositoryError(t *testing.T) { + payloadRepo := &mockPayloadRepository{ + payloadsSinceErr: fmt.Errorf("database error"), + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/since/100", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadsSince(w, req) + + if w.Code != http.StatusInternalServerError { + t.Errorf("Expected status %d, got %d", http.StatusInternalServerError, w.Code) + } +} + +func TestHandleGetPayloadByHeight_Success(t *testing.T) { + testPayload := createTestPayloadInfo("height-payload", 150) + + payloadRepo := &mockPayloadRepository{ + payloadsByHeight: map[uint64]*types.PayloadInfo{ + 150: testPayload, + }, + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/height/150", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadByHeight(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + var response PayloadResponse + if err := 
json.Unmarshal(w.Body.Bytes(), &response); err != nil { + t.Fatalf("Failed to unmarshal response: %v", err) + } + + if response.PayloadID != testPayload.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", testPayload.PayloadID, response.PayloadID) + } + + if response.BlockHeight != testPayload.BlockHeight { + t.Errorf("Expected BlockHeight %d, got %d", testPayload.BlockHeight, response.BlockHeight) + } +} + +func TestHandleGetPayloadByHeight_NotFound(t *testing.T) { + payloadRepo := &mockPayloadRepository{ + payloadsByHeight: map[uint64]*types.PayloadInfo{}, + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/height/999", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadByHeight(w, req) + + if w.Code != http.StatusNotFound { + t.Errorf("Expected status %d, got %d", http.StatusNotFound, w.Code) + } +} + +func TestHandleGetPayloadByHeight_InvalidHeight(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, &mockPayloadRepository{}, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/height/invalid", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadByHeight(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected status %d, got %d", http.StatusBadRequest, w.Code) + } +} + +func TestHandleGetPayloadByHeight_NoRepository(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, nil, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/height/100", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadByHeight(w, req) + + if w.Code != http.StatusServiceUnavailable { + t.Errorf("Expected status %d, got %d", http.StatusServiceUnavailable, w.Code) + } +} + +func TestHandleGetPayloadByHeight_RepositoryError(t *testing.T) { + payloadRepo := &mockPayloadRepository{ + getByHeightErr: 
fmt.Errorf("database connection error"), + } + + server := NewPayloadServer("localhost:8080", &mockStateManager{}, payloadRepo, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/payload/height/100", nil) + w := httptest.NewRecorder() + + server.handleGetPayloadByHeight(w, req) + + if w.Code != http.StatusInternalServerError { + t.Errorf("Expected status %d, got %d", http.StatusInternalServerError, w.Code) + } +} + +func TestHandleHealth_Success(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, &mockPayloadRepository{}, createTestLogger()) + + req := httptest.NewRequest(http.MethodGet, "/api/v1/health", nil) + w := httptest.NewRecorder() + + server.handleHealth(w, req) + + if w.Code != http.StatusOK { + t.Errorf("Expected status %d, got %d", http.StatusOK, w.Code) + } + + body := strings.TrimSpace(w.Body.String()) + if body != "OK" { + t.Errorf("Expected body 'OK', got '%s'", body) + } +} + +func TestHandleHealth_MethodNotAllowed(t *testing.T) { + server := NewPayloadServer("localhost:8080", &mockStateManager{}, &mockPayloadRepository{}, createTestLogger()) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/health", nil) + w := httptest.NewRecorder() + + server.handleHealth(w, req) + + if w.Code != http.StatusMethodNotAllowed { + t.Errorf("Expected status %d, got %d", http.StatusMethodNotAllowed, w.Code) + } +} + +func TestConvertToResponse(t *testing.T) { + timestamp := time.Now() + payload := &types.PayloadInfo{ + PayloadID: "test-id", + ExecutionPayload: "test-execution-data", + BlockHeight: 42, + InsertedAt: timestamp, + } + + response := convertToResponse(payload) + + if response.PayloadID != payload.PayloadID { + t.Errorf("Expected PayloadID %s, got %s", payload.PayloadID, response.PayloadID) + } + + if response.ExecutionPayload != payload.ExecutionPayload { + t.Errorf("Expected ExecutionPayload %s, got %s", payload.ExecutionPayload, response.ExecutionPayload) + } + + if response.BlockHeight != 
payload.BlockHeight { + t.Errorf("Expected BlockHeight %d, got %d", payload.BlockHeight, response.BlockHeight) + } + + if response.Timestamp != timestamp.Unix() { + t.Errorf("Expected Timestamp %d, got %d", timestamp.Unix(), response.Timestamp) + } +} diff --git a/cl/singlenode/payloadstore/postgres.go b/cl/singlenode/payloadstore/postgres.go new file mode 100644 index 000000000..c0389cf3d --- /dev/null +++ b/cl/singlenode/payloadstore/postgres.go @@ -0,0 +1,266 @@ +package payloadstore + +import ( + "context" + "database/sql" + "fmt" + "log/slog" + "time" + + _ "github.com/lib/pq" + "github.com/primev/mev-commit/cl/types" // Import shared types +) + +// PostgresRepository implements the types.PayloadRepository interface using PostgreSQL. +type PostgresRepository struct { + db *sql.DB + logger *slog.Logger +} + +// NewPostgresRepository creates a new PostgresRepository. +// It also attempts to create the necessary table if it doesn't exist. +func NewPostgresRepository(ctx context.Context, dsn string, logger *slog.Logger) (*PostgresRepository, error) { + l := logger.With("component", "PostgresRepository") + + db, err := sql.Open("postgres", dsn) + if err != nil { + return nil, fmt.Errorf("failed to open postgres connection: %w", err) + } + + db.SetMaxOpenConns(25) + db.SetMaxIdleConns(25) + db.SetConnMaxLifetime(5 * time.Minute) + + pingCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + if err := db.PingContext(pingCtx); err != nil { + err := db.Close() + if err != nil { + l.Error("Failed to close database connection after error", "error", err) + } + return nil, fmt.Errorf("failed to ping postgres: %w", err) + } + + // Create table with enhanced schema for sequential access + schemaCreationQuery := ` + CREATE TABLE IF NOT EXISTS execution_payloads ( + id SERIAL PRIMARY KEY, + payload_id VARCHAR(66) UNIQUE NOT NULL, -- e.g., 0x... 
(32 bytes hex + 0x prefix) + raw_execution_payload TEXT NOT NULL, + block_height BIGINT NOT NULL, + inserted_at TIMESTAMPTZ DEFAULT NOW(), + + -- Indexes for efficient querying + UNIQUE(block_height) + ); + + -- Create indexes if they don't exist + CREATE INDEX IF NOT EXISTS idx_block_height ON execution_payloads(block_height); + CREATE INDEX IF NOT EXISTS idx_inserted_at ON execution_payloads(inserted_at); + ` + execCtx, execCancel := context.WithTimeout(ctx, 10*time.Second) + defer execCancel() + if _, err := db.ExecContext(execCtx, schemaCreationQuery); err != nil { + err := db.Close() + if err != nil { + l.Error("Failed to close database connection after error", "error", err) + } + return nil, fmt.Errorf("failed to create execution_payloads table: %w", err) + } + l.Info("Successfully connected to PostgreSQL and ensured table exists.") + return &PostgresRepository{db: db, logger: l}, nil +} + +// SavePayload saves the payload information to the database. +func (r *PostgresRepository) SavePayload(ctx context.Context, info *types.PayloadInfo) error { + query := ` + INSERT INTO execution_payloads (payload_id, raw_execution_payload, block_height) + VALUES ($1, $2, $3) + ON CONFLICT (payload_id) DO NOTHING; + ` // ON CONFLICT DO NOTHING will silently ignore duplicates by payload_id + + insertCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + result, err := r.db.ExecContext(insertCtx, query, info.PayloadID, info.ExecutionPayload, info.BlockHeight) + if err != nil { + r.logger.Error( + "Failed to insert payload into postgres", + "payload_id", info.PayloadID, + "block_height", info.BlockHeight, + "error", err, + ) + return fmt.Errorf("failed to insert payload into postgres: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err == nil && rowsAffected > 0 { + r.logger.Debug( + "Payload saved to database", + "payload_id", info.PayloadID, + "block_height", info.BlockHeight, + ) + } else if err == nil && rowsAffected == 0 { + 
r.logger.Debug( + "Payload already exists in database or no rows affected", + "payload_id", info.PayloadID, + "block_height", info.BlockHeight, + ) + } + + return nil +} + +// GetPayloadsSince retrieves payloads with block height >= sinceHeight, ordered by block height +func (r *PostgresRepository) GetPayloadsSince(ctx context.Context, sinceHeight uint64, limit int) ([]types.PayloadInfo, error) { + query := ` + SELECT payload_id, raw_execution_payload, block_height, inserted_at + FROM execution_payloads + WHERE block_height >= $1 + ORDER BY block_height ASC + LIMIT $2; + ` + + queryCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + rows, err := r.db.QueryContext(queryCtx, query, sinceHeight, limit) + if err != nil { + r.logger.Error( + "Failed to query payloads since height", + "since_height", sinceHeight, + "limit", limit, + "error", err, + ) + return nil, fmt.Errorf("failed to query payloads since height %d: %w", sinceHeight, err) + } + //nolint:errcheck + defer rows.Close() + + var payloads []types.PayloadInfo + for rows.Next() { + var payload types.PayloadInfo + err := rows.Scan( + &payload.PayloadID, + &payload.ExecutionPayload, + &payload.BlockHeight, + &payload.InsertedAt, + ) + if err != nil { + r.logger.Error( + "Failed to scan payload row", + "error", err, + ) + return nil, fmt.Errorf("failed to scan payload row: %w", err) + } + payloads = append(payloads, payload) + } + + if err := rows.Err(); err != nil { + r.logger.Error( + "Error iterating payload rows", + "error", err, + ) + return nil, fmt.Errorf("error iterating payload rows: %w", err) + } + + r.logger.Debug( + "Retrieved payloads since height", + "since_height", sinceHeight, + "count", len(payloads), + "limit", limit, + ) + + return payloads, nil +} + +// GetPayloadByHeight retrieves a specific payload by block height +func (r *PostgresRepository) GetPayloadByHeight(ctx context.Context, height uint64) (*types.PayloadInfo, error) { + query := ` + SELECT payload_id, 
raw_execution_payload, block_height, inserted_at + FROM execution_payloads + WHERE block_height = $1; + ` + + queryCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var payload types.PayloadInfo + err := r.db.QueryRowContext(queryCtx, query, height).Scan( + &payload.PayloadID, + &payload.ExecutionPayload, + &payload.BlockHeight, + &payload.InsertedAt, + ) + + if err != nil { + if err == sql.ErrNoRows { + r.logger.Debug("Payload not found for height", "height", height) + return nil, sql.ErrNoRows + } + r.logger.Error( + "Failed to query payload by height", + "height", height, + "error", err, + ) + return nil, fmt.Errorf("failed to query payload by height %d: %w", height, err) + } + + r.logger.Debug( + "Retrieved payload by height", + "height", height, + "payload_id", payload.PayloadID, + ) + + return &payload, nil +} + +// GetLatestPayload retrieves the most recent payload +func (r *PostgresRepository) GetLatestPayload(ctx context.Context) (*types.PayloadInfo, error) { + query := ` + SELECT payload_id, raw_execution_payload, block_height, inserted_at + FROM execution_payloads + ORDER BY block_height DESC + LIMIT 1; + ` + + queryCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + var payload types.PayloadInfo + err := r.db.QueryRowContext(queryCtx, query).Scan( + &payload.PayloadID, + &payload.ExecutionPayload, + &payload.BlockHeight, + &payload.InsertedAt, + ) + + if err != nil { + if err == sql.ErrNoRows { + r.logger.Debug("No payloads found in database") + return nil, sql.ErrNoRows + } + r.logger.Error( + "Failed to query latest payload", + "error", err, + ) + return nil, fmt.Errorf("failed to query latest payload: %w", err) + } + + r.logger.Debug( + "Retrieved latest payload", + "payload_id", payload.PayloadID, + "block_height", payload.BlockHeight, + ) + + return &payload, nil +} + +// Close closes the database connection. 
+func (r *PostgresRepository) Close() error { + if r.db != nil { + r.logger.Info("Closing PostgreSQL connection") + return r.db.Close() + } + return nil +} diff --git a/cl/singlenode/singlenode.go b/cl/singlenode/singlenode.go index 9bd7c11c9..db121e210 100644 --- a/cl/singlenode/singlenode.go +++ b/cl/singlenode/singlenode.go @@ -13,7 +13,10 @@ import ( "github.com/primev/mev-commit/cl/blockbuilder" "github.com/primev/mev-commit/cl/ethclient" + "github.com/primev/mev-commit/cl/singlenode/api" + "github.com/primev/mev-commit/cl/singlenode/payloadstore" localstate "github.com/primev/mev-commit/cl/singlenode/state" + "github.com/primev/mev-commit/cl/types" ) const ( @@ -30,11 +33,14 @@ type Config struct { EVMBuildDelayEmptyBlocks time.Duration PriorityFeeReceipt string HealthAddr string + PostgresDSN string + APIAddr string } type BlockBuilder interface { GetPayload(ctx context.Context) error FinalizeBlock(ctx context.Context, payloadID string, executionPayload string, extraData string) error + GetExecutionHead() *types.ExecutionHead } // SingleNodeApp orchestrates block production for a single node. 
@@ -45,6 +51,8 @@ type SingleNodeApp struct { // stateManager is a local state manager for block production // it's not anticipated to use DB as all the state already in geth client stateManager *localstate.LocalStateManager + payloadRepo types.PayloadRepository + payloadServer *api.PayloadServer appCtx context.Context cancel context.CancelFunc wg sync.WaitGroup @@ -90,11 +98,43 @@ func NewSingleNodeApp( cfg.PriorityFeeReceipt, ) + var pRepo types.PayloadRepository + if cfg.PostgresDSN != "" { + repo, err := payloadstore.NewPostgresRepository(ctx, cfg.PostgresDSN, logger) + if err != nil { + cancel() + logger.Error( + "failed to create payload repository", + "error", err, + ) + return nil, fmt.Errorf("failed to initialize payload repository: %w", err) + } + pRepo = repo + logger.Info("Payload repository initialized, payloads will be saved to PostgreSQL.") + } else { + logger.Info("PostgresDSN not provided, payload saving to DB is disabled.") + } + + var payloadServer *api.PayloadServer + if cfg.APIAddr != "" { + payloadServer = api.NewPayloadServer( + cfg.APIAddr, + stateMgr, + pRepo, + logger.With("component", "APIServer"), + ) + logger.Info("API server initialized for member nodes", "addr", cfg.APIAddr) + } else { + logger.Info("API address not provided, member node API is disabled.") + } + return &SingleNodeApp{ logger: logger, cfg: cfg, blockBuilder: bb, stateManager: stateMgr, + payloadRepo: pRepo, + payloadServer: payloadServer, appCtx: ctx, cancel: cancel, connectionRefused: false, @@ -176,6 +216,16 @@ func (app *SingleNodeApp) Start() { } }() + if app.payloadServer != nil { + app.wg.Add(1) + go func() { + defer app.wg.Done() + if err := app.payloadServer.Start(app.appCtx); err != nil { + app.logger.Error("API server error", "error", err) + } + }() + } + // Start block production loop app.wg.Add(1) go func() { @@ -252,8 +302,43 @@ func (app *SingleNodeApp) produceBlock() error { return errors.New("payload ID is empty after GetPayload call") } + // Get 
current block height from the execution head + executionHead := app.blockBuilder.GetExecutionHead() + var blockHeight uint64 + if executionHead != nil { + blockHeight = executionHead.BlockHeight + 1 // Next block height + } else { + app.logger.Warn("No execution head available, using height 0") + blockHeight = 0 + } + + if app.payloadRepo != nil { + payloadInfo := &types.PayloadInfo{ + PayloadID: currentState.PayloadID, + ExecutionPayload: currentState.ExecutionPayload, + BlockHeight: blockHeight, + } + saveCtx, saveCancel := context.WithTimeout(app.appCtx, 200*time.Millisecond) + defer saveCancel() + + if err := app.payloadRepo.SavePayload(saveCtx, payloadInfo); err != nil { + app.logger.Error( + "Failed to save payload to database", + "payload_id", currentState.PayloadID, + "error", err, + ) + return fmt.Errorf("failed to save payload to database: %w", err) + } else { + app.logger.Info("Payload details submitted to database for saving", "payload_id", currentState.PayloadID) + } + } + // Step 2: Finalize the block - app.logger.Info("finalizing block", "payload_id", currentState.PayloadID) + app.logger.Info( + "finalizing block", + "payload_id", currentState.PayloadID, + "block_height", blockHeight, + ) if err := app.blockBuilder.FinalizeBlock(app.appCtx, currentState.PayloadID, currentState.ExecutionPayload, ""); err != nil { return fmt.Errorf("failed to finalize block: %w", err) } @@ -278,5 +363,14 @@ func (app *SingleNodeApp) Stop() { case <-time.After(shutdownTimeout): app.logger.Warn("SingleNodeApp shutdown timed out waiting for run loop.") } + + if app.payloadRepo != nil { + if err := app.payloadRepo.Close(); err != nil { + app.logger.Error("Error closing payload repository", "error", err) + } else { + app.logger.Info("Payload repository closed.") + } + } + app.logger.Info("SingleNodeApp stopped.") } diff --git a/cl/singlenode/singlenode_test.go b/cl/singlenode/singlenode_test.go index 838a0d9b7..e11a7bec6 100644 --- a/cl/singlenode/singlenode_test.go +++ 
b/cl/singlenode/singlenode_test.go @@ -34,6 +34,15 @@ func (m *MockBlockBuilder) FinalizeBlock(ctx context.Context, payloadID string, return args.Error(0) } +// Add missing method to satisfy BlockBuilder interface +func (m *MockBlockBuilder) GetExecutionHead() *types.ExecutionHead { + args := m.Called() + if head, ok := args.Get(0).(*types.ExecutionHead); ok { + return head + } + return nil +} + // MockConnectionRefused provides a safe implementation for testing type MockConnectionRefused struct{} @@ -166,6 +175,7 @@ func TestProduceBlock(t *testing.T) { }) require.NoError(t, err) + mockBuilder.On("GetExecutionHead").Return((*types.ExecutionHead)(nil)) mockBuilder.On("GetPayload", mock.Anything).Return(nil) mockBuilder.On("FinalizeBlock", mock.Anything, "test-payload-id", "test-execution-payload", "").Return(nil) @@ -210,6 +220,7 @@ func TestProduceBlock(t *testing.T) { }) require.NoError(t, err) + mockBuilder.On("GetExecutionHead").Return((*types.ExecutionHead)(nil)) mockBuilder.On("GetPayload", mock.Anything).Return(nil) mockBuilder.On("FinalizeBlock", mock.Anything, "test-payload-id", "test-execution-payload", "").Return(assert.AnError) diff --git a/cl/types/types.go b/cl/types/types.go index bca82812f..51a5a2a91 100644 --- a/cl/types/types.go +++ b/cl/types/types.go @@ -1,5 +1,10 @@ package types +import ( + "context" + "time" +) + type ExecutionHead struct { BlockHeight uint64 BlockHash []byte @@ -36,3 +41,18 @@ const ( RedisMsgTypePending RedisMsgType = "0" RedisMsgTypeNew RedisMsgType = ">" ) + +type PayloadInfo struct { + PayloadID string + ExecutionPayload string + BlockHeight uint64 + InsertedAt time.Time +} + +type PayloadRepository interface { + SavePayload(ctx context.Context, info *PayloadInfo) error + GetPayloadsSince(ctx context.Context, sinceHeight uint64, limit int) ([]PayloadInfo, error) + GetPayloadByHeight(ctx context.Context, height uint64) (*PayloadInfo, error) + GetLatestPayload(ctx context.Context) (*PayloadInfo, error) + Close() error +} 
From 32e3325e94184dcf5aa6f1123aab842eb381896e Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Mon, 26 May 2025 22:47:54 +0800 Subject: [PATCH 17/24] feat: clean up docker --- cl/postgres/docker-compose.yml | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/cl/postgres/docker-compose.yml b/cl/postgres/docker-compose.yml index fc2ee0bbf..77fad4d4d 100644 --- a/cl/postgres/docker-compose.yml +++ b/cl/postgres/docker-compose.yml @@ -25,23 +25,6 @@ services: restart: unless-stopped command: > postgres - -c shared_preload_libraries=pg_stat_statements - -c pg_stat_statements.track=all - -c max_connections=200 - -c shared_buffers=256MB - -c effective_cache_size=1GB - -c maintenance_work_mem=64MB - -c checkpoint_completion_target=0.9 - -c wal_buffers=16MB - -c default_statistics_target=100 - -c random_page_cost=1.1 - -c effective_io_concurrency=200 - -c work_mem=4MB - -c min_wal_size=1GB - -c max_wal_size=4GB - -c log_statement=all - -c log_duration=on - -c log_line_prefix='%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ' volumes: postgres_data: @@ -54,4 +37,4 @@ networks: driver: bridge ipam: config: - - subnet: 172.20.0.0/16 \ No newline at end of file + - subnet: 172.20.0.0/16 From 2aec71cac5f851145fbadc2385241405844abebf Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Mon, 26 May 2025 23:00:11 +0800 Subject: [PATCH 18/24] fix: updated readme --- cl/README.md | 96 +++++----------------------------------------------- 1 file changed, 8 insertions(+), 88 deletions(-) diff --git a/cl/README.md b/cl/README.md index b48f4317e..b774ce107 100644 --- a/cl/README.md +++ b/cl/README.md @@ -230,91 +230,6 @@ Run the client with the configuration file: ## Running the Single Node Application (snode) -The single node application provides a simplified MEV-commit setup that doesn't require Redis. 
- -### Build the Single Node Application - -```bash -go mod tidy -go build -o snode main.go -``` - -### Configuration - -The snode application can be configured via command-line flags, environment variables, or a YAML configuration file. - -#### Command-Line Flags - -- `--instance-id`: **(Required)** Unique instance ID for this node. -- `--eth-client-url`: Ethereum Execution client Engine API URL (default: `http://localhost:8551`). -- `--jwt-secret`: Hex-encoded JWT secret for Ethereum Execution client Engine API (default: `13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c`). -- `--priority-fee-recipient`: **(Required)** Ethereum address for receiving priority fees (block proposer fee). -- `--evm-build-delay`: Delay after initiating payload construction before calling getPayload (default: `100ms`). -- `--evm-build-delay-empty-block`: Minimum time since last block to build an empty block (default: `2s`, 0 to disable skipping). -- `--health-addr`: Address for health check endpoint (default: `:8080`). -- `--config`: Path to a YAML configuration file. -- `--log-fmt`: Log format ('text' or 'json') (default: `text`). -- `--log-level`: Log level ('debug', 'info', 'warn', 'error') (default: `info`). -- `--log-tags`: Comma-separated log tags (e.g., `env:prod,service:snode`). 
- -#### Environment Variables - -- `SNODE_INSTANCE_ID` -- `SNODE_ETH_CLIENT_URL` -- `SNODE_JWT_SECRET` -- `SNODE_PRIORITY_FEE_RECIPIENT` -- `SNODE_EVM_BUILD_DELAY` -- `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` -- `SNODE_HEALTH_ADDR` -- `SNODE_CONFIG` -- `MEV_COMMIT_LOG_FMT` -- `MEV_COMMIT_LOG_LEVEL` -- `MEV_COMMIT_LOG_TAGS` - -### Run the Single Node Application - -Run the application using command-line flags: - -```bash -./snode start \ - --instance-id "snode1" \ - --eth-client-url "http://localhost:8551" \ - --jwt-secret "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" \ - --priority-fee-recipient "0xYourEthereumAddress" \ - --evm-build-delay "100ms" \ - --evm-build-delay-empty-block "2s" \ - --log-level "info" -``` - -**Note**: - -- Replace `"0xYourEthereumAddress"` with a valid Ethereum address for receiving priority fees. -- The JWT secret should be a 64-character hex string (32 bytes). - -### Using a Configuration File for snode - -Create a `snode-config.yaml` file: - -```yaml -instance-id: "snode1" -eth-client-url: "http://localhost:8551" -jwt-secret: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c" -priority-fee-recipient: "0xYourEthereumAddress" -evm-build-delay: "100ms" -evm-build-delay-empty-block: "2s" -log-fmt: "text" -log-level: "info" -log-tags: "env:dev,service:snode" -``` - -Run the application with the configuration file: - -```bash -./snode start --config snode-config.yaml -``` - -## Running the Single Node Application (snode) - The single node application provides a simplified MEV-commit setup that doesn't require Redis, but using Postgres to save payloads, so member nodes could request that payload later on. 
## Architecture Overview @@ -328,7 +243,7 @@ The application supports two operational modes: We will use Docker Compose to run Redis -### Docker Compose Configuration +### Postgres Docker Compose Configuration Postgres is configured in `postgres` folder within `docker-compose.yml` @@ -353,7 +268,7 @@ go mod tidy go build -o snode main.go ``` -### Configuration +### SNode Configuration The snode application can be configured via command-line flags, environment variables, or a YAML configuration file. @@ -381,9 +296,10 @@ The snode application can be configured via command-line flags, environment vari - `--leader-api-url`: **(Required)** Leader node API URL for member nodes (e.g., `http://leader:9090`) - `--poll-interval`: Interval for polling leader node for new payloads (default: `1s`) -### Environment Variables +### SNode Environment Variables **Common:** + - `SNODE_INSTANCE_ID` - `SNODE_ETH_CLIENT_URL` - `SNODE_JWT_SECRET` @@ -394,6 +310,7 @@ The snode application can be configured via command-line flags, environment vari - `MEV_COMMIT_LOG_TAGS` **Leader Node:** + - `SNODE_PRIORITY_FEE_RECIPIENT` - `SNODE_EVM_BUILD_DELAY` - `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` @@ -401,6 +318,7 @@ The snode application can be configured via command-line flags, environment vari - `SNODE_API_ADDR` **Member Node:** + - `MEMBER_LEADER_API_URL` - `MEMBER_POLL_INTERVAL` @@ -515,11 +433,13 @@ Access health endpoints at: `http://localhost:8080/health` (or configured port) For a complete leader-follower setup: 1. **Start Leader Node**: + ```bash ./snode leader --instance-id "leader" --priority-fee-recipient "0xYourAddress" --api-addr ":9090" ``` 2. 
**Start Member Node(s)**: + ```bash ./snode member --instance-id "member1" --leader-api-url "http://localhost:9090" --eth-client-url "http://localhost:8552" --health-addr ":8081" ``` From 4839eefda6984cea1d25b4e5a7f236a4371a52e1 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 27 May 2025 12:50:14 +0800 Subject: [PATCH 19/24] fix: fixed go mod --- cl/go.mod | 1 + cl/go.sum | 2 ++ 2 files changed, 3 insertions(+) diff --git a/cl/go.mod b/cl/go.mod index 95f097407..1f8c77bed 100644 --- a/cl/go.mod +++ b/cl/go.mod @@ -8,6 +8,7 @@ require ( github.com/go-redis/redismock/v9 v9.2.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/golang/mock v1.6.0 + github.com/lib/pq v1.10.9 github.com/redis/go-redis/v9 v9.6.1 github.com/urfave/cli/v2 v2.27.5 golang.org/x/tools v0.29.0 diff --git a/cl/go.sum b/cl/go.sum index 77dbe8528..d406a3e90 100644 --- a/cl/go.sum +++ b/cl/go.sum @@ -113,6 +113,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= From a70a2bdecda8c006822ed48c936193f459f29df9 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 27 May 2025 13:23:25 +0800 Subject: [PATCH 20/24] fix: fix lint --- cl/singlenode/api/api_client_test.go | 35 ++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 7 deletions(-) diff --git 
a/cl/singlenode/api/api_client_test.go b/cl/singlenode/api/api_client_test.go index 0d4446fd3..e1394145c 100644 --- a/cl/singlenode/api/api_client_test.go +++ b/cl/singlenode/api/api_client_test.go @@ -54,7 +54,10 @@ func TestPayloadClient_GetLatestPayload_Success(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(expectedPayload) + err := json.NewEncoder(w).Encode(expectedPayload) + if err != nil { + t.Fatalf("Failed to encode response: %v", err) + } })) defer server.Close() @@ -123,7 +126,10 @@ func TestPayloadClient_GetLatestPayload_InvalidJSON(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - w.Write([]byte("invalid json")) + _, err := w.Write([]byte("invalid json")) + if err != nil { + t.Fatalf("Failed to write response: %v", err) + } })) defer server.Close() @@ -208,7 +214,10 @@ func TestPayloadClient_GetPayloadsSince_Success(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(expectedResponse) + err := json.NewEncoder(w).Encode(expectedResponse) + if err != nil { + t.Fatalf("Failed to encode response: %v", err) + } })) defer server.Close() @@ -296,7 +305,10 @@ func TestPayloadClient_GetPayloadByHeight_Success(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(expectedPayload) + err := json.NewEncoder(w).Encode(expectedPayload) + if err != nil { + t.Fatalf("Failed to encode response: %v", err) + } })) defer server.Close() @@ -366,7 +378,10 @@ func TestPayloadClient_CheckHealth_Success(t *testing.T) { } w.WriteHeader(http.StatusOK) - w.Write([]byte("OK")) + _, err := w.Write([]byte("OK")) + if err != nil { + t.Fatalf("Failed to write response: %v", err) + } })) defer server.Close() @@ -385,7 +400,10 @@ func 
TestPayloadClient_CheckHealth_Unhealthy(t *testing.T) { // Create test server that returns unhealthy status server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("Internal Server Error")) + _, err := w.Write([]byte("Internal Server Error")) + if err != nil { + t.Fatalf("Failed to write response: %v", err) + } })) defer server.Close() @@ -521,7 +539,10 @@ func TestPayloadClient_GetLatestPayload_TableDriven(t *testing.T) { name: "server error", serverResponse: func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusInternalServerError) - w.Write([]byte("Internal Server Error")) + _, err := w.Write([]byte("Internal Server Error")) + if err != nil { + t.Fatalf("Failed to write response: %v", err) + } }, expectedError: true, errorContains: "API error (status 500)", From 5645c995a4884b0ba0ac75c3eee00fde54491a45 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 27 May 2025 13:41:47 +0800 Subject: [PATCH 21/24] fix: lint --- cl/singlenode/api/api_client_test.go | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/cl/singlenode/api/api_client_test.go b/cl/singlenode/api/api_client_test.go index e1394145c..26c3471e9 100644 --- a/cl/singlenode/api/api_client_test.go +++ b/cl/singlenode/api/api_client_test.go @@ -97,7 +97,10 @@ func TestPayloadClient_GetLatestPayload_ErrorResponse(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusInternalServerError) - json.NewEncoder(w).Encode(errorResp) + err := json.NewEncoder(w).Encode(errorResp) + if err != nil { + t.Fatalf("Failed to encode error response: %v", err) + } })) defer server.Close() @@ -259,7 +262,10 @@ func TestPayloadClient_GetPayloadsSince_ErrorResponse(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNotFound) - json.NewEncoder(w).Encode(errorResp) + err := 
json.NewEncoder(w).Encode(errorResp) + if err != nil { + t.Fatalf("Failed to encode error response: %v", err) + } })) defer server.Close() @@ -342,7 +348,10 @@ func TestPayloadClient_GetPayloadByHeight_NotFound(t *testing.T) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusNotFound) - json.NewEncoder(w).Encode(errorResp) + err := json.NewEncoder(w).Encode(errorResp) + if err != nil { + t.Fatalf("Failed to encode error response: %v", err) + } })) defer server.Close() @@ -444,7 +453,10 @@ func TestPayloadClient_ErrorResponse_InvalidJSON(t *testing.T) { // Create test server that returns non-JSON error response server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) - w.Write([]byte("Bad Request - Not JSON")) + _, err := w.Write([]byte("Bad Request - Not JSON")) + if err != nil { + t.Fatalf("Failed to write response: %v", err) + } })) defer server.Close() From b3e9c175fe75b94220e737028feea72f1a390734 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Tue, 27 May 2025 13:42:53 +0800 Subject: [PATCH 22/24] fix: lint --- cl/singlenode/api/api_client_test.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cl/singlenode/api/api_client_test.go b/cl/singlenode/api/api_client_test.go index 26c3471e9..e6b2d22a9 100644 --- a/cl/singlenode/api/api_client_test.go +++ b/cl/singlenode/api/api_client_test.go @@ -492,7 +492,10 @@ func BenchmarkPayloadClient_GetLatestPayload(b *testing.B) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(payload) + err := json.NewEncoder(w).Encode(payload) + if err != nil { + b.Fatalf("Failed to encode response: %v", err) + } })) defer server.Close() @@ -543,7 +546,10 @@ func TestPayloadClient_GetLatestPayload_TableDriven(t *testing.T) { } 
w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(payload) + err := json.NewEncoder(w).Encode(payload) + if err != nil { + t.Fatalf("Failed to encode response: %v", err) + } }, expectedError: false, }, @@ -569,7 +575,10 @@ func TestPayloadClient_GetLatestPayload_TableDriven(t *testing.T) { } w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(payload) + err := json.NewEncoder(w).Encode(payload) + if err != nil { + t.Fatalf("Failed to encode response: %v", err) + } }, expectedError: false, }, From 7ea3b5c86751f1ccab9b1346a96425a8a670b04d Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Fri, 30 May 2025 10:07:53 +0800 Subject: [PATCH 23/24] fix: updated readme --- cl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cl/README.md b/cl/README.md index b774ce107..f132f0ef9 100644 --- a/cl/README.md +++ b/cl/README.md @@ -241,7 +241,7 @@ The application supports two operational modes: ## Running Postgres -We will use Docker Compose to run Redis +We will use Docker Compose to run Postgres ### Postgres Docker Compose Configuration From 55de94b61302fd02424cd6c0d7667fd82b418729 Mon Sep 17 00:00:00 2001 From: Mikhail Wall Date: Sat, 31 May 2025 13:12:53 +0800 Subject: [PATCH 24/24] fix: fixed pr comments --- cl/README.md | 20 +++++++++--------- cl/cmd/singlenode/main.go | 22 ++++++++++---------- cl/{ => singlenode}/membernode/membernode.go | 0 3 files changed, 21 insertions(+), 21 deletions(-) rename cl/{ => singlenode}/membernode/membernode.go (100%) diff --git a/cl/README.md b/cl/README.md index f132f0ef9..3b0c65042 100644 --- a/cl/README.md +++ b/cl/README.md @@ -300,22 +300,22 @@ The snode application can be configured via command-line flags, environment vari **Common:** -- `SNODE_INSTANCE_ID` -- `SNODE_ETH_CLIENT_URL` -- `SNODE_JWT_SECRET` -- `SNODE_HEALTH_ADDR` -- `SNODE_CONFIG` +- `LEADER_INSTANCE_ID` +- `LEADER_ETH_CLIENT_URL` +- 
`LEADER_JWT_SECRET` +- `LEADER_HEALTH_ADDR` +- `LEADER_CONFIG` - `MEV_COMMIT_LOG_FMT` - `MEV_COMMIT_LOG_LEVEL` - `MEV_COMMIT_LOG_TAGS` **Leader Node:** -- `SNODE_PRIORITY_FEE_RECIPIENT` -- `SNODE_EVM_BUILD_DELAY` -- `SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK` -- `SNODE_POSTGRES_DSN` -- `SNODE_API_ADDR` +- `LEADER_PRIORITY_FEE_RECIPIENT` +- `LEADER_EVM_BUILD_DELAY` +- `LEADER_EVM_BUILD_DELAY_EMPTY_BLOCK` +- `LEADER_POSTGRES_DSN` +- `LEADER_API_ADDR` **Member Node:** diff --git a/cl/cmd/singlenode/main.go b/cl/cmd/singlenode/main.go index c19f48839..04c2991a8 100644 --- a/cl/cmd/singlenode/main.go +++ b/cl/cmd/singlenode/main.go @@ -13,8 +13,8 @@ import ( "syscall" "time" - "github.com/primev/mev-commit/cl/membernode" "github.com/primev/mev-commit/cl/singlenode" + "github.com/primev/mev-commit/cl/singlenode/membernode" "github.com/primev/mev-commit/x/util" "github.com/urfave/cli/v2" "github.com/urfave/cli/v2/altsrc" @@ -41,13 +41,13 @@ var ( configFlag = &cli.StringFlag{ Name: "config", Usage: "Path to YAML config file", - EnvVars: []string{"SNODE_CONFIG"}, + EnvVars: []string{"LEADER_CONFIG"}, } instanceIDFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "instance-id", Usage: "Unique instance ID for this node (for logging/identification)", - EnvVars: []string{"SNODE_INSTANCE_ID"}, + EnvVars: []string{"LEADER_INSTANCE_ID"}, Required: true, Action: func(_ *cli.Context, s string) error { if s == "" { @@ -60,7 +60,7 @@ var ( ethClientURLFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "eth-client-url", Usage: "Ethereum Execution client Engine API URL (e.g., http://localhost:8551)", - EnvVars: []string{"SNODE_ETH_CLIENT_URL"}, + EnvVars: []string{"LEADER_ETH_CLIENT_URL"}, Value: "http://localhost:8551", Action: func(_ *cli.Context, s string) error { if _, err := url.Parse(s); err != nil { @@ -73,7 +73,7 @@ var ( jwtSecretFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "jwt-secret", Usage: "Hex-encoded JWT secret for Ethereum Execution client Engine API", - EnvVars: 
[]string{"SNODE_JWT_SECRET"}, + EnvVars: []string{"LEADER_JWT_SECRET"}, Value: "13373d9a0257983ad150392d7ddb2f9172c9396b4c450e26af469d123c7aaa5c", Action: func(_ *cli.Context, s string) error { if len(s) != 64 { @@ -125,21 +125,21 @@ var ( evmBuildDelayFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ Name: "evm-build-delay", Usage: "Delay after initiating payload construction before calling getPayload (e.g., '200ms')", - EnvVars: []string{"SNODE_EVM_BUILD_DELAY"}, + EnvVars: []string{"LEADER_EVM_BUILD_DELAY"}, Value: 100 * time.Millisecond, }) evmBuildDelayEmptyBlockFlag = altsrc.NewDurationFlag(&cli.DurationFlag{ Name: "evm-build-delay-empty-block", Usage: "Minimum time since last block to build an empty block (0 to disable skipping, e.g., '2s')", - EnvVars: []string{"SNODE_EVM_BUILD_DELAY_EMPTY_BLOCK"}, + EnvVars: []string{"LEADER_EVM_BUILD_DELAY_EMPTY_BLOCK"}, Value: 2 * time.Second, }) priorityFeeReceiptFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "priority-fee-recipient", Usage: "Ethereum address for receiving priority fees (block proposer fee)", - EnvVars: []string{"SNODE_PRIORITY_FEE_RECIPIENT"}, + EnvVars: []string{"LEADER_PRIORITY_FEE_RECIPIENT"}, Required: true, Action: func(c *cli.Context, s string) error { if !strings.HasPrefix(s, "0x") || len(s) != 42 { @@ -156,7 +156,7 @@ var ( healthAddrPortFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "health-addr", Usage: "Address for health check endpoint (e.g., ':8080')", - EnvVars: []string{"SNODE_HEALTH_ADDR"}, + EnvVars: []string{"LEADER_HEALTH_ADDR"}, Value: ":8080", Action: func(_ *cli.Context, s string) error { if !strings.HasPrefix(s, ":") { @@ -175,7 +175,7 @@ var ( Name: "postgres-dsn", Usage: "PostgreSQL DSN for storing payloads. If empty, saving to DB is disabled. 
" + "(e.g., 'postgres://user:pass@host:port/dbname?sslmode=disable')", - EnvVars: []string{"SNODE_POSTGRES_DSN"}, + EnvVars: []string{"LEADER_POSTGRES_DSN"}, Value: "", // Default to empty, making it optional Category: categoryDatabase, }) @@ -183,7 +183,7 @@ var ( apiAddrFlag = altsrc.NewStringFlag(&cli.StringFlag{ Name: "api-addr", Usage: "Address for member node API endpoint (e.g., ':9090'). If empty, API is disabled.", - EnvVars: []string{"SNODE_API_ADDR"}, + EnvVars: []string{"LEADER_API_ADDR"}, Value: ":9090", Action: func(_ *cli.Context, s string) error { if s == "" { diff --git a/cl/membernode/membernode.go b/cl/singlenode/membernode/membernode.go similarity index 100% rename from cl/membernode/membernode.go rename to cl/singlenode/membernode/membernode.go