diff --git a/src/server/server_impl.go b/src/server/server_impl.go index a377cb625..a0803f2a9 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -99,11 +99,11 @@ func (server *server) Runtime() loader.IFace { return server.runtime } -func NewServer(name string, opts ...settings.Option) Server { - return newServer(name, opts...) +func NewServer(name string, store stats.Store, opts ...settings.Option) Server { + return newServer(name, store, opts...) } -func newServer(name string, opts ...settings.Option) *server { +func newServer(name string, store stats.Store, opts ...settings.Option) *server { s := settings.NewSettings() for _, opt := range opts { @@ -119,7 +119,7 @@ func newServer(name string, opts ...settings.Option) *server { ret.debugPort = s.DebugPort // setup stats - ret.store = stats.NewDefaultStore() + ret.store = store ret.scope = ret.store.Scope(name) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) diff --git a/src/service_cmd/main.go b/src/service_cmd/main.go index 6b0faba63..5df1ab032 100644 --- a/src/service_cmd/main.go +++ b/src/service_cmd/main.go @@ -3,5 +3,6 @@ package main import "github.com/lyft/ratelimit/src/service_cmd/runner" func main() { + runner := runner.NewRunner() runner.Run() } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 3c39f696e..2c0d8f83e 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -6,6 +6,8 @@ import ( "net/http" "time" + stats "github.com/lyft/gostats" + "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" @@ -19,7 +21,19 @@ import ( logger "github.com/sirupsen/logrus" ) -func Run() { +type Runner struct { + statsStore stats.Store +} + +func NewRunner() Runner { + return Runner{stats.NewDefaultStore()} +} + +func (runner *Runner) GetStatsStore() stats.Store { + return runner.statsStore +} + +func (runner *Runner) Run() { s := settings.NewSettings() 
logLevel, err := logger.ParseLevel(s.LogLevel) @@ -29,7 +43,7 @@ func Run() { logger.SetLevel(logLevel) } - srv := server.NewServer("ratelimit", settings.GrpcUnaryInterceptor(nil)) + srv := server.NewServer("ratelimit", runner.statsStore, settings.GrpcUnaryInterceptor(nil)) var perSecondPool redis.Pool if s.RedisPerSecond { diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index faa8e41c3..1965e3466 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -41,18 +41,20 @@ func newDescriptorStatusLegacy( } } +// TODO: Once the runner supports stopping the server (https://github.com/lyft/ratelimit/issues/119), +// stop the server at the end of each test so the gRPC port can be reused across these integration tests. func TestBasicConfig(t *testing.T) { t.Run("WithoutPerSecondRedis", testBasicConfig("8083", "false", "0")) t.Run("WithPerSecondRedis", testBasicConfig("8085", "true", "0")) - t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("8083", "false", "1000")) - t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("8085", "true", "1000")) + t.Run("WithoutPerSecondRedisWithLocalCache", testBasicConfig("18083", "false", "1000")) + t.Run("WithPerSecondRedisWithLocalCache", testBasicConfig("18085", "true", "1000")) } func TestBasicTLSConfig(t *testing.T) { t.Run("WithoutPerSecondRedisTLS", testBasicConfigAuthTLS("8087", "false", "0")) t.Run("WithPerSecondRedisTLS", testBasicConfigAuthTLS("8089", "true", "0")) - t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("8087", "false", "1000")) - t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("8089", "true", "1000")) + t.Run("WithoutPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18087", "false", "1000")) + t.Run("WithPerSecondRedisTLSWithLocalCache", testBasicConfigAuthTLS("18089", "true", "1000")) } func testBasicConfigAuthTLS(grpcPort, perSecond string,
local_cache_size string) func(*testing.T) { @@ -93,6 +95,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu local_cache_size_val, _ := strconv.Atoi(local_cache_size) enable_local_cache := local_cache_size_val > 0 + runner := runner.NewRunner() go func() { runner.Run() @@ -128,6 +131,10 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu response) assert.NoError(err) + // store.NewCounter returns the existing counter when one with the same name has already been created. + key1HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.basic.%s.total_hits", getCacheKey("key1", enable_local_cache))) + assert.Equal(1, int(key1HitCounter.Value())) + // Now come up with a random key, and go over limit for a minute limit which should always work. r := rand.New(rand.NewSource(time.Now().UnixNano())) randomInt := r.Int() @@ -151,6 +158,8 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu newDescriptorStatus(status, 20, pb.RateLimitResponse_RateLimit_MINUTE, limitRemaining)}}, response) assert.NoError(err) + key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) + assert.Equal(i+1, int(key2HitCounter.Value())) } // Limit now against 2 keys in the same domain.
@@ -180,6 +189,11 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu newDescriptorStatus(status, 10, pb.RateLimitResponse_RateLimit_HOUR, limitRemaining2)}}, response) assert.NoError(err) + key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache))) + assert.Equal(i+26, int(key2HitCounter.Value())) + + key3HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key3", enable_local_cache))) + assert.Equal(i+1, int(key3HitCounter.Value())) } } } @@ -205,6 +219,7 @@ func testBasicConfigLegacy(local_cache_size string) func(*testing.T) { local_cache_size_val, _ := strconv.Atoi(local_cache_size) enable_local_cache := local_cache_size_val > 0 + runner := runner.NewRunner() go func() { runner.Run() }()