From 32373a75695b81641f4a5e73e11b4b1f43e05c87 Mon Sep 17 00:00:00 2001 From: David Weitzman Date: Tue, 26 May 2020 16:41:49 -0700 Subject: [PATCH] Split redis-specific logic from generic key-value store logic This is a pure refactoring with no behavior changes. It's a step toward being able to add memcache as a backend (see #140). This PR moves RateLimitCache from the redis package to a new "limiter" package, along with code for time/jitter, local cache stats, and constructing cache keys. All that can be reused with memcache. After this PR, the redis package is imported in exactly two places: - in service_cmd/runner/runner.go to call redis.NewRateLimiterCacheImplFromSettings() - in service/ratelimit.go in ShouldRateLimit to identify if a recovered panic is a redis.RedisError. If so, a stat is incremented and the panic() propagation is ended in favor of returning the error as the function result. The PR also includes changes by goimports to test/service/ratelimit_test.go so that the difference between package name vs file path name is explicit instead of implicit. 
Signed-off-by: David Weitzman --- src/{redis => limiter}/cache.go | 2 +- src/limiter/cache_key.go | 90 +++++++++++ src/{redis => limiter}/local_cache_stats.go | 2 +- src/limiter/time.go | 40 +++++ src/redis/cache_impl.go | 170 +++++--------------- src/server/server_impl.go | 5 +- src/service/ratelimit.go | 7 +- src/service_cmd/runner/runner.go | 24 +-- test/mocks/limiter/limiter.go | 136 ++++++++++++++++ test/mocks/mocks.go | 3 +- test/mocks/redis/redis.go | 128 +-------------- test/redis/bench_test.go | 3 +- test/redis/cache_impl_test.go | 16 +- test/service/ratelimit_test.go | 16 +- 14 files changed, 344 insertions(+), 298 deletions(-) rename src/{redis => limiter}/cache.go (98%) create mode 100644 src/limiter/cache_key.go rename src/{redis => limiter}/local_cache_stats.go (98%) create mode 100644 src/limiter/time.go create mode 100644 test/mocks/limiter/limiter.go diff --git a/src/redis/cache.go b/src/limiter/cache.go similarity index 98% rename from src/redis/cache.go rename to src/limiter/cache.go index b090cc995..2ca16956f 100644 --- a/src/redis/cache.go +++ b/src/limiter/cache.go @@ -1,4 +1,4 @@ -package redis +package limiter import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" diff --git a/src/limiter/cache_key.go b/src/limiter/cache_key.go new file mode 100644 index 000000000..65540fa22 --- /dev/null +++ b/src/limiter/cache_key.go @@ -0,0 +1,90 @@ +package limiter + +import ( + "bytes" + "strconv" + "sync" + + pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/config" +) + +type CacheKeyGenerator struct { + // bytes.Buffer pool used to efficiently generate cache keys. 
+ bufferPool sync.Pool +} + +func NewCacheKeyGenerator() CacheKeyGenerator { + return CacheKeyGenerator{bufferPool: sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }} +} + +type CacheKey struct { + Key string + // True if the key corresponds to a limit with a SECOND unit. False otherwise. + PerSecond bool +} + +func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { + return unit == pb.RateLimitResponse_RateLimit_SECOND +} + +// Convert a rate limit into a time divider. +// @param unit supplies the unit to convert. +// @return the divider to use in time computations. +func UnitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { + switch unit { + case pb.RateLimitResponse_RateLimit_SECOND: + return 1 + case pb.RateLimitResponse_RateLimit_MINUTE: + return 60 + case pb.RateLimitResponse_RateLimit_HOUR: + return 60 * 60 + case pb.RateLimitResponse_RateLimit_DAY: + return 60 * 60 * 24 + } + + panic("should not get here") +} + +// Generate a cache key for a limit lookup. +// @param domain supplies the cache key domain. +// @param descriptor supplies the descriptor to generate the key for. +// @param limit supplies the rate limit to generate the key for (may be nil). +// @param now supplies the current unix time. +// @return CacheKey struct. 
+func (this *CacheKeyGenerator) GenerateCacheKey( + domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) CacheKey { + + if limit == nil { + return CacheKey{ + Key: "", + PerSecond: false, + } + } + + b := this.bufferPool.Get().(*bytes.Buffer) + defer this.bufferPool.Put(b) + b.Reset() + + b.WriteString(domain) + b.WriteByte('_') + + for _, entry := range descriptor.Entries { + b.WriteString(entry.Key) + b.WriteByte('_') + b.WriteString(entry.Value) + b.WriteByte('_') + } + + divider := UnitToDivider(limit.Limit.Unit) + b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) + + return CacheKey{ + Key: b.String(), + PerSecond: isPerSecondLimit(limit.Limit.Unit)} +} diff --git a/src/redis/local_cache_stats.go b/src/limiter/local_cache_stats.go similarity index 98% rename from src/redis/local_cache_stats.go rename to src/limiter/local_cache_stats.go index 60a94194f..d0d59dc27 100644 --- a/src/redis/local_cache_stats.go +++ b/src/limiter/local_cache_stats.go @@ -1,4 +1,4 @@ -package redis +package limiter import ( "github.com/coocood/freecache" diff --git a/src/limiter/time.go b/src/limiter/time.go new file mode 100644 index 000000000..e6a779e70 --- /dev/null +++ b/src/limiter/time.go @@ -0,0 +1,40 @@ +package limiter + +import ( + "math/rand" + "sync" + "time" +) + +type timeSourceImpl struct{} + +func NewTimeSourceImpl() TimeSource { + return &timeSourceImpl{} +} + +func (this *timeSourceImpl) UnixNow() int64 { + return time.Now().Unix() +} + +// rand for jitter. 
+type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func NewLockedSource(seed int64) JitterRandSource { + return &lockedSource{src: rand.NewSource(seed)} +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/src/redis/cache_impl.go b/src/redis/cache_impl.go index 4e37d10f0..1beb0de43 100644 --- a/src/redis/cache_impl.go +++ b/src/redis/cache_impl.go @@ -1,18 +1,16 @@ package redis import ( - "bytes" "math" "math/rand" - "strconv" - "sync" - "time" "github.com/coocood/freecache" - pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" + "github.com/envoyproxy/ratelimit/src/server" + "github.com/envoyproxy/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -24,72 +22,11 @@ type rateLimitCacheImpl struct { // limits regardless of unit. If this client is not nil, then it // is used for limits that have a SECOND unit. perSecondClient Client - timeSource TimeSource + timeSource limiter.TimeSource jitterRand *rand.Rand expirationJitterMaxSeconds int64 - // bytes.Buffer pool used to efficiently generate cache keys. - bufferPool sync.Pool - localCache *freecache.Cache -} - -// Convert a rate limit into a time divider. -// @param unit supplies the unit to convert. -// @return the divider to use in time computations. 
-func unitToDivider(unit pb.RateLimitResponse_RateLimit_Unit) int64 { - switch unit { - case pb.RateLimitResponse_RateLimit_SECOND: - return 1 - case pb.RateLimitResponse_RateLimit_MINUTE: - return 60 - case pb.RateLimitResponse_RateLimit_HOUR: - return 60 * 60 - case pb.RateLimitResponse_RateLimit_DAY: - return 60 * 60 * 24 - } - - panic("should not get here") -} - -// Generate a cache key for a limit lookup. -// @param domain supplies the cache key domain. -// @param descriptor supplies the descriptor to generate the key for. -// @param limit supplies the rate limit to generate the key for (may be nil). -// @param now supplies the current unix time. -// @return cacheKey struct. -func (this *rateLimitCacheImpl) generateCacheKey( - domain string, descriptor *pb_struct.RateLimitDescriptor, limit *config.RateLimit, now int64) cacheKey { - - if limit == nil { - return cacheKey{ - key: "", - perSecond: false, - } - } - - b := this.bufferPool.Get().(*bytes.Buffer) - defer this.bufferPool.Put(b) - b.Reset() - - b.WriteString(domain) - b.WriteByte('_') - - for _, entry := range descriptor.Entries { - b.WriteString(entry.Key) - b.WriteByte('_') - b.WriteString(entry.Value) - b.WriteByte('_') - } - - divider := unitToDivider(limit.Limit.Unit) - b.WriteString(strconv.FormatInt((now/divider)*divider, 10)) - - return cacheKey{ - key: b.String(), - perSecond: isPerSecondLimit(limit.Limit.Unit)} -} - -func isPerSecondLimit(unit pb.RateLimitResponse_RateLimit_Unit) bool { - return unit == pb.RateLimitResponse_RateLimit_SECOND + cacheKeyGenerator limiter.CacheKeyGenerator + localCache *freecache.Cache } func max(a uint32, b uint32) uint32 { @@ -99,12 +36,6 @@ func max(a uint32, b uint32) uint32 { return b } -type cacheKey struct { - key string - // True if the key corresponds to a limit with a SECOND unit. False otherwise. 
- perSecond bool -} - func pipelineAppend(client Client, key string, hitsAddend uint32, result *uint32, expirationSeconds int64) (err error) { if err = client.DoCmd(result, "INCRBY", key, hitsAddend); err != nil { return @@ -125,14 +56,15 @@ func (this *rateLimitCacheImpl) DoLimit( // request.HitsAddend could be 0 (default value) if not specified by the caller in the Ratelimit request. hitsAddend := max(1, request.HitsAddend) - // First build a list of all cache keys that we are actually going to hit. generateCacheKey() + // First build a list of all cache keys that we are actually going to hit. GenerateCacheKey() // returns an empty string in the key if there is no limit so that we can keep the arrays // all the same size. assert.Assert(len(request.Descriptors) == len(limits)) - cacheKeys := make([]cacheKey, len(request.Descriptors)) + cacheKeys := make([]limiter.CacheKey, len(request.Descriptors)) now := this.timeSource.UnixNow() for i := 0; i < len(request.Descriptors); i++ { - cacheKeys[i] = this.generateCacheKey(request.Domain, request.Descriptors[i], limits[i], now) + cacheKeys[i] = this.cacheKeyGenerator.GenerateCacheKey( + request.Domain, request.Descriptors[i], limits[i], now) // Increase statistics for limits hit by their respective requests. if limits[i] != nil { @@ -146,34 +78,34 @@ func (this *rateLimitCacheImpl) DoLimit( // Now, actually setup the pipeline, skipping empty cache keys. for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { continue } if this.localCache != nil { // Get returns the value or not found error. 
- _, err := this.localCache.Get([]byte(cacheKey.key)) + _, err := this.localCache.Get([]byte(cacheKey.Key)) if err == nil { isOverLimitWithLocalCache[i] = true - logger.Debugf("cache key is over the limit: %s", cacheKey.key) + logger.Debugf("cache key is over the limit: %s", cacheKey.Key) continue } } - logger.Debugf("looking up cache key: %s", cacheKey.key) + logger.Debugf("looking up cache key: %s", cacheKey.Key) - expirationSeconds := unitToDivider(limits[i].Limit.Unit) + expirationSeconds := limiter.UnitToDivider(limits[i].Limit.Unit) if this.expirationJitterMaxSeconds > 0 { expirationSeconds += this.jitterRand.Int63n(this.expirationJitterMaxSeconds) } // Use the perSecondConn if it is not nil and the cacheKey represents a per second Limit. - if this.perSecondClient != nil && cacheKey.perSecond { - if err = pipelineAppend(this.perSecondClient, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + if this.perSecondClient != nil && cacheKey.PerSecond { + if err = pipelineAppend(this.perSecondClient, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { break } } else { - if err = pipelineAppend(this.client, cacheKey.key, hitsAddend, &results[i], expirationSeconds); err != nil { + if err = pipelineAppend(this.client, cacheKey.Key, hitsAddend, &results[i], expirationSeconds); err != nil { break } } @@ -184,7 +116,7 @@ func (this *rateLimitCacheImpl) DoLimit( responseDescriptorStatuses := make([]*pb.RateLimitResponse_DescriptorStatus, len(request.Descriptors)) for i, cacheKey := range cacheKeys { - if cacheKey.key == "" { + if cacheKey.Key == "" { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ Code: pb.RateLimitResponse_OK, @@ -213,7 +145,7 @@ func (this *rateLimitCacheImpl) DoLimit( // We need to know it in both the OK and OVER_LIMIT scenarios. 
nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * config.NearLimitRatio))) - logger.Debugf("cache key: %s current: %d", cacheKey.key, limitAfterIncrease) + logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease) if limitAfterIncrease > overLimitThreshold { responseDescriptorStatuses[i] = &pb.RateLimitResponse_DescriptorStatus{ @@ -244,9 +176,9 @@ func (this *rateLimitCacheImpl) DoLimit( // similar to mongo_1h, mongo_2h, etc. In the hour 1 (0h0m - 0h59m), the cache key is mongo_1h, we start // to get ratelimited in the 50th minute, the ttl of local_cache will be set as 1 hour(0h50m-1h49m). // In the time of 1h1m, since the cache key becomes different (mongo_2h), it won't get ratelimited. - err := this.localCache.Set([]byte(cacheKey.key), []byte{}, int(unitToDivider(limits[i].Limit.Unit))) + err := this.localCache.Set([]byte(cacheKey.Key), []byte{}, int(limiter.UnitToDivider(limits[i].Limit.Unit))) if err != nil { - logger.Errorf("Failing to set local cache key: %s", cacheKey.key) + logger.Errorf("Failing to set local cache key: %s", cacheKey.Key) } } } else { @@ -275,55 +207,33 @@ func (this *rateLimitCacheImpl) DoLimit( return responseDescriptorStatuses } -func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) RateLimitCache { +func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) limiter.RateLimitCache { return &rateLimitCacheImpl{ client: client, perSecondClient: perSecondClient, timeSource: timeSource, jitterRand: jitterRand, expirationJitterMaxSeconds: expirationJitterMaxSeconds, - bufferPool: newBufferPool(), + cacheKeyGenerator: limiter.NewCacheKeyGenerator(), localCache: localCache, } } -func newBufferPool() sync.Pool { - return sync.Pool{ - New: func() interface{} 
{ - return new(bytes.Buffer) - }, +func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freecache.Cache, srv server.Server, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64) limiter.RateLimitCache { + var perSecondPool Client + if s.RedisPerSecond { + perSecondPool = NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, + s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) } -} - -type timeSourceImpl struct{} - -func NewTimeSourceImpl() TimeSource { - return &timeSourceImpl{} -} - -func (this *timeSourceImpl) UnixNow() int64 { - return time.Now().Unix() -} - -// rand for jitter. -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func NewLockedSource(seed int64) JitterRandSource { - return &lockedSource{src: rand.NewSource(seed)} -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() + var otherPool Client + otherPool = NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, + s.RedisPipelineWindow, s.RedisPipelineLimit) + + return NewRateLimitCacheImpl( + otherPool, + perSecondPool, + timeSource, + jitterRand, + expirationJitterMaxSeconds, + localCache) } diff --git a/src/server/server_impl.go b/src/server/server_impl.go index b0349594e..da92df840 100644 --- a/src/server/server_impl.go +++ b/src/server/server_impl.go @@ -9,8 +9,6 @@ import ( "net/http/pprof" "sort" - "github.com/envoyproxy/ratelimit/src/redis" - "os" "os/signal" "syscall" @@ -19,6 +17,7 @@ import ( "github.com/coocood/freecache" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/settings" "github.com/gorilla/mux" reuseport 
"github.com/kavu/go_reuseport" @@ -159,7 +158,7 @@ func newServer(name string, store stats.Store, localCache *freecache.Cache, opts ret.scope = ret.store.Scope(name) ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go"))) if localCache != nil { - ret.store.AddStatGenerator(redis.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) + ret.store.AddStatGenerator(limiter.NewLocalCacheStats(localCache, ret.scope.Scope("localcache"))) } // setup runtime diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index 3982a39ed..07a8c3132 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -7,9 +7,10 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/assert" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/lyft/goruntime/loader" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -52,7 +53,7 @@ type service struct { configLoader config.RateLimitConfigLoader config config.RateLimitConfig runtimeUpdateEvent chan int - cache redis.RateLimitCache + cache limiter.RateLimitCache stats serviceStats rlStatsScope stats.Scope legacy *legacyService @@ -174,7 +175,7 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { return this.config } -func NewService(runtime loader.IFace, cache redis.RateLimitCache, +func NewService(runtime loader.IFace, cache limiter.RateLimitCache, configLoader config.RateLimitConfigLoader, stats stats.Scope) RateLimitServiceServer { newService := &service{ diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index 471caadcc..5e43307ad 100644 --- a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -14,6 +14,7 @@ import ( pb_legacy "github.com/envoyproxy/ratelimit/proto/ratelimit" 
"github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" "github.com/envoyproxy/ratelimit/src/server" ratelimit "github.com/envoyproxy/ratelimit/src/service" @@ -49,24 +50,15 @@ func (runner *Runner) Run() { srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil)) - var perSecondPool redis.Client - if s.RedisPerSecond { - perSecondPool = redis.NewClientImpl(srv.Scope().Scope("redis_per_second_pool"), s.RedisPerSecondTls, s.RedisPerSecondAuth, - s.RedisPerSecondUrl, s.RedisPerSecondPoolSize, s.RedisPipelineWindow, s.RedisPipelineLimit) - } - var otherPool redis.Client - otherPool = redis.NewClientImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize, - s.RedisPipelineWindow, s.RedisPipelineLimit) - service := ratelimit.NewService( srv.Runtime(), - redis.NewRateLimitCacheImpl( - otherPool, - perSecondPool, - redis.NewTimeSourceImpl(), - rand.New(redis.NewLockedSource(time.Now().Unix())), - s.ExpirationJitterMaxSeconds, - localCache), + redis.NewRateLimiterCacheImplFromSettings( + s, + localCache, + srv, + limiter.NewTimeSourceImpl(), + rand.New(limiter.NewLockedSource(time.Now().Unix())), + s.ExpirationJitterMaxSeconds), config.NewRateLimitConfigLoaderImpl(), srv.Scope().Scope("service")) diff --git a/test/mocks/limiter/limiter.go b/test/mocks/limiter/limiter.go new file mode 100644 index 000000000..f5c9f8bfa --- /dev/null +++ b/test/mocks/limiter/limiter.go @@ -0,0 +1,136 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/envoyproxy/ratelimit/src/limiter (interfaces: RateLimitCache,TimeSource,JitterRandSource) + +// Package mock_limiter is a generated GoMock package. 
+package mock_limiter + +import ( + context "context" + v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" + config "github.com/envoyproxy/ratelimit/src/config" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockRateLimitCache is a mock of RateLimitCache interface +type MockRateLimitCache struct { + ctrl *gomock.Controller + recorder *MockRateLimitCacheMockRecorder +} + +// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache +type MockRateLimitCacheMockRecorder struct { + mock *MockRateLimitCache +} + +// NewMockRateLimitCache creates a new mock instance +func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { + mock := &MockRateLimitCache{ctrl: ctrl} + mock.recorder = &MockRateLimitCacheMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { + return m.recorder +} + +// DoLimit mocks base method +func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) + ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) + return ret0 +} + +// DoLimit indicates an expected call of DoLimit +func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) +} + +// MockTimeSource is a mock of TimeSource interface +type MockTimeSource struct { + ctrl *gomock.Controller + recorder *MockTimeSourceMockRecorder +} + +// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource +type MockTimeSourceMockRecorder struct { + mock *MockTimeSource +} + +// NewMockTimeSource creates a new mock instance +func 
NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { + mock := &MockTimeSource{ctrl: ctrl} + mock.recorder = &MockTimeSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { + return m.recorder +} + +// UnixNow mocks base method +func (m *MockTimeSource) UnixNow() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnixNow") + ret0, _ := ret[0].(int64) + return ret0 +} + +// UnixNow indicates an expected call of UnixNow +func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) +} + +// MockJitterRandSource is a mock of JitterRandSource interface +type MockJitterRandSource struct { + ctrl *gomock.Controller + recorder *MockJitterRandSourceMockRecorder +} + +// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource +type MockJitterRandSourceMockRecorder struct { + mock *MockJitterRandSource +} + +// NewMockJitterRandSource creates a new mock instance +func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { + mock := &MockJitterRandSource{ctrl: ctrl} + mock.recorder = &MockJitterRandSourceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { + return m.recorder +} + +// Int63 mocks base method +func (m *MockJitterRandSource) Int63() int64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Int63") + ret0, _ := ret[0].(int64) + return ret0 +} + +// Int63 indicates an expected call of Int63 +func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) +} + +// Seed 
mocks base method +func (m *MockJitterRandSource) Seed(arg0 int64) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Seed", arg0) +} + +// Seed indicates an expected call of Seed +func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) +} diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go index 98d5a4a97..490d20977 100644 --- a/test/mocks/mocks.go +++ b/test/mocks/mocks.go @@ -3,4 +3,5 @@ package mocks //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/snapshot/snapshot.go github.com/lyft/goruntime/snapshot IFace //go:generate go run github.com/golang/mock/mockgen -destination ./runtime/loader/loader.go github.com/lyft/goruntime/loader IFace //go:generate go run github.com/golang/mock/mockgen -destination ./config/config.go github.com/envoyproxy/ratelimit/src/config RateLimitConfig,RateLimitConfigLoader -//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis RateLimitCache,Client,TimeSource,JitterRandSource +//go:generate go run github.com/golang/mock/mockgen -destination ./redis/redis.go github.com/envoyproxy/ratelimit/src/redis Client +//go:generate go run github.com/golang/mock/mockgen -destination ./limiter/limiter.go github.com/envoyproxy/ratelimit/src/limiter RateLimitCache,TimeSource,JitterRandSource diff --git a/test/mocks/redis/redis.go b/test/mocks/redis/redis.go index be5f5a34f..4d3001468 100644 --- a/test/mocks/redis/redis.go +++ b/test/mocks/redis/redis.go @@ -1,54 +1,14 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: RateLimitCache,Client,TimeSource,JitterRandSource) +// Source: github.com/envoyproxy/ratelimit/src/redis (interfaces: Client) // Package mock_redis is a generated GoMock package. 
package mock_redis import ( - context "context" - v2 "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" - config "github.com/envoyproxy/ratelimit/src/config" gomock "github.com/golang/mock/gomock" reflect "reflect" ) -// MockRateLimitCache is a mock of RateLimitCache interface -type MockRateLimitCache struct { - ctrl *gomock.Controller - recorder *MockRateLimitCacheMockRecorder -} - -// MockRateLimitCacheMockRecorder is the mock recorder for MockRateLimitCache -type MockRateLimitCacheMockRecorder struct { - mock *MockRateLimitCache -} - -// NewMockRateLimitCache creates a new mock instance -func NewMockRateLimitCache(ctrl *gomock.Controller) *MockRateLimitCache { - mock := &MockRateLimitCache{ctrl: ctrl} - mock.recorder = &MockRateLimitCacheMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockRateLimitCache) EXPECT() *MockRateLimitCacheMockRecorder { - return m.recorder -} - -// DoLimit mocks base method -func (m *MockRateLimitCache) DoLimit(arg0 context.Context, arg1 *v2.RateLimitRequest, arg2 []*config.RateLimit) []*v2.RateLimitResponse_DescriptorStatus { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DoLimit", arg0, arg1, arg2) - ret0, _ := ret[0].([]*v2.RateLimitResponse_DescriptorStatus) - return ret0 -} - -// DoLimit indicates an expected call of DoLimit -func (mr *MockRateLimitCacheMockRecorder) DoLimit(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DoLimit", reflect.TypeOf((*MockRateLimitCache)(nil).DoLimit), arg0, arg1, arg2) -} - // MockClient is a mock of Client interface type MockClient struct { ctrl *gomock.Controller @@ -118,89 +78,3 @@ func (mr *MockClientMockRecorder) NumActiveConns() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumActiveConns", reflect.TypeOf((*MockClient)(nil).NumActiveConns)) } - -// MockTimeSource is a mock of 
TimeSource interface -type MockTimeSource struct { - ctrl *gomock.Controller - recorder *MockTimeSourceMockRecorder -} - -// MockTimeSourceMockRecorder is the mock recorder for MockTimeSource -type MockTimeSourceMockRecorder struct { - mock *MockTimeSource -} - -// NewMockTimeSource creates a new mock instance -func NewMockTimeSource(ctrl *gomock.Controller) *MockTimeSource { - mock := &MockTimeSource{ctrl: ctrl} - mock.recorder = &MockTimeSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockTimeSource) EXPECT() *MockTimeSourceMockRecorder { - return m.recorder -} - -// UnixNow mocks base method -func (m *MockTimeSource) UnixNow() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UnixNow") - ret0, _ := ret[0].(int64) - return ret0 -} - -// UnixNow indicates an expected call of UnixNow -func (mr *MockTimeSourceMockRecorder) UnixNow() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixNow", reflect.TypeOf((*MockTimeSource)(nil).UnixNow)) -} - -// MockJitterRandSource is a mock of JitterRandSource interface -type MockJitterRandSource struct { - ctrl *gomock.Controller - recorder *MockJitterRandSourceMockRecorder -} - -// MockJitterRandSourceMockRecorder is the mock recorder for MockJitterRandSource -type MockJitterRandSourceMockRecorder struct { - mock *MockJitterRandSource -} - -// NewMockJitterRandSource creates a new mock instance -func NewMockJitterRandSource(ctrl *gomock.Controller) *MockJitterRandSource { - mock := &MockJitterRandSource{ctrl: ctrl} - mock.recorder = &MockJitterRandSourceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use -func (m *MockJitterRandSource) EXPECT() *MockJitterRandSourceMockRecorder { - return m.recorder -} - -// Int63 mocks base method -func (m *MockJitterRandSource) Int63() int64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Int63") - 
ret0, _ := ret[0].(int64) - return ret0 -} - -// Int63 indicates an expected call of Int63 -func (mr *MockJitterRandSourceMockRecorder) Int63() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Int63", reflect.TypeOf((*MockJitterRandSource)(nil).Int63)) -} - -// Seed mocks base method -func (m *MockJitterRandSource) Seed(arg0 int64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Seed", arg0) -} - -// Seed indicates an expected call of Seed -func (mr *MockJitterRandSourceMockRecorder) Seed(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Seed", reflect.TypeOf((*MockJitterRandSource)(nil).Seed), arg0) -} diff --git a/test/redis/bench_test.go b/test/redis/bench_test.go index 3463f25a0..b268de996 100644 --- a/test/redis/bench_test.go +++ b/test/redis/bench_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" @@ -43,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) { client := redis.NewClientImpl(statsStore, false, "", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit) defer client.Close() - cache := redis.NewRateLimitCacheImpl(client, nil, redis.NewTimeSourceImpl(), rand.New(redis.NewLockedSource(time.Now().Unix())), 10, nil) + cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil) request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1) limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)} diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go index e0807689b..7d6d0b8c6 100644 --- a/test/redis/cache_impl_test.go +++ 
b/test/redis/cache_impl_test.go @@ -8,6 +8,7 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" + "github.com/envoyproxy/ratelimit/src/limiter" "github.com/envoyproxy/ratelimit/src/redis" stats "github.com/lyft/gostats" @@ -15,6 +16,7 @@ import ( "github.com/alicebob/miniredis/v2" "github.com/envoyproxy/ratelimit/test/common" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" mock_redis "github.com/envoyproxy/ratelimit/test/mocks/redis" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" @@ -33,8 +35,8 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) { client := mock_redis.NewMockClient(controller) perSecondClient := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - var cache redis.RateLimitCache + timeSource := mock_limiter.NewMockTimeSource(controller) + var cache limiter.RateLimitCache if usePerSecondRedis { cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil) } else { @@ -160,12 +162,12 @@ func TestOverLimitWithLocalCache(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) localCache := freecache.NewCache(100) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache) sink := &common.TestStatSink{} statsStore := stats.NewStore(sink, true) - localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) + localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache")) // Test Near Limit Stats. 
Under Near Limit Ratio timeSource.EXPECT().UnixNow().Return(int64(1000000)) @@ -250,7 +252,7 @@ func TestNearLimit(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) @@ -400,8 +402,8 @@ func TestRedisWithJitter(t *testing.T) { defer controller.Finish() client := mock_redis.NewMockClient(controller) - timeSource := mock_redis.NewMockTimeSource(controller) - jitterSource := mock_redis.NewMockJitterRandSource(controller) + timeSource := mock_limiter.NewMockTimeSource(controller) + jitterSource := mock_limiter.NewMockJitterRandSource(controller) cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil) statsStore := stats.NewStore(stats.NewNullSink(), false) diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index 57fa0a65d..c51bc7984 100644 --- a/test/service/ratelimit_test.go +++ b/test/service/ratelimit_test.go @@ -7,14 +7,14 @@ import ( pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/envoyproxy/ratelimit/src/config" "github.com/envoyproxy/ratelimit/src/redis" - "github.com/envoyproxy/ratelimit/src/service" + ratelimit "github.com/envoyproxy/ratelimit/src/service" "github.com/envoyproxy/ratelimit/test/common" - "github.com/envoyproxy/ratelimit/test/mocks/config" - "github.com/envoyproxy/ratelimit/test/mocks/redis" - "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" - "github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" + mock_config "github.com/envoyproxy/ratelimit/test/mocks/config" + mock_limiter "github.com/envoyproxy/ratelimit/test/mocks/limiter" + mock_loader "github.com/envoyproxy/ratelimit/test/mocks/runtime/loader" + mock_snapshot 
"github.com/envoyproxy/ratelimit/test/mocks/runtime/snapshot" "github.com/golang/mock/gomock" - "github.com/lyft/gostats" + stats "github.com/lyft/gostats" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -51,7 +51,7 @@ type rateLimitServiceTestSuite struct { controller *gomock.Controller runtime *mock_loader.MockIFace snapshot *mock_snapshot.MockIFace - cache *mock_redis.MockRateLimitCache + cache *mock_limiter.MockRateLimitCache configLoader *mock_config.MockRateLimitConfigLoader config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int @@ -64,7 +64,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.controller = gomock.NewController(t) ret.runtime = mock_loader.NewMockIFace(ret.controller) ret.snapshot = mock_snapshot.NewMockIFace(ret.controller) - ret.cache = mock_redis.NewMockRateLimitCache(ret.controller) + ret.cache = mock_limiter.NewMockRateLimitCache(ret.controller) ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = stats.NewStore(stats.NewNullSink(), false)